// SPDX-License-Identifier: GPL-2.0
/*
* SuperH On-Chip RTC Support
*
* Copyright (C) 2006 - 2009 Paul Mundt
* Copyright (C) 2006 Jamie Lenehan
* Copyright (C) 2008 Angelo Castello
*
* Based on the old arch/sh/kernel/cpu/rtc.c by:
*
* Copyright (C) 2000 Philipp Rumpf <[email protected]>
* Copyright (C) 1999 Tetsuya Okada & Niibe Yutaka
*/
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/kernel.h>
#include <linux/bcd.h>
#include <linux/rtc.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/seq_file.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/io.h>
#include <linux/log2.h>
#include <linux/clk.h>
#include <linux/slab.h>
#ifdef CONFIG_SUPERH
#include <asm/rtc.h>
#else
/* Default values for RZ/A RTC */
#define rtc_reg_size sizeof(u16)
#define RTC_BIT_INVERTED 0 /* no chip bugs */
#define RTC_CAP_4_DIGIT_YEAR (1 << 0)
#define RTC_DEF_CAPABILITIES RTC_CAP_4_DIGIT_YEAR
#endif
#define DRV_NAME "sh-rtc"
#define RTC_REG(r) ((r) * rtc_reg_size)
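/*
 * RTC_REG() scales a register index by the bus stride so that one
 * offset table serves byte- and word-spaced maps alike. For
 * illustration (derived from the definitions here, not a datasheet):
 * with rtc_reg_size == sizeof(u16), RSECCNT = RTC_REG(1) = 0x02 and
 * RCR2 = RTC_REG(15) = 0x1e; a byte-spaced map would yield 0x01 and
 * 0x0f instead.
 */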
#define R64CNT RTC_REG(0)
#define RSECCNT RTC_REG(1) /* RTC sec */
#define RMINCNT RTC_REG(2) /* RTC min */
#define RHRCNT RTC_REG(3) /* RTC hour */
#define RWKCNT RTC_REG(4) /* RTC week */
#define RDAYCNT RTC_REG(5) /* RTC day */
#define RMONCNT RTC_REG(6) /* RTC month */
#define RYRCNT RTC_REG(7) /* RTC year */
#define RSECAR RTC_REG(8) /* ALARM sec */
#define RMINAR RTC_REG(9) /* ALARM min */
#define RHRAR RTC_REG(10) /* ALARM hour */
#define RWKAR RTC_REG(11) /* ALARM week */
#define RDAYAR RTC_REG(12) /* ALARM day */
#define RMONAR RTC_REG(13) /* ALARM month */
#define RCR1 RTC_REG(14) /* Control */
#define RCR2 RTC_REG(15) /* Control */
/*
* Note on RYRAR and RCR3: Up until this point most of the register
* definitions are consistent across all of the available parts. However,
* the placement of the optional RYRAR and RCR3 (the RYRAR control
* register used to control RYRCNT/RYRAR compare) varies considerably
* across various parts, occasionally being mapped in to a completely
* unrelated address space. For proper RYRAR support a separate resource
* would have to be handed off, but as this is purely optional in
* practice, we simply opt not to support it, thereby keeping the code
* quite a bit more simplified.
*/
/* ALARM Bits - or with BCD encoded value */
#define AR_ENB 0x80 /* Enable for alarm cmp */
/* Period Bits */
#define PF_HP 0x100 /* Enable Half Period to support 8,32,128Hz */
#define PF_COUNT 0x200 /* Half periodic counter */
#define PF_OXS 0x400 /* Periodic One x Second */
#define PF_KOU 0x800 /* Kernel or User periodic request 1=kernel */
#define PF_MASK 0xf00
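/*
 * How the PF_* flags cooperate, as a sketch inferred from
 * __sh_rtc_periodic() below rather than from a datasheet: for the
 * 8/32/128Hz rates noted above, the hardware timer runs at twice the
 * requested rate with PF_HP set, and PF_COUNT is toggled so that only
 * every other interrupt is forwarded to the RTC core.
 */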
/* RCR1 Bits */
#define RCR1_CF 0x80 /* Carry Flag */
#define RCR1_CIE 0x10 /* Carry Interrupt Enable */
#define RCR1_AIE 0x08 /* Alarm Interrupt Enable */
#define RCR1_AF 0x01 /* Alarm Flag */
/* RCR2 Bits */
#define RCR2_PEF 0x80 /* PEriodic interrupt Flag */
#define RCR2_PESMASK 0x70 /* Periodic interrupt Set */
#define RCR2_RTCEN 0x08 /* ENable RTC */
#define RCR2_ADJ 0x04 /* ADJustment (30-second) */
#define RCR2_RESET 0x02 /* Reset bit */
#define RCR2_START 0x01 /* Start bit */
struct sh_rtc {
void __iomem *regbase;
unsigned long regsize;
struct resource *res;
int alarm_irq;
int periodic_irq;
int carry_irq;
struct clk *clk;
struct rtc_device *rtc_dev;
spinlock_t lock;
unsigned long capabilities; /* See asm/rtc.h for cap bits */
unsigned short periodic_freq;
};
static int __sh_rtc_interrupt(struct sh_rtc *rtc)
{
unsigned int tmp, pending;
tmp = readb(rtc->regbase + RCR1);
pending = tmp & RCR1_CF;
tmp &= ~RCR1_CF;
writeb(tmp, rtc->regbase + RCR1);
/* Users have requested One x Second IRQ */
if (pending && rtc->periodic_freq & PF_OXS)
rtc_update_irq(rtc->rtc_dev, 1, RTC_UF | RTC_IRQF);
return pending;
}
static int __sh_rtc_alarm(struct sh_rtc *rtc)
{
unsigned int tmp, pending;
tmp = readb(rtc->regbase + RCR1);
pending = tmp & RCR1_AF;
tmp &= ~(RCR1_AF | RCR1_AIE);
writeb(tmp, rtc->regbase + RCR1);
if (pending)
rtc_update_irq(rtc->rtc_dev, 1, RTC_AF | RTC_IRQF);
return pending;
}
static int __sh_rtc_periodic(struct sh_rtc *rtc)
{
unsigned int tmp, pending;
tmp = readb(rtc->regbase + RCR2);
pending = tmp & RCR2_PEF;
tmp &= ~RCR2_PEF;
writeb(tmp, rtc->regbase + RCR2);
if (!pending)
return 0;
/* If half period is enabled, one interrupt is skipped and the next one is delivered */
if ((rtc->periodic_freq & PF_HP) && (rtc->periodic_freq & PF_COUNT))
rtc->periodic_freq &= ~PF_COUNT;
else {
if (rtc->periodic_freq & PF_HP)
rtc->periodic_freq |= PF_COUNT;
rtc_update_irq(rtc->rtc_dev, 1, RTC_PF | RTC_IRQF);
}
return pending;
}
static irqreturn_t sh_rtc_interrupt(int irq, void *dev_id)
{
struct sh_rtc *rtc = dev_id;
int ret;
spin_lock(&rtc->lock);
ret = __sh_rtc_interrupt(rtc);
spin_unlock(&rtc->lock);
return IRQ_RETVAL(ret);
}
static irqreturn_t sh_rtc_alarm(int irq, void *dev_id)
{
struct sh_rtc *rtc = dev_id;
int ret;
spin_lock(&rtc->lock);
ret = __sh_rtc_alarm(rtc);
spin_unlock(&rtc->lock);
return IRQ_RETVAL(ret);
}
static irqreturn_t sh_rtc_periodic(int irq, void *dev_id)
{
struct sh_rtc *rtc = dev_id;
int ret;
spin_lock(&rtc->lock);
ret = __sh_rtc_periodic(rtc);
spin_unlock(&rtc->lock);
return IRQ_RETVAL(ret);
}
static irqreturn_t sh_rtc_shared(int irq, void *dev_id)
{
struct sh_rtc *rtc = dev_id;
int ret;
spin_lock(&rtc->lock);
ret = __sh_rtc_interrupt(rtc);
ret |= __sh_rtc_alarm(rtc);
ret |= __sh_rtc_periodic(rtc);
spin_unlock(&rtc->lock);
return IRQ_RETVAL(ret);
}
static inline void sh_rtc_setaie(struct device *dev, unsigned int enable)
{
struct sh_rtc *rtc = dev_get_drvdata(dev);
unsigned int tmp;
spin_lock_irq(&rtc->lock);
tmp = readb(rtc->regbase + RCR1);
if (enable)
tmp |= RCR1_AIE;
else
tmp &= ~RCR1_AIE;
writeb(tmp, rtc->regbase + RCR1);
spin_unlock_irq(&rtc->lock);
}
static int sh_rtc_proc(struct device *dev, struct seq_file *seq)
{
struct sh_rtc *rtc = dev_get_drvdata(dev);
unsigned int tmp;
tmp = readb(rtc->regbase + RCR1);
seq_printf(seq, "carry_IRQ\t: %s\n", (tmp & RCR1_CIE) ? "yes" : "no");
tmp = readb(rtc->regbase + RCR2);
seq_printf(seq, "periodic_IRQ\t: %s\n",
(tmp & RCR2_PESMASK) ? "yes" : "no");
return 0;
}
static inline void sh_rtc_setcie(struct device *dev, unsigned int enable)
{
struct sh_rtc *rtc = dev_get_drvdata(dev);
unsigned int tmp;
spin_lock_irq(&rtc->lock);
tmp = readb(rtc->regbase + RCR1);
if (!enable)
tmp &= ~RCR1_CIE;
else
tmp |= RCR1_CIE;
writeb(tmp, rtc->regbase + RCR1);
spin_unlock_irq(&rtc->lock);
}
static int sh_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
{
sh_rtc_setaie(dev, enabled);
return 0;
}
static int sh_rtc_read_time(struct device *dev, struct rtc_time *tm)
{
struct sh_rtc *rtc = dev_get_drvdata(dev);
unsigned int sec128, sec2, yr, yr100, cf_bit;
if (!(readb(rtc->regbase + RCR2) & RCR2_RTCEN))
return -EINVAL;
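/*
 * Read protocol, as implemented below: clear the carry flag and
 * enable the carry interrupt, snapshot the 128Hz counter, read all
 * of the date/time registers, then re-read R64CNT and CF. If a
 * carry occurred, or the counter stepped across a second boundary
 * mid-read, the whole register set is read again so the returned
 * fields stay mutually consistent.
 */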
do {
unsigned int tmp;
spin_lock_irq(&rtc->lock);
tmp = readb(rtc->regbase + RCR1);
tmp &= ~RCR1_CF; /* Clear CF-bit */
tmp |= RCR1_CIE;
writeb(tmp, rtc->regbase + RCR1);
sec128 = readb(rtc->regbase + R64CNT);
tm->tm_sec = bcd2bin(readb(rtc->regbase + RSECCNT));
tm->tm_min = bcd2bin(readb(rtc->regbase + RMINCNT));
tm->tm_hour = bcd2bin(readb(rtc->regbase + RHRCNT));
tm->tm_wday = bcd2bin(readb(rtc->regbase + RWKCNT));
tm->tm_mday = bcd2bin(readb(rtc->regbase + RDAYCNT));
tm->tm_mon = bcd2bin(readb(rtc->regbase + RMONCNT)) - 1;
if (rtc->capabilities & RTC_CAP_4_DIGIT_YEAR) {
yr = readw(rtc->regbase + RYRCNT);
yr100 = bcd2bin(yr >> 8);
yr &= 0xff;
} else {
yr = readb(rtc->regbase + RYRCNT);
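/*
 * Two-digit-year parts: treat a BCD year of 99 as 1999 and
 * everything else as 20xx (e.g. 0x05 decodes to 2005), which
 * matches the 1999-2098 range advertised to the RTC core in probe.
 */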
yr100 = bcd2bin((yr == 0x99) ? 0x19 : 0x20);
}
tm->tm_year = (yr100 * 100 + bcd2bin(yr)) - 1900;
sec2 = readb(rtc->regbase + R64CNT);
cf_bit = readb(rtc->regbase + RCR1) & RCR1_CF;
spin_unlock_irq(&rtc->lock);
} while (cf_bit != 0 || ((sec128 ^ sec2) & RTC_BIT_INVERTED) != 0);
#if RTC_BIT_INVERTED != 0
if ((sec128 & RTC_BIT_INVERTED))
tm->tm_sec--;
#endif
/* only keep the carry interrupt enabled if UIE is on */
if (!(rtc->periodic_freq & PF_OXS))
sh_rtc_setcie(dev, 0);
dev_dbg(dev, "%s: tm is secs=%d, mins=%d, hours=%d, "
"mday=%d, mon=%d, year=%d, wday=%d\n",
__func__,
tm->tm_sec, tm->tm_min, tm->tm_hour,
tm->tm_mday, tm->tm_mon + 1, tm->tm_year, tm->tm_wday);
return 0;
}
static int sh_rtc_set_time(struct device *dev, struct rtc_time *tm)
{
struct sh_rtc *rtc = dev_get_drvdata(dev);
unsigned int tmp;
int year;
spin_lock_irq(&rtc->lock);
/* Reset pre-scaler & stop RTC */
tmp = readb(rtc->regbase + RCR2);
tmp |= RCR2_RESET;
tmp &= ~RCR2_START;
writeb(tmp, rtc->regbase + RCR2);
writeb(bin2bcd(tm->tm_sec), rtc->regbase + RSECCNT);
writeb(bin2bcd(tm->tm_min), rtc->regbase + RMINCNT);
writeb(bin2bcd(tm->tm_hour), rtc->regbase + RHRCNT);
writeb(bin2bcd(tm->tm_wday), rtc->regbase + RWKCNT);
writeb(bin2bcd(tm->tm_mday), rtc->regbase + RDAYCNT);
writeb(bin2bcd(tm->tm_mon + 1), rtc->regbase + RMONCNT);
if (rtc->capabilities & RTC_CAP_4_DIGIT_YEAR) {
year = (bin2bcd((tm->tm_year + 1900) / 100) << 8) |
bin2bcd(tm->tm_year % 100);
writew(year, rtc->regbase + RYRCNT);
} else {
year = tm->tm_year % 100;
writeb(bin2bcd(year), rtc->regbase + RYRCNT);
}
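/*
 * Worked example for the 4-digit-year encoding above: tm_year == 123
 * (calendar year 2023) becomes bin2bcd(20) << 8 | bin2bcd(23) ==
 * 0x2023 in the 16-bit RYRCNT.
 */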
/* Start RTC */
tmp = readb(rtc->regbase + RCR2);
tmp &= ~RCR2_RESET;
tmp |= RCR2_RTCEN | RCR2_START;
writeb(tmp, rtc->regbase + RCR2);
spin_unlock_irq(&rtc->lock);
return 0;
}
static inline int sh_rtc_read_alarm_value(struct sh_rtc *rtc, int reg_off)
{
unsigned int byte;
int value = -1; /* return -1 for ignored values */
byte = readb(rtc->regbase + reg_off);
if (byte & AR_ENB) {
byte &= ~AR_ENB; /* strip the enable bit */
value = bcd2bin(byte);
}
return value;
}
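/*
 * Example: an alarm-minute register of 0xd5 has AR_ENB (0x80) set,
 * so it takes part in the compare and decodes to minute 55, while a
 * plain 0x55 without the enable bit reads back as -1 ("ignored").
 */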
static int sh_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *wkalrm)
{
struct sh_rtc *rtc = dev_get_drvdata(dev);
struct rtc_time *tm = &wkalrm->time;
spin_lock_irq(&rtc->lock);
tm->tm_sec = sh_rtc_read_alarm_value(rtc, RSECAR);
tm->tm_min = sh_rtc_read_alarm_value(rtc, RMINAR);
tm->tm_hour = sh_rtc_read_alarm_value(rtc, RHRAR);
tm->tm_wday = sh_rtc_read_alarm_value(rtc, RWKAR);
tm->tm_mday = sh_rtc_read_alarm_value(rtc, RDAYAR);
tm->tm_mon = sh_rtc_read_alarm_value(rtc, RMONAR);
if (tm->tm_mon > 0)
tm->tm_mon -= 1; /* RTC is 1-12, tm_mon is 0-11 */
wkalrm->enabled = (readb(rtc->regbase + RCR1) & RCR1_AIE) ? 1 : 0;
spin_unlock_irq(&rtc->lock);
return 0;
}
static inline void sh_rtc_write_alarm_value(struct sh_rtc *rtc,
int value, int reg_off)
{
/* < 0 for a value that is ignored */
if (value < 0)
writeb(0, rtc->regbase + reg_off);
else
writeb(bin2bcd(value) | AR_ENB, rtc->regbase + reg_off);
}
static int sh_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *wkalrm)
{
struct sh_rtc *rtc = dev_get_drvdata(dev);
unsigned int rcr1;
struct rtc_time *tm = &wkalrm->time;
int mon;
spin_lock_irq(&rtc->lock);
/* disable alarm interrupt and clear the alarm flag */
rcr1 = readb(rtc->regbase + RCR1);
rcr1 &= ~(RCR1_AF | RCR1_AIE);
writeb(rcr1, rtc->regbase + RCR1);
/* set alarm time */
sh_rtc_write_alarm_value(rtc, tm->tm_sec, RSECAR);
sh_rtc_write_alarm_value(rtc, tm->tm_min, RMINAR);
sh_rtc_write_alarm_value(rtc, tm->tm_hour, RHRAR);
sh_rtc_write_alarm_value(rtc, tm->tm_wday, RWKAR);
sh_rtc_write_alarm_value(rtc, tm->tm_mday, RDAYAR);
mon = tm->tm_mon;
if (mon >= 0)
mon += 1;
sh_rtc_write_alarm_value(rtc, mon, RMONAR);
if (wkalrm->enabled) {
rcr1 |= RCR1_AIE;
writeb(rcr1, rtc->regbase + RCR1);
}
spin_unlock_irq(&rtc->lock);
return 0;
}
static const struct rtc_class_ops sh_rtc_ops = {
.read_time = sh_rtc_read_time,
.set_time = sh_rtc_set_time,
.read_alarm = sh_rtc_read_alarm,
.set_alarm = sh_rtc_set_alarm,
.proc = sh_rtc_proc,
.alarm_irq_enable = sh_rtc_alarm_irq_enable,
};
static int __init sh_rtc_probe(struct platform_device *pdev)
{
struct sh_rtc *rtc;
struct resource *res;
char clk_name[6];
int clk_id, ret;
rtc = devm_kzalloc(&pdev->dev, sizeof(*rtc), GFP_KERNEL);
if (unlikely(!rtc))
return -ENOMEM;
spin_lock_init(&rtc->lock);
/* get periodic/carry/alarm irqs */
ret = platform_get_irq(pdev, 0);
if (unlikely(ret <= 0)) {
dev_err(&pdev->dev, "No IRQ resource\n");
return -ENOENT;
}
rtc->periodic_irq = ret;
rtc->carry_irq = platform_get_irq(pdev, 1);
rtc->alarm_irq = platform_get_irq(pdev, 2);
res = platform_get_resource(pdev, IORESOURCE_IO, 0);
if (!res)
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (unlikely(res == NULL)) {
dev_err(&pdev->dev, "No IO resource\n");
return -ENOENT;
}
rtc->regsize = resource_size(res);
rtc->res = devm_request_mem_region(&pdev->dev, res->start,
rtc->regsize, pdev->name);
if (unlikely(!rtc->res))
return -EBUSY;
rtc->regbase = devm_ioremap(&pdev->dev, rtc->res->start, rtc->regsize);
if (unlikely(!rtc->regbase))
return -EINVAL;
if (!pdev->dev.of_node) {
clk_id = pdev->id;
/* With a single device, the clock id is still "rtc0" */
if (clk_id < 0)
clk_id = 0;
snprintf(clk_name, sizeof(clk_name), "rtc%d", clk_id);
} else
snprintf(clk_name, sizeof(clk_name), "fck");
rtc->clk = devm_clk_get(&pdev->dev, clk_name);
if (IS_ERR(rtc->clk)) {
/*
* No error handling for rtc->clk intentionally, not all
* platforms will have a unique clock for the RTC, and
* the clk API can handle the struct clk pointer being
* NULL.
*/
rtc->clk = NULL;
}
rtc->rtc_dev = devm_rtc_allocate_device(&pdev->dev);
if (IS_ERR(rtc->rtc_dev))
return PTR_ERR(rtc->rtc_dev);
clk_enable(rtc->clk);
rtc->capabilities = RTC_DEF_CAPABILITIES;
#ifdef CONFIG_SUPERH
if (dev_get_platdata(&pdev->dev)) {
struct sh_rtc_platform_info *pinfo =
dev_get_platdata(&pdev->dev);
/*
* Some CPUs have special capabilities in addition to the
* default set. Add those in here.
*/
rtc->capabilities |= pinfo->capabilities;
}
#endif
if (rtc->carry_irq <= 0) {
/* register shared periodic/carry/alarm irq */
ret = devm_request_irq(&pdev->dev, rtc->periodic_irq,
sh_rtc_shared, 0, "sh-rtc", rtc);
if (unlikely(ret)) {
dev_err(&pdev->dev,
"request IRQ failed with %d, IRQ %d\n", ret,
rtc->periodic_irq);
goto err_unmap;
}
} else {
/* register periodic/carry/alarm irqs */
ret = devm_request_irq(&pdev->dev, rtc->periodic_irq,
sh_rtc_periodic, 0, "sh-rtc period", rtc);
if (unlikely(ret)) {
dev_err(&pdev->dev,
"request period IRQ failed with %d, IRQ %d\n",
ret, rtc->periodic_irq);
goto err_unmap;
}
ret = devm_request_irq(&pdev->dev, rtc->carry_irq,
sh_rtc_interrupt, 0, "sh-rtc carry", rtc);
if (unlikely(ret)) {
dev_err(&pdev->dev,
"request carry IRQ failed with %d, IRQ %d\n",
ret, rtc->carry_irq);
goto err_unmap;
}
ret = devm_request_irq(&pdev->dev, rtc->alarm_irq,
sh_rtc_alarm, 0, "sh-rtc alarm", rtc);
if (unlikely(ret)) {
dev_err(&pdev->dev,
"request alarm IRQ failed with %d, IRQ %d\n",
ret, rtc->alarm_irq);
goto err_unmap;
}
}
platform_set_drvdata(pdev, rtc);
/* everything disabled by default */
sh_rtc_setaie(&pdev->dev, 0);
sh_rtc_setcie(&pdev->dev, 0);
rtc->rtc_dev->ops = &sh_rtc_ops;
rtc->rtc_dev->max_user_freq = 256;
if (rtc->capabilities & RTC_CAP_4_DIGIT_YEAR) {
rtc->rtc_dev->range_min = RTC_TIMESTAMP_BEGIN_1900;
rtc->rtc_dev->range_max = RTC_TIMESTAMP_END_9999;
} else {
rtc->rtc_dev->range_min = mktime64(1999, 1, 1, 0, 0, 0);
rtc->rtc_dev->range_max = mktime64(2098, 12, 31, 23, 59, 59);
}
ret = devm_rtc_register_device(rtc->rtc_dev);
if (ret)
goto err_unmap;
device_init_wakeup(&pdev->dev, 1);
return 0;
err_unmap:
clk_disable(rtc->clk);
return ret;
}
static int __exit sh_rtc_remove(struct platform_device *pdev)
{
struct sh_rtc *rtc = platform_get_drvdata(pdev);
sh_rtc_setaie(&pdev->dev, 0);
sh_rtc_setcie(&pdev->dev, 0);
clk_disable(rtc->clk);
return 0;
}
static void sh_rtc_set_irq_wake(struct device *dev, int enabled)
{
struct sh_rtc *rtc = dev_get_drvdata(dev);
irq_set_irq_wake(rtc->periodic_irq, enabled);
if (rtc->carry_irq > 0) {
irq_set_irq_wake(rtc->carry_irq, enabled);
irq_set_irq_wake(rtc->alarm_irq, enabled);
}
}
static int __maybe_unused sh_rtc_suspend(struct device *dev)
{
if (device_may_wakeup(dev))
sh_rtc_set_irq_wake(dev, 1);
return 0;
}
static int __maybe_unused sh_rtc_resume(struct device *dev)
{
if (device_may_wakeup(dev))
sh_rtc_set_irq_wake(dev, 0);
return 0;
}
static SIMPLE_DEV_PM_OPS(sh_rtc_pm_ops, sh_rtc_suspend, sh_rtc_resume);
static const struct of_device_id sh_rtc_of_match[] = {
{ .compatible = "renesas,sh-rtc", },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, sh_rtc_of_match);
static struct platform_driver sh_rtc_platform_driver = {
.driver = {
.name = DRV_NAME,
.pm = &sh_rtc_pm_ops,
.of_match_table = sh_rtc_of_match,
},
.remove = __exit_p(sh_rtc_remove),
};
module_platform_driver_probe(sh_rtc_platform_driver, sh_rtc_probe);
MODULE_DESCRIPTION("SuperH on-chip RTC driver");
MODULE_AUTHOR("Paul Mundt <[email protected]>, "
"Jamie Lenehan <[email protected]>, "
"Angelo Castello <[email protected]>");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:" DRV_NAME);
/* linux-master: drivers/rtc/rtc-sh.c */
// SPDX-License-Identifier: GPL-2.0-only
/*
* rtc-ds1307.c - RTC driver for some mostly-compatible I2C chips.
*
* Copyright (C) 2005 James Chapman (ds1337 core)
* Copyright (C) 2006 David Brownell
* Copyright (C) 2009 Matthias Fuchs (rx8025 support)
* Copyright (C) 2012 Bertrand Achard (nvram access fixes)
*/
#include <linux/bcd.h>
#include <linux/i2c.h>
#include <linux/init.h>
#include <linux/kstrtox.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/property.h>
#include <linux/rtc/ds1307.h>
#include <linux/rtc.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#include <linux/clk-provider.h>
#include <linux/regmap.h>
#include <linux/watchdog.h>
/*
* We can't determine type by probing, but if we expect pre-Linux code
* to have set the chip up as a clock (turning on the oscillator and
* setting the date and time), Linux can ignore the non-clock features.
* That's a natural job for a factory or repair bench.
*/
enum ds_type {
unknown_ds_type, /* always first and 0 */
ds_1307,
ds_1308,
ds_1337,
ds_1338,
ds_1339,
ds_1340,
ds_1341,
ds_1388,
ds_3231,
m41t0,
m41t00,
m41t11,
mcp794xx,
rx_8025,
rx_8130,
last_ds_type /* always last */
/* rs5c372 too? different address... */
};
/* RTC registers don't differ much, except for the century flag */
#define DS1307_REG_SECS 0x00 /* 00-59 */
# define DS1307_BIT_CH 0x80
# define DS1340_BIT_nEOSC 0x80
# define MCP794XX_BIT_ST 0x80
#define DS1307_REG_MIN 0x01 /* 00-59 */
# define M41T0_BIT_OF 0x80
#define DS1307_REG_HOUR 0x02 /* 00-23, or 1-12{am,pm} */
# define DS1307_BIT_12HR 0x40 /* in REG_HOUR */
# define DS1307_BIT_PM 0x20 /* in REG_HOUR */
# define DS1340_BIT_CENTURY_EN 0x80 /* in REG_HOUR */
# define DS1340_BIT_CENTURY 0x40 /* in REG_HOUR */
#define DS1307_REG_WDAY 0x03 /* 01-07 */
# define MCP794XX_BIT_VBATEN 0x08
#define DS1307_REG_MDAY 0x04 /* 01-31 */
#define DS1307_REG_MONTH 0x05 /* 01-12 */
# define DS1337_BIT_CENTURY 0x80 /* in REG_MONTH */
#define DS1307_REG_YEAR 0x06 /* 00-99 */
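/*
 * For illustration, a raw dump of registers 0x00-0x06 such as
 * 55 30 12 02 25 09 23 (all BCD) decodes via the masks in
 * ds1307_get_time() below to 12:30:55 on 2023-09-25, with weekday
 * code 2 mapping to tm_wday 1 and year 0x23 to tm_year 123.
 */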
/*
* Other registers (control, status, alarms, trickle charge, NVRAM, etc)
* start at 7, and they differ a LOT. Only control and status matter for
* basic RTC date and time functionality; be careful using them.
*/
#define DS1307_REG_CONTROL 0x07 /* or ds1338 */
# define DS1307_BIT_OUT 0x80
# define DS1338_BIT_OSF 0x20
# define DS1307_BIT_SQWE 0x10
# define DS1307_BIT_RS1 0x02
# define DS1307_BIT_RS0 0x01
#define DS1337_REG_CONTROL 0x0e
# define DS1337_BIT_nEOSC 0x80
# define DS1339_BIT_BBSQI 0x20
# define DS3231_BIT_BBSQW 0x40 /* same as BBSQI */
# define DS1337_BIT_RS2 0x10
# define DS1337_BIT_RS1 0x08
# define DS1337_BIT_INTCN 0x04
# define DS1337_BIT_A2IE 0x02
# define DS1337_BIT_A1IE 0x01
#define DS1340_REG_CONTROL 0x07
# define DS1340_BIT_OUT 0x80
# define DS1340_BIT_FT 0x40
# define DS1340_BIT_CALIB_SIGN 0x20
# define DS1340_M_CALIBRATION 0x1f
#define DS1340_REG_FLAG 0x09
# define DS1340_BIT_OSF 0x80
#define DS1337_REG_STATUS 0x0f
# define DS1337_BIT_OSF 0x80
# define DS3231_BIT_EN32KHZ 0x08
# define DS1337_BIT_A2I 0x02
# define DS1337_BIT_A1I 0x01
#define DS1339_REG_ALARM1_SECS 0x07
#define DS13XX_TRICKLE_CHARGER_MAGIC 0xa0
#define RX8025_REG_CTRL1 0x0e
# define RX8025_BIT_2412 0x20
#define RX8025_REG_CTRL2 0x0f
# define RX8025_BIT_PON 0x10
# define RX8025_BIT_VDET 0x40
# define RX8025_BIT_XST 0x20
#define RX8130_REG_ALARM_MIN 0x17
#define RX8130_REG_ALARM_HOUR 0x18
#define RX8130_REG_ALARM_WEEK_OR_DAY 0x19
#define RX8130_REG_EXTENSION 0x1c
#define RX8130_REG_EXTENSION_WADA BIT(3)
#define RX8130_REG_FLAG 0x1d
#define RX8130_REG_FLAG_VLF BIT(1)
#define RX8130_REG_FLAG_AF BIT(3)
#define RX8130_REG_CONTROL0 0x1e
#define RX8130_REG_CONTROL0_AIE BIT(3)
#define RX8130_REG_CONTROL1 0x1f
#define RX8130_REG_CONTROL1_INIEN BIT(4)
#define RX8130_REG_CONTROL1_CHGEN BIT(5)
#define MCP794XX_REG_CONTROL 0x07
# define MCP794XX_BIT_ALM0_EN 0x10
# define MCP794XX_BIT_ALM1_EN 0x20
#define MCP794XX_REG_ALARM0_BASE 0x0a
#define MCP794XX_REG_ALARM0_CTRL 0x0d
#define MCP794XX_REG_ALARM1_BASE 0x11
#define MCP794XX_REG_ALARM1_CTRL 0x14
# define MCP794XX_BIT_ALMX_IF BIT(3)
# define MCP794XX_BIT_ALMX_C0 BIT(4)
# define MCP794XX_BIT_ALMX_C1 BIT(5)
# define MCP794XX_BIT_ALMX_C2 BIT(6)
# define MCP794XX_BIT_ALMX_POL BIT(7)
# define MCP794XX_MSK_ALMX_MATCH (MCP794XX_BIT_ALMX_C0 | \
MCP794XX_BIT_ALMX_C1 | \
MCP794XX_BIT_ALMX_C2)
#define M41TXX_REG_CONTROL 0x07
# define M41TXX_BIT_OUT BIT(7)
# define M41TXX_BIT_FT BIT(6)
# define M41TXX_BIT_CALIB_SIGN BIT(5)
# define M41TXX_M_CALIBRATION GENMASK(4, 0)
#define DS1388_REG_WDOG_HUN_SECS 0x08
#define DS1388_REG_WDOG_SECS 0x09
#define DS1388_REG_FLAG 0x0b
# define DS1388_BIT_WF BIT(6)
# define DS1388_BIT_OSF BIT(7)
#define DS1388_REG_CONTROL 0x0c
# define DS1388_BIT_RST BIT(0)
# define DS1388_BIT_WDE BIT(1)
# define DS1388_BIT_nEOSC BIT(7)
/* negative offset step is -2.034ppm */
#define M41TXX_NEG_OFFSET_STEP_PPB 2034
/* positive offset step is +4.068ppm */
#define M41TXX_POS_OFFSET_STEP_PPB 4068
/* Min and max values supported with 'offset' interface by M41TXX */
#define M41TXX_MIN_OFFSET ((-31) * M41TXX_NEG_OFFSET_STEP_PPB)
#define M41TXX_MAX_OFFSET ((31) * M41TXX_POS_OFFSET_STEP_PPB)
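/*
 * Worked range check: the calibration field holds 0-31 steps, so the
 * offset interface spans -31 * 2034 = -63054 ppb up to
 * 31 * 4068 = +126108 ppb, which is exactly what M41TXX_MIN_OFFSET
 * and M41TXX_MAX_OFFSET evaluate to.
 */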
struct ds1307 {
enum ds_type type;
struct device *dev;
struct regmap *regmap;
const char *name;
struct rtc_device *rtc;
#ifdef CONFIG_COMMON_CLK
struct clk_hw clks[2];
#endif
};
struct chip_desc {
unsigned alarm:1;
u16 nvram_offset;
u16 nvram_size;
u8 offset; /* register's offset */
u8 century_reg;
u8 century_enable_bit;
u8 century_bit;
u8 bbsqi_bit;
irq_handler_t irq_handler;
const struct rtc_class_ops *rtc_ops;
u16 trickle_charger_reg;
u8 (*do_trickle_setup)(struct ds1307 *, u32,
bool);
/* Does the RTC require trickle-resistor-ohms to select the value of
* the resistor between Vcc and Vbackup?
*/
bool requires_trickle_resistor;
/* Some RTC's batteries and supercaps were charged by default, others
* allow charging but were not configured previously to do so.
* Remember this behavior to stay backwards compatible.
*/
bool charge_default;
};
static const struct chip_desc chips[last_ds_type];
static int ds1307_get_time(struct device *dev, struct rtc_time *t)
{
struct ds1307 *ds1307 = dev_get_drvdata(dev);
int tmp, ret;
const struct chip_desc *chip = &chips[ds1307->type];
u8 regs[7];
if (ds1307->type == rx_8130) {
unsigned int regflag;
ret = regmap_read(ds1307->regmap, RX8130_REG_FLAG, &regflag);
if (ret) {
dev_err(dev, "%s error %d\n", "read", ret);
return ret;
}
if (regflag & RX8130_REG_FLAG_VLF) {
dev_warn_once(dev, "oscillator failed, set time!\n");
return -EINVAL;
}
}
/* read the RTC date and time registers all at once */
ret = regmap_bulk_read(ds1307->regmap, chip->offset, regs,
sizeof(regs));
if (ret) {
dev_err(dev, "%s error %d\n", "read", ret);
return ret;
}
dev_dbg(dev, "%s: %7ph\n", "read", regs);
/* if oscillator fail bit is set, no data can be trusted */
if (ds1307->type == m41t0 &&
regs[DS1307_REG_MIN] & M41T0_BIT_OF) {
dev_warn_once(dev, "oscillator failed, set time!\n");
return -EINVAL;
}
tmp = regs[DS1307_REG_SECS];
switch (ds1307->type) {
case ds_1307:
case m41t0:
case m41t00:
case m41t11:
if (tmp & DS1307_BIT_CH)
return -EINVAL;
break;
case ds_1308:
case ds_1338:
if (tmp & DS1307_BIT_CH)
return -EINVAL;
ret = regmap_read(ds1307->regmap, DS1307_REG_CONTROL, &tmp);
if (ret)
return ret;
if (tmp & DS1338_BIT_OSF)
return -EINVAL;
break;
case ds_1340:
if (tmp & DS1340_BIT_nEOSC)
return -EINVAL;
ret = regmap_read(ds1307->regmap, DS1340_REG_FLAG, &tmp);
if (ret)
return ret;
if (tmp & DS1340_BIT_OSF)
return -EINVAL;
break;
case ds_1388:
ret = regmap_read(ds1307->regmap, DS1388_REG_FLAG, &tmp);
if (ret)
return ret;
if (tmp & DS1388_BIT_OSF)
return -EINVAL;
break;
case mcp794xx:
if (!(tmp & MCP794XX_BIT_ST))
return -EINVAL;
break;
default:
break;
}
t->tm_sec = bcd2bin(regs[DS1307_REG_SECS] & 0x7f);
t->tm_min = bcd2bin(regs[DS1307_REG_MIN] & 0x7f);
tmp = regs[DS1307_REG_HOUR] & 0x3f;
t->tm_hour = bcd2bin(tmp);
/* rx8130 is bit position, not BCD */
if (ds1307->type == rx_8130)
t->tm_wday = fls(regs[DS1307_REG_WDAY] & 0x7f);
else
t->tm_wday = bcd2bin(regs[DS1307_REG_WDAY] & 0x07) - 1;
t->tm_mday = bcd2bin(regs[DS1307_REG_MDAY] & 0x3f);
tmp = regs[DS1307_REG_MONTH] & 0x1f;
t->tm_mon = bcd2bin(tmp) - 1;
t->tm_year = bcd2bin(regs[DS1307_REG_YEAR]) + 100;
if (regs[chip->century_reg] & chip->century_bit &&
IS_ENABLED(CONFIG_RTC_DRV_DS1307_CENTURY))
t->tm_year += 100;
dev_dbg(dev, "%s secs=%d, mins=%d, "
"hours=%d, mday=%d, mon=%d, year=%d, wday=%d\n",
"read", t->tm_sec, t->tm_min,
t->tm_hour, t->tm_mday,
t->tm_mon, t->tm_year, t->tm_wday);
return 0;
}
static int ds1307_set_time(struct device *dev, struct rtc_time *t)
{
struct ds1307 *ds1307 = dev_get_drvdata(dev);
const struct chip_desc *chip = &chips[ds1307->type];
int result;
int tmp;
u8 regs[7];
dev_dbg(dev, "%s secs=%d, mins=%d, "
"hours=%d, mday=%d, mon=%d, year=%d, wday=%d\n",
"write", t->tm_sec, t->tm_min,
t->tm_hour, t->tm_mday,
t->tm_mon, t->tm_year, t->tm_wday);
if (t->tm_year < 100)
return -EINVAL;
#ifdef CONFIG_RTC_DRV_DS1307_CENTURY
if (t->tm_year > (chip->century_bit ? 299 : 199))
return -EINVAL;
#else
if (t->tm_year > 199)
return -EINVAL;
#endif
regs[DS1307_REG_SECS] = bin2bcd(t->tm_sec);
regs[DS1307_REG_MIN] = bin2bcd(t->tm_min);
regs[DS1307_REG_HOUR] = bin2bcd(t->tm_hour);
/* rx8130 is bit position, not BCD */
if (ds1307->type == rx_8130)
regs[DS1307_REG_WDAY] = 1 << t->tm_wday;
else
regs[DS1307_REG_WDAY] = bin2bcd(t->tm_wday + 1);
regs[DS1307_REG_MDAY] = bin2bcd(t->tm_mday);
regs[DS1307_REG_MONTH] = bin2bcd(t->tm_mon + 1);
/* assume 20YY not 19YY */
tmp = t->tm_year - 100;
regs[DS1307_REG_YEAR] = bin2bcd(tmp);
if (chip->century_enable_bit)
regs[chip->century_reg] |= chip->century_enable_bit;
if (t->tm_year > 199 && chip->century_bit)
regs[chip->century_reg] |= chip->century_bit;
switch (ds1307->type) {
case ds_1308:
case ds_1338:
regmap_update_bits(ds1307->regmap, DS1307_REG_CONTROL,
DS1338_BIT_OSF, 0);
break;
case ds_1340:
regmap_update_bits(ds1307->regmap, DS1340_REG_FLAG,
DS1340_BIT_OSF, 0);
break;
case ds_1388:
regmap_update_bits(ds1307->regmap, DS1388_REG_FLAG,
DS1388_BIT_OSF, 0);
break;
case mcp794xx:
/*
* these bits were cleared when preparing the date/time
* values and need to be set again before writing the
* buffer out to the device.
*/
regs[DS1307_REG_SECS] |= MCP794XX_BIT_ST;
regs[DS1307_REG_WDAY] |= MCP794XX_BIT_VBATEN;
break;
default:
break;
}
dev_dbg(dev, "%s: %7ph\n", "write", regs);
result = regmap_bulk_write(ds1307->regmap, chip->offset, regs,
sizeof(regs));
if (result) {
dev_err(dev, "%s error %d\n", "write", result);
return result;
}
if (ds1307->type == rx_8130) {
/* clear Voltage Loss Flag as data is available now */
result = regmap_write(ds1307->regmap, RX8130_REG_FLAG,
~(u8)RX8130_REG_FLAG_VLF);
if (result) {
dev_err(dev, "%s error %d\n", "write", result);
return result;
}
}
return 0;
}
static int ds1337_read_alarm(struct device *dev, struct rtc_wkalrm *t)
{
struct ds1307 *ds1307 = dev_get_drvdata(dev);
int ret;
u8 regs[9];
/* read all ALARM1, ALARM2, and status registers at once */
ret = regmap_bulk_read(ds1307->regmap, DS1339_REG_ALARM1_SECS,
regs, sizeof(regs));
if (ret) {
dev_err(dev, "%s error %d\n", "alarm read", ret);
return ret;
}
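/*
 * regs[] now mirrors registers 0x07-0x0f: ALARM1 sec/min/hour/mday
 * in regs[0..3], ALARM2 min/hour/mday in regs[4..6], then control
 * (regs[7]) and status (regs[8]).
 */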
dev_dbg(dev, "%s: %4ph, %3ph, %2ph\n", "alarm read",
&regs[0], &regs[4], &regs[7]);
/*
* report alarm time (ALARM1); assume 24 hour and day-of-month modes,
* and that all four fields are checked for a match
*/
t->time.tm_sec = bcd2bin(regs[0] & 0x7f);
t->time.tm_min = bcd2bin(regs[1] & 0x7f);
t->time.tm_hour = bcd2bin(regs[2] & 0x3f);
t->time.tm_mday = bcd2bin(regs[3] & 0x3f);
/* ... and status */
t->enabled = !!(regs[7] & DS1337_BIT_A1IE);
t->pending = !!(regs[8] & DS1337_BIT_A1I);
dev_dbg(dev, "%s secs=%d, mins=%d, "
"hours=%d, mday=%d, enabled=%d, pending=%d\n",
"alarm read", t->time.tm_sec, t->time.tm_min,
t->time.tm_hour, t->time.tm_mday,
t->enabled, t->pending);
return 0;
}
static int ds1337_set_alarm(struct device *dev, struct rtc_wkalrm *t)
{
struct ds1307 *ds1307 = dev_get_drvdata(dev);
unsigned char regs[9];
u8 control, status;
int ret;
dev_dbg(dev, "%s secs=%d, mins=%d, "
"hours=%d, mday=%d, enabled=%d, pending=%d\n",
"alarm set", t->time.tm_sec, t->time.tm_min,
t->time.tm_hour, t->time.tm_mday,
t->enabled, t->pending);
/* read current status of both alarms and the chip */
ret = regmap_bulk_read(ds1307->regmap, DS1339_REG_ALARM1_SECS, regs,
sizeof(regs));
if (ret) {
dev_err(dev, "%s error %d\n", "alarm write", ret);
return ret;
}
control = regs[7];
status = regs[8];
dev_dbg(dev, "%s: %4ph, %3ph, %02x %02x\n", "alarm set (old status)",
&regs[0], &regs[4], control, status);
/* set ALARM1, using 24 hour and day-of-month modes */
regs[0] = bin2bcd(t->time.tm_sec);
regs[1] = bin2bcd(t->time.tm_min);
regs[2] = bin2bcd(t->time.tm_hour);
regs[3] = bin2bcd(t->time.tm_mday);
/* set ALARM2 to non-garbage */
regs[4] = 0;
regs[5] = 0;
regs[6] = 0;
/* disable alarms */
regs[7] = control & ~(DS1337_BIT_A1IE | DS1337_BIT_A2IE);
regs[8] = status & ~(DS1337_BIT_A1I | DS1337_BIT_A2I);
ret = regmap_bulk_write(ds1307->regmap, DS1339_REG_ALARM1_SECS, regs,
sizeof(regs));
if (ret) {
dev_err(dev, "can't set alarm time\n");
return ret;
}
/* optionally enable ALARM1 */
if (t->enabled) {
dev_dbg(dev, "alarm IRQ armed\n");
regs[7] |= DS1337_BIT_A1IE; /* only ALARM1 is used */
regmap_write(ds1307->regmap, DS1337_REG_CONTROL, regs[7]);
}
return 0;
}
static int ds1307_alarm_irq_enable(struct device *dev, unsigned int enabled)
{
struct ds1307 *ds1307 = dev_get_drvdata(dev);
return regmap_update_bits(ds1307->regmap, DS1337_REG_CONTROL,
DS1337_BIT_A1IE,
enabled ? DS1337_BIT_A1IE : 0);
}
static u8 do_trickle_setup_ds1339(struct ds1307 *ds1307, u32 ohms, bool diode)
{
u8 setup = (diode) ? DS1307_TRICKLE_CHARGER_DIODE :
DS1307_TRICKLE_CHARGER_NO_DIODE;
setup |= DS13XX_TRICKLE_CHARGER_MAGIC;
switch (ohms) {
case 250:
setup |= DS1307_TRICKLE_CHARGER_250_OHM;
break;
case 2000:
setup |= DS1307_TRICKLE_CHARGER_2K_OHM;
break;
case 4000:
setup |= DS1307_TRICKLE_CHARGER_4K_OHM;
break;
default:
dev_warn(ds1307->dev,
"Unsupported ohm value %u in dt\n", ohms);
return 0;
}
return setup;
}
static u8 do_trickle_setup_rx8130(struct ds1307 *ds1307, u32 ohms, bool diode)
{
/* make sure that the backup battery is enabled */
u8 setup = RX8130_REG_CONTROL1_INIEN;
if (diode)
setup |= RX8130_REG_CONTROL1_CHGEN;
return setup;
}
static irqreturn_t rx8130_irq(int irq, void *dev_id)
{
struct ds1307 *ds1307 = dev_id;
u8 ctl[3];
int ret;
rtc_lock(ds1307->rtc);
/* Read control registers. */
ret = regmap_bulk_read(ds1307->regmap, RX8130_REG_EXTENSION, ctl,
sizeof(ctl));
if (ret < 0)
goto out;
if (!(ctl[1] & RX8130_REG_FLAG_AF))
goto out;
ctl[1] &= ~RX8130_REG_FLAG_AF;
ctl[2] &= ~RX8130_REG_CONTROL0_AIE;
ret = regmap_bulk_write(ds1307->regmap, RX8130_REG_EXTENSION, ctl,
sizeof(ctl));
if (ret < 0)
goto out;
rtc_update_irq(ds1307->rtc, 1, RTC_AF | RTC_IRQF);
out:
rtc_unlock(ds1307->rtc);
return IRQ_HANDLED;
}
static int rx8130_read_alarm(struct device *dev, struct rtc_wkalrm *t)
{
struct ds1307 *ds1307 = dev_get_drvdata(dev);
u8 ald[3], ctl[3];
int ret;
/* Read alarm registers. */
ret = regmap_bulk_read(ds1307->regmap, RX8130_REG_ALARM_MIN, ald,
sizeof(ald));
if (ret < 0)
return ret;
/* Read control registers. */
ret = regmap_bulk_read(ds1307->regmap, RX8130_REG_EXTENSION, ctl,
sizeof(ctl));
if (ret < 0)
return ret;
t->enabled = !!(ctl[2] & RX8130_REG_CONTROL0_AIE);
t->pending = !!(ctl[1] & RX8130_REG_FLAG_AF);
/* Report alarm 0 time assuming 24-hour and day-of-month modes. */
t->time.tm_sec = -1;
t->time.tm_min = bcd2bin(ald[0] & 0x7f);
t->time.tm_hour = bcd2bin(ald[1] & 0x7f);
t->time.tm_wday = -1;
t->time.tm_mday = bcd2bin(ald[2] & 0x7f);
t->time.tm_mon = -1;
t->time.tm_year = -1;
t->time.tm_yday = -1;
t->time.tm_isdst = -1;
dev_dbg(dev, "%s, sec=%d min=%d hour=%d wday=%d mday=%d mon=%d enabled=%d\n",
__func__, t->time.tm_sec, t->time.tm_min, t->time.tm_hour,
t->time.tm_wday, t->time.tm_mday, t->time.tm_mon, t->enabled);
return 0;
}
static int rx8130_set_alarm(struct device *dev, struct rtc_wkalrm *t)
{
struct ds1307 *ds1307 = dev_get_drvdata(dev);
u8 ald[3], ctl[3];
int ret;
dev_dbg(dev, "%s, sec=%d min=%d hour=%d wday=%d mday=%d mon=%d "
"enabled=%d pending=%d\n", __func__,
t->time.tm_sec, t->time.tm_min, t->time.tm_hour,
t->time.tm_wday, t->time.tm_mday, t->time.tm_mon,
t->enabled, t->pending);
/* Read control registers. */
ret = regmap_bulk_read(ds1307->regmap, RX8130_REG_EXTENSION, ctl,
sizeof(ctl));
if (ret < 0)
return ret;
ctl[0] &= RX8130_REG_EXTENSION_WADA;
ctl[1] &= ~RX8130_REG_FLAG_AF;
ctl[2] &= ~RX8130_REG_CONTROL0_AIE;
ret = regmap_bulk_write(ds1307->regmap, RX8130_REG_EXTENSION, ctl,
sizeof(ctl));
if (ret < 0)
return ret;
/* Hardware alarm precision is 1 minute! */
ald[0] = bin2bcd(t->time.tm_min);
ald[1] = bin2bcd(t->time.tm_hour);
ald[2] = bin2bcd(t->time.tm_mday);
ret = regmap_bulk_write(ds1307->regmap, RX8130_REG_ALARM_MIN, ald,
sizeof(ald));
if (ret < 0)
return ret;
if (!t->enabled)
return 0;
ctl[2] |= RX8130_REG_CONTROL0_AIE;
return regmap_write(ds1307->regmap, RX8130_REG_CONTROL0, ctl[2]);
}
static int rx8130_alarm_irq_enable(struct device *dev, unsigned int enabled)
{
struct ds1307 *ds1307 = dev_get_drvdata(dev);
int ret, reg;
ret = regmap_read(ds1307->regmap, RX8130_REG_CONTROL0, &reg);
if (ret < 0)
return ret;
if (enabled)
reg |= RX8130_REG_CONTROL0_AIE;
else
reg &= ~RX8130_REG_CONTROL0_AIE;
return regmap_write(ds1307->regmap, RX8130_REG_CONTROL0, reg);
}
static irqreturn_t mcp794xx_irq(int irq, void *dev_id)
{
struct ds1307 *ds1307 = dev_id;
struct mutex *lock = &ds1307->rtc->ops_lock;
int reg, ret;
mutex_lock(lock);
/* Check and clear alarm 0 interrupt flag. */
ret = regmap_read(ds1307->regmap, MCP794XX_REG_ALARM0_CTRL, &reg);
if (ret)
goto out;
if (!(reg & MCP794XX_BIT_ALMX_IF))
goto out;
reg &= ~MCP794XX_BIT_ALMX_IF;
ret = regmap_write(ds1307->regmap, MCP794XX_REG_ALARM0_CTRL, reg);
if (ret)
goto out;
/* Disable alarm 0. */
ret = regmap_update_bits(ds1307->regmap, MCP794XX_REG_CONTROL,
MCP794XX_BIT_ALM0_EN, 0);
if (ret)
goto out;
rtc_update_irq(ds1307->rtc, 1, RTC_AF | RTC_IRQF);
out:
mutex_unlock(lock);
return IRQ_HANDLED;
}
static int mcp794xx_read_alarm(struct device *dev, struct rtc_wkalrm *t)
{
struct ds1307 *ds1307 = dev_get_drvdata(dev);
u8 regs[10];
int ret;
/* Read control and alarm 0 registers. */
ret = regmap_bulk_read(ds1307->regmap, MCP794XX_REG_CONTROL, regs,
sizeof(regs));
if (ret)
return ret;
t->enabled = !!(regs[0] & MCP794XX_BIT_ALM0_EN);
/* Report alarm 0 time assuming 24-hour and day-of-month modes. */
t->time.tm_sec = bcd2bin(regs[3] & 0x7f);
t->time.tm_min = bcd2bin(regs[4] & 0x7f);
t->time.tm_hour = bcd2bin(regs[5] & 0x3f);
t->time.tm_wday = bcd2bin(regs[6] & 0x7) - 1;
t->time.tm_mday = bcd2bin(regs[7] & 0x3f);
t->time.tm_mon = bcd2bin(regs[8] & 0x1f) - 1;
t->time.tm_year = -1;
t->time.tm_yday = -1;
t->time.tm_isdst = -1;
dev_dbg(dev, "%s, sec=%d min=%d hour=%d wday=%d mday=%d mon=%d "
"enabled=%d polarity=%d irq=%d match=%lu\n", __func__,
t->time.tm_sec, t->time.tm_min, t->time.tm_hour,
t->time.tm_wday, t->time.tm_mday, t->time.tm_mon, t->enabled,
!!(regs[6] & MCP794XX_BIT_ALMX_POL),
!!(regs[6] & MCP794XX_BIT_ALMX_IF),
(regs[6] & MCP794XX_MSK_ALMX_MATCH) >> 4);
return 0;
}
/*
* The RTC weekday may be arbitrary, so calculate the alarm weekday
* from the current weekday we read back from the RTC timekeeping
* registers
*/
static int mcp794xx_alm_weekday(struct device *dev, struct rtc_time *tm_alarm)
{
struct rtc_time tm_now;
int days_now, days_alarm, ret;
ret = ds1307_get_time(dev, &tm_now);
if (ret)
return ret;
days_now = div_s64(rtc_tm_to_time64(&tm_now), 24 * 60 * 60);
days_alarm = div_s64(rtc_tm_to_time64(tm_alarm), 24 * 60 * 60);
return (tm_now.tm_wday + days_alarm - days_now) % 7 + 1;
}
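/*
 * Example of the weekday math above (values are illustrative): if the
 * RTC currently reports tm_wday 2 and the alarm lies three days in
 * the future, days_alarm - days_now == 3 and the function returns
 * (2 + 3) % 7 + 1 == 6, i.e. the chip's 1-7 weekday encoding.
 */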
static int mcp794xx_set_alarm(struct device *dev, struct rtc_wkalrm *t)
{
struct ds1307 *ds1307 = dev_get_drvdata(dev);
unsigned char regs[10];
int wday, ret;
wday = mcp794xx_alm_weekday(dev, &t->time);
if (wday < 0)
return wday;
dev_dbg(dev, "%s, sec=%d min=%d hour=%d wday=%d mday=%d mon=%d "
"enabled=%d pending=%d\n", __func__,
t->time.tm_sec, t->time.tm_min, t->time.tm_hour,
t->time.tm_wday, t->time.tm_mday, t->time.tm_mon,
t->enabled, t->pending);
/* Read control and alarm 0 registers. */
ret = regmap_bulk_read(ds1307->regmap, MCP794XX_REG_CONTROL, regs,
sizeof(regs));
if (ret)
return ret;
/* Set alarm 0, using 24-hour and day-of-month modes. */
regs[3] = bin2bcd(t->time.tm_sec);
regs[4] = bin2bcd(t->time.tm_min);
regs[5] = bin2bcd(t->time.tm_hour);
regs[6] = wday;
regs[7] = bin2bcd(t->time.tm_mday);
regs[8] = bin2bcd(t->time.tm_mon + 1);
/* Clear the alarm 0 interrupt flag. */
regs[6] &= ~MCP794XX_BIT_ALMX_IF;
/* Set alarm match: second, minute, hour, day, date, month. */
regs[6] |= MCP794XX_MSK_ALMX_MATCH;
/* Disable interrupt. We will not enable until completely programmed */
regs[0] &= ~MCP794XX_BIT_ALM0_EN;
ret = regmap_bulk_write(ds1307->regmap, MCP794XX_REG_CONTROL, regs,
sizeof(regs));
if (ret)
return ret;
if (!t->enabled)
return 0;
regs[0] |= MCP794XX_BIT_ALM0_EN;
return regmap_write(ds1307->regmap, MCP794XX_REG_CONTROL, regs[0]);
}
static int mcp794xx_alarm_irq_enable(struct device *dev, unsigned int enabled)
{
struct ds1307 *ds1307 = dev_get_drvdata(dev);
return regmap_update_bits(ds1307->regmap, MCP794XX_REG_CONTROL,
MCP794XX_BIT_ALM0_EN,
enabled ? MCP794XX_BIT_ALM0_EN : 0);
}
static int m41txx_rtc_read_offset(struct device *dev, long *offset)
{
struct ds1307 *ds1307 = dev_get_drvdata(dev);
unsigned int ctrl_reg;
u8 val;
regmap_read(ds1307->regmap, M41TXX_REG_CONTROL, &ctrl_reg);
val = ctrl_reg & M41TXX_M_CALIBRATION;
/* check if positive */
if (ctrl_reg & M41TXX_BIT_CALIB_SIGN)
*offset = (val * M41TXX_POS_OFFSET_STEP_PPB);
else
*offset = -(val * M41TXX_NEG_OFFSET_STEP_PPB);
return 0;
}
static int m41txx_rtc_set_offset(struct device *dev, long offset)
{
struct ds1307 *ds1307 = dev_get_drvdata(dev);
unsigned int ctrl_reg;
if ((offset < M41TXX_MIN_OFFSET) || (offset > M41TXX_MAX_OFFSET))
return -ERANGE;
if (offset >= 0) {
ctrl_reg = DIV_ROUND_CLOSEST(offset,
M41TXX_POS_OFFSET_STEP_PPB);
ctrl_reg |= M41TXX_BIT_CALIB_SIGN;
} else {
ctrl_reg = DIV_ROUND_CLOSEST(abs(offset),
M41TXX_NEG_OFFSET_STEP_PPB);
}
return regmap_update_bits(ds1307->regmap, M41TXX_REG_CONTROL,
M41TXX_M_CALIBRATION | M41TXX_BIT_CALIB_SIGN,
ctrl_reg);
}
#ifdef CONFIG_WATCHDOG_CORE
static int ds1388_wdt_start(struct watchdog_device *wdt_dev)
{
struct ds1307 *ds1307 = watchdog_get_drvdata(wdt_dev);
u8 regs[2];
int ret;
ret = regmap_update_bits(ds1307->regmap, DS1388_REG_FLAG,
DS1388_BIT_WF, 0);
if (ret)
return ret;
ret = regmap_update_bits(ds1307->regmap, DS1388_REG_CONTROL,
DS1388_BIT_WDE | DS1388_BIT_RST, 0);
if (ret)
return ret;
/*
* watchdog timeouts are measured in seconds, so ignore the
* hundredths-of-seconds field.
*/
regs[0] = 0;
regs[1] = bin2bcd(wdt_dev->timeout);
ret = regmap_bulk_write(ds1307->regmap, DS1388_REG_WDOG_HUN_SECS, regs,
sizeof(regs));
if (ret)
return ret;
return regmap_update_bits(ds1307->regmap, DS1388_REG_CONTROL,
DS1388_BIT_WDE | DS1388_BIT_RST,
DS1388_BIT_WDE | DS1388_BIT_RST);
}
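/*
 * The two-register write above encodes the timeout in BCD; e.g. the
 * 99-second maximum becomes {0x00, 0x99} for the hundredths/seconds
 * register pair.
 */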
static int ds1388_wdt_stop(struct watchdog_device *wdt_dev)
{
struct ds1307 *ds1307 = watchdog_get_drvdata(wdt_dev);
return regmap_update_bits(ds1307->regmap, DS1388_REG_CONTROL,
DS1388_BIT_WDE | DS1388_BIT_RST, 0);
}
static int ds1388_wdt_ping(struct watchdog_device *wdt_dev)
{
struct ds1307 *ds1307 = watchdog_get_drvdata(wdt_dev);
u8 regs[2];
return regmap_bulk_read(ds1307->regmap, DS1388_REG_WDOG_HUN_SECS, regs,
sizeof(regs));
}
static int ds1388_wdt_set_timeout(struct watchdog_device *wdt_dev,
unsigned int val)
{
struct ds1307 *ds1307 = watchdog_get_drvdata(wdt_dev);
u8 regs[2];
wdt_dev->timeout = val;
regs[0] = 0;
regs[1] = bin2bcd(wdt_dev->timeout);
return regmap_bulk_write(ds1307->regmap, DS1388_REG_WDOG_HUN_SECS, regs,
sizeof(regs));
}
#endif
static const struct rtc_class_ops rx8130_rtc_ops = {
.read_time = ds1307_get_time,
.set_time = ds1307_set_time,
.read_alarm = rx8130_read_alarm,
.set_alarm = rx8130_set_alarm,
.alarm_irq_enable = rx8130_alarm_irq_enable,
};
static const struct rtc_class_ops mcp794xx_rtc_ops = {
.read_time = ds1307_get_time,
.set_time = ds1307_set_time,
.read_alarm = mcp794xx_read_alarm,
.set_alarm = mcp794xx_set_alarm,
.alarm_irq_enable = mcp794xx_alarm_irq_enable,
};
static const struct rtc_class_ops m41txx_rtc_ops = {
.read_time = ds1307_get_time,
.set_time = ds1307_set_time,
.read_alarm = ds1337_read_alarm,
.set_alarm = ds1337_set_alarm,
.alarm_irq_enable = ds1307_alarm_irq_enable,
.read_offset = m41txx_rtc_read_offset,
.set_offset = m41txx_rtc_set_offset,
};
static const struct chip_desc chips[last_ds_type] = {
[ds_1307] = {
.nvram_offset = 8,
.nvram_size = 56,
},
[ds_1308] = {
.nvram_offset = 8,
.nvram_size = 56,
},
[ds_1337] = {
.alarm = 1,
.century_reg = DS1307_REG_MONTH,
.century_bit = DS1337_BIT_CENTURY,
},
[ds_1338] = {
.nvram_offset = 8,
.nvram_size = 56,
},
[ds_1339] = {
.alarm = 1,
.century_reg = DS1307_REG_MONTH,
.century_bit = DS1337_BIT_CENTURY,
.bbsqi_bit = DS1339_BIT_BBSQI,
.trickle_charger_reg = 0x10,
.do_trickle_setup = &do_trickle_setup_ds1339,
.requires_trickle_resistor = true,
.charge_default = true,
},
[ds_1340] = {
.century_reg = DS1307_REG_HOUR,
.century_enable_bit = DS1340_BIT_CENTURY_EN,
.century_bit = DS1340_BIT_CENTURY,
.do_trickle_setup = &do_trickle_setup_ds1339,
.trickle_charger_reg = 0x08,
.requires_trickle_resistor = true,
.charge_default = true,
},
[ds_1341] = {
.century_reg = DS1307_REG_MONTH,
.century_bit = DS1337_BIT_CENTURY,
},
[ds_1388] = {
.offset = 1,
.trickle_charger_reg = 0x0a,
},
[ds_3231] = {
.alarm = 1,
.century_reg = DS1307_REG_MONTH,
.century_bit = DS1337_BIT_CENTURY,
.bbsqi_bit = DS3231_BIT_BBSQW,
},
[rx_8130] = {
.alarm = 1,
/* this is battery backed SRAM */
.nvram_offset = 0x20,
.nvram_size = 4, /* 32 bits (4 x 8-bit words) */
.offset = 0x10,
.irq_handler = rx8130_irq,
.rtc_ops = &rx8130_rtc_ops,
.trickle_charger_reg = RX8130_REG_CONTROL1,
.do_trickle_setup = &do_trickle_setup_rx8130,
},
[m41t0] = {
.rtc_ops = &m41txx_rtc_ops,
},
[m41t00] = {
.rtc_ops = &m41txx_rtc_ops,
},
[m41t11] = {
/* this is battery backed SRAM */
.nvram_offset = 8,
.nvram_size = 56,
.rtc_ops = &m41txx_rtc_ops,
},
[mcp794xx] = {
.alarm = 1,
/* this is battery backed SRAM */
.nvram_offset = 0x20,
.nvram_size = 0x40,
.irq_handler = mcp794xx_irq,
.rtc_ops = &mcp794xx_rtc_ops,
},
};
static const struct i2c_device_id ds1307_id[] = {
{ "ds1307", ds_1307 },
{ "ds1308", ds_1308 },
{ "ds1337", ds_1337 },
{ "ds1338", ds_1338 },
{ "ds1339", ds_1339 },
{ "ds1388", ds_1388 },
{ "ds1340", ds_1340 },
{ "ds1341", ds_1341 },
{ "ds3231", ds_3231 },
{ "m41t0", m41t0 },
{ "m41t00", m41t00 },
{ "m41t11", m41t11 },
{ "mcp7940x", mcp794xx },
{ "mcp7941x", mcp794xx },
{ "pt7c4338", ds_1307 },
{ "rx8025", rx_8025 },
{ "isl12057", ds_1337 },
{ "rx8130", rx_8130 },
{ }
};
MODULE_DEVICE_TABLE(i2c, ds1307_id);
static const struct of_device_id ds1307_of_match[] = {
{
.compatible = "dallas,ds1307",
.data = (void *)ds_1307
},
{
.compatible = "dallas,ds1308",
.data = (void *)ds_1308
},
{
.compatible = "dallas,ds1337",
.data = (void *)ds_1337
},
{
.compatible = "dallas,ds1338",
.data = (void *)ds_1338
},
{
.compatible = "dallas,ds1339",
.data = (void *)ds_1339
},
{
.compatible = "dallas,ds1388",
.data = (void *)ds_1388
},
{
.compatible = "dallas,ds1340",
.data = (void *)ds_1340
},
{
.compatible = "dallas,ds1341",
.data = (void *)ds_1341
},
{
.compatible = "maxim,ds3231",
.data = (void *)ds_3231
},
{
.compatible = "st,m41t0",
.data = (void *)m41t0
},
{
.compatible = "st,m41t00",
.data = (void *)m41t00
},
{
.compatible = "st,m41t11",
.data = (void *)m41t11
},
{
.compatible = "microchip,mcp7940x",
.data = (void *)mcp794xx
},
{
.compatible = "microchip,mcp7941x",
.data = (void *)mcp794xx
},
{
.compatible = "pericom,pt7c4338",
.data = (void *)ds_1307
},
{
.compatible = "epson,rx8025",
.data = (void *)rx_8025
},
{
.compatible = "isil,isl12057",
.data = (void *)ds_1337
},
{
.compatible = "epson,rx8130",
.data = (void *)rx_8130
},
{ }
};
MODULE_DEVICE_TABLE(of, ds1307_of_match);
/*
* The ds1337 and ds1339 both have two alarms, but we only use the first
* one (with a "seconds" field). For ds1337 we expect nINTA to be our alarm
* signal; ds1339 chips have only one alarm signal.
*/
static irqreturn_t ds1307_irq(int irq, void *dev_id)
{
struct ds1307 *ds1307 = dev_id;
struct mutex *lock = &ds1307->rtc->ops_lock;
int stat, ret;
mutex_lock(lock);
ret = regmap_read(ds1307->regmap, DS1337_REG_STATUS, &stat);
if (ret)
goto out;
if (stat & DS1337_BIT_A1I) {
stat &= ~DS1337_BIT_A1I;
regmap_write(ds1307->regmap, DS1337_REG_STATUS, stat);
ret = regmap_update_bits(ds1307->regmap, DS1337_REG_CONTROL,
DS1337_BIT_A1IE, 0);
if (ret)
goto out;
rtc_update_irq(ds1307->rtc, 1, RTC_AF | RTC_IRQF);
}
out:
mutex_unlock(lock);
return IRQ_HANDLED;
}
/*----------------------------------------------------------------------*/
static const struct rtc_class_ops ds13xx_rtc_ops = {
.read_time = ds1307_get_time,
.set_time = ds1307_set_time,
.read_alarm = ds1337_read_alarm,
.set_alarm = ds1337_set_alarm,
.alarm_irq_enable = ds1307_alarm_irq_enable,
};
static ssize_t frequency_test_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct ds1307 *ds1307 = dev_get_drvdata(dev->parent);
bool freq_test_en;
int ret;
ret = kstrtobool(buf, &freq_test_en);
if (ret) {
dev_err(dev, "Failed to store RTC Frequency Test attribute\n");
return ret;
}
regmap_update_bits(ds1307->regmap, M41TXX_REG_CONTROL, M41TXX_BIT_FT,
freq_test_en ? M41TXX_BIT_FT : 0);
return count;
}
static ssize_t frequency_test_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct ds1307 *ds1307 = dev_get_drvdata(dev->parent);
unsigned int ctrl_reg;
regmap_read(ds1307->regmap, M41TXX_REG_CONTROL, &ctrl_reg);
return sysfs_emit(buf, (ctrl_reg & M41TXX_BIT_FT) ? "on\n" : "off\n");
}
static DEVICE_ATTR_RW(frequency_test);
static struct attribute *rtc_freq_test_attrs[] = {
&dev_attr_frequency_test.attr,
NULL,
};
static const struct attribute_group rtc_freq_test_attr_group = {
.attrs = rtc_freq_test_attrs,
};
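/*
 * Sketch of the userspace side, assuming the usual rtc class paths
 * (the rtc index is board-specific):
 *
 *	echo on > /sys/class/rtc/rtc0/frequency_test
 *	cat /sys/class/rtc/rtc0/frequency_test
 */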
static int ds1307_add_frequency_test(struct ds1307 *ds1307)
{
int err;
switch (ds1307->type) {
case m41t0:
case m41t00:
case m41t11:
err = rtc_add_group(ds1307->rtc, &rtc_freq_test_attr_group);
if (err)
return err;
break;
default:
break;
}
return 0;
}
/*----------------------------------------------------------------------*/
static int ds1307_nvram_read(void *priv, unsigned int offset, void *val,
size_t bytes)
{
struct ds1307 *ds1307 = priv;
const struct chip_desc *chip = &chips[ds1307->type];
return regmap_bulk_read(ds1307->regmap, chip->nvram_offset + offset,
val, bytes);
}
static int ds1307_nvram_write(void *priv, unsigned int offset, void *val,
size_t bytes)
{
struct ds1307 *ds1307 = priv;
const struct chip_desc *chip = &chips[ds1307->type];
return regmap_bulk_write(ds1307->regmap, chip->nvram_offset + offset,
val, bytes);
}
/*----------------------------------------------------------------------*/
static u8 ds1307_trickle_init(struct ds1307 *ds1307,
const struct chip_desc *chip)
{
u32 ohms, chargeable;
bool diode = chip->charge_default;
if (!chip->do_trickle_setup)
return 0;
if (device_property_read_u32(ds1307->dev, "trickle-resistor-ohms",
&ohms) && chip->requires_trickle_resistor)
return 0;
/* aux-voltage-chargeable takes precedence over the deprecated
* trickle-diode-disable
*/
if (!device_property_read_u32(ds1307->dev, "aux-voltage-chargeable",
&chargeable)) {
switch (chargeable) {
case 0:
diode = false;
break;
case 1:
diode = true;
break;
default:
dev_warn(ds1307->dev,
"unsupported aux-voltage-chargeable value\n");
break;
}
} else if (device_property_read_bool(ds1307->dev,
"trickle-diode-disable")) {
diode = false;
}
return chip->do_trickle_setup(ds1307, ohms, diode);
}
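/*
 * A minimal device-tree sketch exercising this path; the property
 * names are the ones queried above, while the node address and
 * values are only illustrative:
 *
 *	rtc@68 {
 *		compatible = "dallas,ds1339";
 *		reg = <0x68>;
 *		trickle-resistor-ohms = <250>;
 *		aux-voltage-chargeable = <1>;
 *	};
 *
 * With these values do_trickle_setup_ds1339() returns the 0xa0 magic
 * ORed with the diode and 250-ohm selection bits.
 */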
/*----------------------------------------------------------------------*/
#if IS_REACHABLE(CONFIG_HWMON)
/*
* Temperature sensor support for ds3231 devices.
*/
#define DS3231_REG_TEMPERATURE 0x11
/*
* A user-initiated temperature conversion is not started by this function,
* so the temperature is updated once every 64 seconds.
*/
static int ds3231_hwmon_read_temp(struct device *dev, s32 *mC)
{
struct ds1307 *ds1307 = dev_get_drvdata(dev);
u8 temp_buf[2];
s16 temp;
int ret;
ret = regmap_bulk_read(ds1307->regmap, DS3231_REG_TEMPERATURE,
temp_buf, sizeof(temp_buf));
if (ret)
return ret;
/*
* Temperature is represented as a 10-bit code with a resolution of
* 0.25 degree celsius and encoded in two's complement format.
*/
temp = (temp_buf[0] << 8) | temp_buf[1];
temp >>= 6;
*mC = temp * 250;
return 0;
}
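/*
 * Worked example for the conversion above: a register pair of
 * 0x19 0x40 forms 0x1940; arithmetic-shifting the 10-bit code right
 * by 6 leaves 0x65 == 101 steps of 0.25 degrees C, so *mC becomes
 * 101 * 250 = 25250, i.e. 25.25 degrees C.
 */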
static ssize_t ds3231_hwmon_show_temp(struct device *dev,
struct device_attribute *attr, char *buf)
{
int ret;
s32 temp;
ret = ds3231_hwmon_read_temp(dev, &temp);
if (ret)
return ret;
return sprintf(buf, "%d\n", temp);
}
static SENSOR_DEVICE_ATTR(temp1_input, 0444, ds3231_hwmon_show_temp,
NULL, 0);
static struct attribute *ds3231_hwmon_attrs[] = {
&sensor_dev_attr_temp1_input.dev_attr.attr,
NULL,
};
ATTRIBUTE_GROUPS(ds3231_hwmon);
static void ds1307_hwmon_register(struct ds1307 *ds1307)
{
struct device *dev;
if (ds1307->type != ds_3231)
return;
dev = devm_hwmon_device_register_with_groups(ds1307->dev, ds1307->name,
ds1307,
ds3231_hwmon_groups);
if (IS_ERR(dev)) {
dev_warn(ds1307->dev, "unable to register hwmon device %ld\n",
PTR_ERR(dev));
}
}
#else
static void ds1307_hwmon_register(struct ds1307 *ds1307)
{
}
#endif /* IS_REACHABLE(CONFIG_HWMON) */
/*----------------------------------------------------------------------*/
/*
* Square-wave output support for DS3231
* Datasheet: https://datasheets.maximintegrated.com/en/ds/DS3231.pdf
*/
#ifdef CONFIG_COMMON_CLK
enum {
DS3231_CLK_SQW = 0,
DS3231_CLK_32KHZ,
};
#define clk_sqw_to_ds1307(clk) \
container_of(clk, struct ds1307, clks[DS3231_CLK_SQW])
#define clk_32khz_to_ds1307(clk) \
container_of(clk, struct ds1307, clks[DS3231_CLK_32KHZ])
static int ds3231_clk_sqw_rates[] = {
1,
1024,
4096,
8192,
};
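/*
 * RS2:RS1 in the control register index this table; e.g. RS2 = 1,
 * RS1 = 0 selects entry 2, i.e. 4096Hz.
 */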
static int ds1337_write_control(struct ds1307 *ds1307, u8 mask, u8 value)
{
struct mutex *lock = &ds1307->rtc->ops_lock;
int ret;
mutex_lock(lock);
ret = regmap_update_bits(ds1307->regmap, DS1337_REG_CONTROL,
mask, value);
mutex_unlock(lock);
return ret;
}
static unsigned long ds3231_clk_sqw_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
struct ds1307 *ds1307 = clk_sqw_to_ds1307(hw);
int control, ret;
int rate_sel = 0;
ret = regmap_read(ds1307->regmap, DS1337_REG_CONTROL, &control);
if (ret)
return ret;
if (control & DS1337_BIT_RS1)
rate_sel += 1;
if (control & DS1337_BIT_RS2)
rate_sel += 2;
return ds3231_clk_sqw_rates[rate_sel];
}
static long ds3231_clk_sqw_round_rate(struct clk_hw *hw, unsigned long rate,
unsigned long *prate)
{
int i;
for (i = ARRAY_SIZE(ds3231_clk_sqw_rates) - 1; i >= 0; i--) {
if (ds3231_clk_sqw_rates[i] <= rate)
return ds3231_clk_sqw_rates[i];
}
return 0;
}
static int ds3231_clk_sqw_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate)
{
struct ds1307 *ds1307 = clk_sqw_to_ds1307(hw);
int control = 0;
int rate_sel;
for (rate_sel = 0; rate_sel < ARRAY_SIZE(ds3231_clk_sqw_rates);
rate_sel++) {
if (ds3231_clk_sqw_rates[rate_sel] == rate)
break;
}
if (rate_sel == ARRAY_SIZE(ds3231_clk_sqw_rates))
return -EINVAL;
if (rate_sel & 1)
control |= DS1337_BIT_RS1;
if (rate_sel & 2)
control |= DS1337_BIT_RS2;
return ds1337_write_control(ds1307, DS1337_BIT_RS1 | DS1337_BIT_RS2,
control);
}
static int ds3231_clk_sqw_prepare(struct clk_hw *hw)
{
struct ds1307 *ds1307 = clk_sqw_to_ds1307(hw);
return ds1337_write_control(ds1307, DS1337_BIT_INTCN, 0);
}
static void ds3231_clk_sqw_unprepare(struct clk_hw *hw)
{
struct ds1307 *ds1307 = clk_sqw_to_ds1307(hw);
ds1337_write_control(ds1307, DS1337_BIT_INTCN, DS1337_BIT_INTCN);
}
static int ds3231_clk_sqw_is_prepared(struct clk_hw *hw)
{
struct ds1307 *ds1307 = clk_sqw_to_ds1307(hw);
int control, ret;
ret = regmap_read(ds1307->regmap, DS1337_REG_CONTROL, &control);
if (ret)
return ret;
return !(control & DS1337_BIT_INTCN);
}
static const struct clk_ops ds3231_clk_sqw_ops = {
.prepare = ds3231_clk_sqw_prepare,
.unprepare = ds3231_clk_sqw_unprepare,
.is_prepared = ds3231_clk_sqw_is_prepared,
.recalc_rate = ds3231_clk_sqw_recalc_rate,
.round_rate = ds3231_clk_sqw_round_rate,
.set_rate = ds3231_clk_sqw_set_rate,
};
static unsigned long ds3231_clk_32khz_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
return 32768;
}
static int ds3231_clk_32khz_control(struct ds1307 *ds1307, bool enable)
{
struct mutex *lock = &ds1307->rtc->ops_lock;
int ret;
mutex_lock(lock);
ret = regmap_update_bits(ds1307->regmap, DS1337_REG_STATUS,
DS3231_BIT_EN32KHZ,
enable ? DS3231_BIT_EN32KHZ : 0);
mutex_unlock(lock);
return ret;
}
static int ds3231_clk_32khz_prepare(struct clk_hw *hw)
{
struct ds1307 *ds1307 = clk_32khz_to_ds1307(hw);
return ds3231_clk_32khz_control(ds1307, true);
}
static void ds3231_clk_32khz_unprepare(struct clk_hw *hw)
{
struct ds1307 *ds1307 = clk_32khz_to_ds1307(hw);
ds3231_clk_32khz_control(ds1307, false);
}
static int ds3231_clk_32khz_is_prepared(struct clk_hw *hw)
{
struct ds1307 *ds1307 = clk_32khz_to_ds1307(hw);
int status, ret;
ret = regmap_read(ds1307->regmap, DS1337_REG_STATUS, &status);
if (ret)
return ret;
return !!(status & DS3231_BIT_EN32KHZ);
}
static const struct clk_ops ds3231_clk_32khz_ops = {
.prepare = ds3231_clk_32khz_prepare,
.unprepare = ds3231_clk_32khz_unprepare,
.is_prepared = ds3231_clk_32khz_is_prepared,
.recalc_rate = ds3231_clk_32khz_recalc_rate,
};
static const char *ds3231_clks_names[] = {
[DS3231_CLK_SQW] = "ds3231_clk_sqw",
[DS3231_CLK_32KHZ] = "ds3231_clk_32khz",
};
static struct clk_init_data ds3231_clks_init[] = {
[DS3231_CLK_SQW] = {
.ops = &ds3231_clk_sqw_ops,
},
[DS3231_CLK_32KHZ] = {
.ops = &ds3231_clk_32khz_ops,
},
};
static int ds3231_clks_register(struct ds1307 *ds1307)
{
struct device_node *node = ds1307->dev->of_node;
struct clk_onecell_data *onecell;
int i;
onecell = devm_kzalloc(ds1307->dev, sizeof(*onecell), GFP_KERNEL);
if (!onecell)
return -ENOMEM;
onecell->clk_num = ARRAY_SIZE(ds3231_clks_init);
onecell->clks = devm_kcalloc(ds1307->dev, onecell->clk_num,
sizeof(onecell->clks[0]), GFP_KERNEL);
if (!onecell->clks)
return -ENOMEM;
/* optional override of the clock names */
device_property_read_string_array(ds1307->dev, "clock-output-names",
ds3231_clks_names,
ARRAY_SIZE(ds3231_clks_names));
for (i = 0; i < ARRAY_SIZE(ds3231_clks_init); i++) {
struct clk_init_data init = ds3231_clks_init[i];
/*
* Interrupt signal due to alarm conditions and square-wave
* output share the same pin, so don't initialize both.
*/
if (i == DS3231_CLK_SQW && test_bit(RTC_FEATURE_ALARM, ds1307->rtc->features))
continue;
init.name = ds3231_clks_names[i];
ds1307->clks[i].init = &init;
onecell->clks[i] = devm_clk_register(ds1307->dev,
&ds1307->clks[i]);
if (IS_ERR(onecell->clks[i]))
return PTR_ERR(onecell->clks[i]);
}
if (node)
of_clk_add_provider(node, of_clk_src_onecell_get, onecell);
return 0;
}
static void ds1307_clks_register(struct ds1307 *ds1307)
{
int ret;
if (ds1307->type != ds_3231)
return;
ret = ds3231_clks_register(ds1307);
if (ret) {
dev_warn(ds1307->dev, "unable to register clock device %d\n",
ret);
}
}
#else
static void ds1307_clks_register(struct ds1307 *ds1307)
{
}
#endif /* CONFIG_COMMON_CLK */
#ifdef CONFIG_WATCHDOG_CORE
static const struct watchdog_info ds1388_wdt_info = {
.options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING | WDIOF_MAGICCLOSE,
.identity = "DS1388 watchdog",
};
static const struct watchdog_ops ds1388_wdt_ops = {
.owner = THIS_MODULE,
.start = ds1388_wdt_start,
.stop = ds1388_wdt_stop,
.ping = ds1388_wdt_ping,
.set_timeout = ds1388_wdt_set_timeout,
};
static void ds1307_wdt_register(struct ds1307 *ds1307)
{
struct watchdog_device *wdt;
int err;
int val;
if (ds1307->type != ds_1388)
return;
wdt = devm_kzalloc(ds1307->dev, sizeof(*wdt), GFP_KERNEL);
if (!wdt)
return;
err = regmap_read(ds1307->regmap, DS1388_REG_FLAG, &val);
if (!err && val & DS1388_BIT_WF)
wdt->bootstatus = WDIOF_CARDRESET;
wdt->info = &ds1388_wdt_info;
wdt->ops = &ds1388_wdt_ops;
wdt->timeout = 99;
wdt->max_timeout = 99;
wdt->min_timeout = 1;
watchdog_init_timeout(wdt, 0, ds1307->dev);
watchdog_set_drvdata(wdt, ds1307);
devm_watchdog_register_device(ds1307->dev, wdt);
}
#else
static void ds1307_wdt_register(struct ds1307 *ds1307)
{
}
#endif /* CONFIG_WATCHDOG_CORE */
static const struct regmap_config regmap_config = {
.reg_bits = 8,
.val_bits = 8,
};
static int ds1307_probe(struct i2c_client *client)
{
const struct i2c_device_id *id = i2c_client_get_device_id(client);
struct ds1307 *ds1307;
const void *match;
int err = -ENODEV;
int tmp;
const struct chip_desc *chip;
bool want_irq;
bool ds1307_can_wakeup_device = false;
unsigned char regs[8];
struct ds1307_platform_data *pdata = dev_get_platdata(&client->dev);
u8 trickle_charger_setup = 0;
ds1307 = devm_kzalloc(&client->dev, sizeof(struct ds1307), GFP_KERNEL);
if (!ds1307)
return -ENOMEM;
dev_set_drvdata(&client->dev, ds1307);
ds1307->dev = &client->dev;
ds1307->name = client->name;
	ds1307->regmap = devm_regmap_init_i2c(client, &regmap_config);
if (IS_ERR(ds1307->regmap)) {
dev_err(ds1307->dev, "regmap allocation failed\n");
return PTR_ERR(ds1307->regmap);
}
i2c_set_clientdata(client, ds1307);
match = device_get_match_data(&client->dev);
if (match) {
ds1307->type = (uintptr_t)match;
chip = &chips[ds1307->type];
} else if (id) {
chip = &chips[id->driver_data];
ds1307->type = id->driver_data;
} else {
return -ENODEV;
}
want_irq = client->irq > 0 && chip->alarm;
if (!pdata)
trickle_charger_setup = ds1307_trickle_init(ds1307, chip);
else if (pdata->trickle_charger_setup)
trickle_charger_setup = pdata->trickle_charger_setup;
if (trickle_charger_setup && chip->trickle_charger_reg) {
dev_dbg(ds1307->dev,
"writing trickle charger info 0x%x to 0x%x\n",
trickle_charger_setup, chip->trickle_charger_reg);
regmap_write(ds1307->regmap, chip->trickle_charger_reg,
trickle_charger_setup);
}
/*
* For devices with no IRQ directly connected to the SoC, the RTC chip
* can be forced as a wakeup source by stating that explicitly in
* the device's .dts file using the "wakeup-source" boolean property.
* If the "wakeup-source" property is set, don't request an IRQ.
* This will guarantee the 'wakealarm' sysfs entry is available on the device,
* if supported by the RTC.
*/
if (chip->alarm && device_property_read_bool(&client->dev, "wakeup-source"))
ds1307_can_wakeup_device = true;
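	/*
	 * Hypothetical .dts fragment for the case described above (node
	 * name and address are assumptions for illustration):
	 *
	 *	rtc@68 {
	 *		compatible = "dallas,ds1339";
	 *		reg = <0x68>;
	 *		wakeup-source;
	 *	};
	 */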
switch (ds1307->type) {
case ds_1337:
case ds_1339:
case ds_1341:
case ds_3231:
/* get registers that the "rtc" read below won't read... */
err = regmap_bulk_read(ds1307->regmap, DS1337_REG_CONTROL,
regs, 2);
if (err) {
dev_dbg(ds1307->dev, "read error %d\n", err);
goto exit;
}
/* oscillator off? turn it on, so clock can tick. */
if (regs[0] & DS1337_BIT_nEOSC)
regs[0] &= ~DS1337_BIT_nEOSC;
/*
* Using IRQ or defined as wakeup-source?
* Disable the square wave and both alarms.
* For some variants, be sure alarms can trigger when we're
* running on Vbackup (BBSQI/BBSQW)
*/
if (want_irq || ds1307_can_wakeup_device) {
regs[0] |= DS1337_BIT_INTCN | chip->bbsqi_bit;
regs[0] &= ~(DS1337_BIT_A2IE | DS1337_BIT_A1IE);
}
regmap_write(ds1307->regmap, DS1337_REG_CONTROL,
regs[0]);
/* oscillator fault? clear flag, and warn */
if (regs[1] & DS1337_BIT_OSF) {
regmap_write(ds1307->regmap, DS1337_REG_STATUS,
regs[1] & ~DS1337_BIT_OSF);
dev_warn(ds1307->dev, "SET TIME!\n");
}
break;
case rx_8025:
err = regmap_bulk_read(ds1307->regmap,
RX8025_REG_CTRL1 << 4 | 0x08, regs, 2);
if (err) {
dev_dbg(ds1307->dev, "read error %d\n", err);
goto exit;
}
/* oscillator off? turn it on, so clock can tick. */
if (!(regs[1] & RX8025_BIT_XST)) {
regs[1] |= RX8025_BIT_XST;
regmap_write(ds1307->regmap,
RX8025_REG_CTRL2 << 4 | 0x08,
regs[1]);
dev_warn(ds1307->dev,
"oscillator stop detected - SET TIME!\n");
}
if (regs[1] & RX8025_BIT_PON) {
regs[1] &= ~RX8025_BIT_PON;
regmap_write(ds1307->regmap,
RX8025_REG_CTRL2 << 4 | 0x08,
regs[1]);
dev_warn(ds1307->dev, "power-on detected\n");
}
if (regs[1] & RX8025_BIT_VDET) {
regs[1] &= ~RX8025_BIT_VDET;
regmap_write(ds1307->regmap,
RX8025_REG_CTRL2 << 4 | 0x08,
regs[1]);
dev_warn(ds1307->dev, "voltage drop detected\n");
}
/* make sure we are running in 24hour mode */
if (!(regs[0] & RX8025_BIT_2412)) {
u8 hour;
/* switch to 24 hour mode */
regmap_write(ds1307->regmap,
RX8025_REG_CTRL1 << 4 | 0x08,
regs[0] | RX8025_BIT_2412);
err = regmap_bulk_read(ds1307->regmap,
RX8025_REG_CTRL1 << 4 | 0x08,
regs, 2);
if (err) {
dev_dbg(ds1307->dev, "read error %d\n", err);
goto exit;
}
/* correct hour */
hour = bcd2bin(regs[DS1307_REG_HOUR]);
if (hour == 12)
hour = 0;
if (regs[DS1307_REG_HOUR] & DS1307_BIT_PM)
hour += 12;
regmap_write(ds1307->regmap,
DS1307_REG_HOUR << 4 | 0x08, hour);
}
break;
case ds_1388:
err = regmap_read(ds1307->regmap, DS1388_REG_CONTROL, &tmp);
if (err) {
dev_dbg(ds1307->dev, "read error %d\n", err);
goto exit;
}
/* oscillator off? turn it on, so clock can tick. */
if (tmp & DS1388_BIT_nEOSC) {
tmp &= ~DS1388_BIT_nEOSC;
regmap_write(ds1307->regmap, DS1388_REG_CONTROL, tmp);
}
break;
default:
break;
}
/* read RTC registers */
err = regmap_bulk_read(ds1307->regmap, chip->offset, regs,
sizeof(regs));
if (err) {
dev_dbg(ds1307->dev, "read error %d\n", err);
goto exit;
}
if (ds1307->type == mcp794xx &&
!(regs[DS1307_REG_WDAY] & MCP794XX_BIT_VBATEN)) {
regmap_write(ds1307->regmap, DS1307_REG_WDAY,
regs[DS1307_REG_WDAY] |
MCP794XX_BIT_VBATEN);
}
tmp = regs[DS1307_REG_HOUR];
switch (ds1307->type) {
case ds_1340:
case m41t0:
case m41t00:
case m41t11:
/*
* NOTE: ignores century bits; fix before deploying
* systems that will run through year 2100.
*/
break;
case rx_8025:
break;
default:
if (!(tmp & DS1307_BIT_12HR))
break;
/*
* Be sure we're in 24 hour mode. Multi-master systems
* take note...
*/
tmp = bcd2bin(tmp & 0x1f);
if (tmp == 12)
tmp = 0;
if (regs[DS1307_REG_HOUR] & DS1307_BIT_PM)
tmp += 12;
regmap_write(ds1307->regmap, chip->offset + DS1307_REG_HOUR,
bin2bcd(tmp));
}
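	/*
	 * Worked example of the normalization above: an hour register
	 * with DS1307_BIT_12HR and DS1307_BIT_PM set and BCD hour bits
	 * 0x12 gives bcd2bin(0x12) = 12, which folds to 0 and then gains
	 * 12 from the PM bit; 0x12 is written back with the mode bits
	 * clear, so 12 PM reads as plain 12:xx in 24-hour mode.
	 */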
ds1307->rtc = devm_rtc_allocate_device(ds1307->dev);
if (IS_ERR(ds1307->rtc))
return PTR_ERR(ds1307->rtc);
if (want_irq || ds1307_can_wakeup_device)
device_set_wakeup_capable(ds1307->dev, true);
else
clear_bit(RTC_FEATURE_ALARM, ds1307->rtc->features);
if (ds1307_can_wakeup_device && !want_irq) {
dev_info(ds1307->dev,
"'wakeup-source' is set, request for an IRQ is disabled!\n");
/* We cannot support UIE mode if we do not have an IRQ line */
clear_bit(RTC_FEATURE_UPDATE_INTERRUPT, ds1307->rtc->features);
}
if (want_irq) {
err = devm_request_threaded_irq(ds1307->dev, client->irq, NULL,
chip->irq_handler ?: ds1307_irq,
IRQF_SHARED | IRQF_ONESHOT,
ds1307->name, ds1307);
if (err) {
client->irq = 0;
device_set_wakeup_capable(ds1307->dev, false);
clear_bit(RTC_FEATURE_ALARM, ds1307->rtc->features);
dev_err(ds1307->dev, "unable to request IRQ!\n");
} else {
dev_dbg(ds1307->dev, "got IRQ %d\n", client->irq);
}
}
ds1307->rtc->ops = chip->rtc_ops ?: &ds13xx_rtc_ops;
err = ds1307_add_frequency_test(ds1307);
if (err)
return err;
err = devm_rtc_register_device(ds1307->rtc);
if (err)
return err;
if (chip->nvram_size) {
struct nvmem_config nvmem_cfg = {
.name = "ds1307_nvram",
.word_size = 1,
.stride = 1,
.size = chip->nvram_size,
.reg_read = ds1307_nvram_read,
.reg_write = ds1307_nvram_write,
.priv = ds1307,
};
devm_rtc_nvmem_register(ds1307->rtc, &nvmem_cfg);
}
ds1307_hwmon_register(ds1307);
ds1307_clks_register(ds1307);
ds1307_wdt_register(ds1307);
return 0;
exit:
return err;
}
static struct i2c_driver ds1307_driver = {
.driver = {
.name = "rtc-ds1307",
.of_match_table = ds1307_of_match,
},
.probe = ds1307_probe,
.id_table = ds1307_id,
};
module_i2c_driver(ds1307_driver);
MODULE_DESCRIPTION("RTC driver for DS1307 and similar chips");
MODULE_LICENSE("GPL");
| linux-master | drivers/rtc/rtc-ds1307.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Realtek RTD129x RTC
*
* Copyright (c) 2017 Andreas Färber
*/
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/rtc.h>
#include <linux/spinlock.h>
#define RTD_RTCSEC 0x00
#define RTD_RTCMIN 0x04
#define RTD_RTCHR 0x08
#define RTD_RTCDATE1 0x0c
#define RTD_RTCDATE2 0x10
#define RTD_RTCACR 0x28
#define RTD_RTCEN 0x2c
#define RTD_RTCCR 0x30
#define RTD_RTCSEC_RTCSEC_MASK 0x7f
#define RTD_RTCMIN_RTCMIN_MASK 0x3f
#define RTD_RTCHR_RTCHR_MASK 0x1f
#define RTD_RTCDATE1_RTCDATE1_MASK 0xff
#define RTD_RTCDATE2_RTCDATE2_MASK 0x7f
#define RTD_RTCACR_RTCPWR BIT(7)
#define RTD_RTCEN_RTCEN_MASK 0xff
#define RTD_RTCCR_RTCRST BIT(6)
struct rtd119x_rtc {
void __iomem *base;
struct clk *clk;
struct rtc_device *rtcdev;
unsigned int base_year;
};
static inline int rtd119x_rtc_days_in_year(int year)
{
return 365 + (is_leap_year(year) ? 1 : 0);
}
static void rtd119x_rtc_reset(struct device *dev)
{
struct rtd119x_rtc *data = dev_get_drvdata(dev);
u32 val;
val = readl_relaxed(data->base + RTD_RTCCR);
val |= RTD_RTCCR_RTCRST;
writel_relaxed(val, data->base + RTD_RTCCR);
val &= ~RTD_RTCCR_RTCRST;
writel(val, data->base + RTD_RTCCR);
}
static void rtd119x_rtc_set_enabled(struct device *dev, bool enable)
{
struct rtd119x_rtc *data = dev_get_drvdata(dev);
u32 val;
val = readl_relaxed(data->base + RTD_RTCEN);
if (enable) {
if ((val & RTD_RTCEN_RTCEN_MASK) == 0x5a)
return;
writel_relaxed(0x5a, data->base + RTD_RTCEN);
} else {
writel_relaxed(0, data->base + RTD_RTCEN);
}
}
static int rtd119x_rtc_read_time(struct device *dev, struct rtc_time *tm)
{
struct rtd119x_rtc *data = dev_get_drvdata(dev);
s32 day;
u32 sec;
unsigned int year;
int tries = 0;
while (true) {
tm->tm_sec = (readl_relaxed(data->base + RTD_RTCSEC) & RTD_RTCSEC_RTCSEC_MASK) >> 1;
tm->tm_min = readl_relaxed(data->base + RTD_RTCMIN) & RTD_RTCMIN_RTCMIN_MASK;
tm->tm_hour = readl_relaxed(data->base + RTD_RTCHR) & RTD_RTCHR_RTCHR_MASK;
day = readl_relaxed(data->base + RTD_RTCDATE1) & RTD_RTCDATE1_RTCDATE1_MASK;
day |= (readl_relaxed(data->base + RTD_RTCDATE2) & RTD_RTCDATE2_RTCDATE2_MASK) << 8;
sec = (readl_relaxed(data->base + RTD_RTCSEC) & RTD_RTCSEC_RTCSEC_MASK) >> 1;
tries++;
if (sec == tm->tm_sec)
break;
if (tries >= 3)
return -EINVAL;
}
if (tries > 1)
dev_dbg(dev, "%s: needed %i tries\n", __func__, tries);
year = data->base_year;
while (day >= rtd119x_rtc_days_in_year(year)) {
day -= rtd119x_rtc_days_in_year(year);
year++;
}
tm->tm_year = year - 1900;
tm->tm_yday = day;
tm->tm_mon = 0;
while (day >= rtc_month_days(tm->tm_mon, year)) {
day -= rtc_month_days(tm->tm_mon, year);
tm->tm_mon++;
}
tm->tm_mday = day + 1;
return 0;
}
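/*
 * Worked example of the conversion above (base_year assumed 2014, as
 * set in probe): a raw day counter of 400 consumes the 365 days of
 * 2014, leaving day 35 of 2015; January's 31 days then leave
 * mday 4 + 1 = 5, i.e. 2015-02-05 with tm_yday = 35.
 */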
static int rtd119x_rtc_set_time(struct device *dev, struct rtc_time *tm)
{
struct rtd119x_rtc *data = dev_get_drvdata(dev);
unsigned int day;
int i;
if (1900 + tm->tm_year < data->base_year)
return -EINVAL;
day = 0;
for (i = data->base_year; i < 1900 + tm->tm_year; i++)
day += rtd119x_rtc_days_in_year(i);
day += tm->tm_yday;
if (day > 0x7fff)
return -EINVAL;
rtd119x_rtc_set_enabled(dev, false);
writel_relaxed((tm->tm_sec << 1) & RTD_RTCSEC_RTCSEC_MASK, data->base + RTD_RTCSEC);
writel_relaxed(tm->tm_min & RTD_RTCMIN_RTCMIN_MASK, data->base + RTD_RTCMIN);
writel_relaxed(tm->tm_hour & RTD_RTCHR_RTCHR_MASK, data->base + RTD_RTCHR);
writel_relaxed(day & RTD_RTCDATE1_RTCDATE1_MASK, data->base + RTD_RTCDATE1);
writel_relaxed((day >> 8) & RTD_RTCDATE2_RTCDATE2_MASK, data->base + RTD_RTCDATE2);
rtd119x_rtc_set_enabled(dev, true);
return 0;
}
static const struct rtc_class_ops rtd119x_rtc_ops = {
.read_time = rtd119x_rtc_read_time,
.set_time = rtd119x_rtc_set_time,
};
static const struct of_device_id rtd119x_rtc_dt_ids[] = {
{ .compatible = "realtek,rtd1295-rtc" },
{ }
};
static int rtd119x_rtc_probe(struct platform_device *pdev)
{
struct rtd119x_rtc *data;
u32 val;
int ret;
data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
if (!data)
return -ENOMEM;
platform_set_drvdata(pdev, data);
data->base_year = 2014;
data->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(data->base))
return PTR_ERR(data->base);
data->clk = of_clk_get(pdev->dev.of_node, 0);
if (IS_ERR(data->clk))
return PTR_ERR(data->clk);
ret = clk_prepare_enable(data->clk);
if (ret) {
clk_put(data->clk);
return ret;
}
val = readl_relaxed(data->base + RTD_RTCACR);
if (!(val & RTD_RTCACR_RTCPWR)) {
writel_relaxed(RTD_RTCACR_RTCPWR, data->base + RTD_RTCACR);
rtd119x_rtc_reset(&pdev->dev);
writel_relaxed(0, data->base + RTD_RTCMIN);
writel_relaxed(0, data->base + RTD_RTCHR);
writel_relaxed(0, data->base + RTD_RTCDATE1);
writel_relaxed(0, data->base + RTD_RTCDATE2);
}
rtd119x_rtc_set_enabled(&pdev->dev, true);
data->rtcdev = devm_rtc_device_register(&pdev->dev, "rtc",
&rtd119x_rtc_ops, THIS_MODULE);
if (IS_ERR(data->rtcdev)) {
dev_err(&pdev->dev, "failed to register rtc device");
clk_disable_unprepare(data->clk);
clk_put(data->clk);
return PTR_ERR(data->rtcdev);
}
return 0;
}
static void rtd119x_rtc_remove(struct platform_device *pdev)
{
struct rtd119x_rtc *data = platform_get_drvdata(pdev);
rtd119x_rtc_set_enabled(&pdev->dev, false);
clk_disable_unprepare(data->clk);
clk_put(data->clk);
}
static struct platform_driver rtd119x_rtc_driver = {
.probe = rtd119x_rtc_probe,
.remove_new = rtd119x_rtc_remove,
.driver = {
.name = "rtd1295-rtc",
.of_match_table = rtd119x_rtc_dt_ids,
},
};
builtin_platform_driver(rtd119x_rtc_driver);
| linux-master | drivers/rtc/rtc-rtd119x.c |
// SPDX-License-Identifier: GPL-2.0-only
#include <linux/bcd.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/mc146818rtc.h>
#ifdef CONFIG_ACPI
#include <linux/acpi.h>
#endif
/*
* Execute a function while the UIP (Update-in-progress) bit of the RTC is
* unset.
*
 * Warning: callback may be executed more than once.
*/
bool mc146818_avoid_UIP(void (*callback)(unsigned char seconds, void *param),
void *param)
{
int i;
unsigned long flags;
unsigned char seconds;
for (i = 0; i < 100; i++) {
spin_lock_irqsave(&rtc_lock, flags);
/*
* Check whether there is an update in progress during which the
* readout is unspecified. The maximum update time is ~2ms. Poll
* every 100 usec for completion.
*
* Store the second value before checking UIP so a long lasting
* NMI which happens to hit after the UIP check cannot make
* an update cycle invisible.
*/
seconds = CMOS_READ(RTC_SECONDS);
if (CMOS_READ(RTC_FREQ_SELECT) & RTC_UIP) {
spin_unlock_irqrestore(&rtc_lock, flags);
udelay(100);
continue;
}
/* Revalidate the above readout */
if (seconds != CMOS_READ(RTC_SECONDS)) {
spin_unlock_irqrestore(&rtc_lock, flags);
continue;
}
if (callback)
callback(seconds, param);
/*
* Check for the UIP bit again. If it is set now then
* the above values may contain garbage.
*/
if (CMOS_READ(RTC_FREQ_SELECT) & RTC_UIP) {
spin_unlock_irqrestore(&rtc_lock, flags);
udelay(100);
continue;
}
/*
* A NMI might have interrupted the above sequence so check
* whether the seconds value has changed which indicates that
* the NMI took longer than the UIP bit was set. Unlikely, but
* possible and there is also virt...
*/
if (seconds != CMOS_READ(RTC_SECONDS)) {
spin_unlock_irqrestore(&rtc_lock, flags);
continue;
}
spin_unlock_irqrestore(&rtc_lock, flags);
return true;
}
return false;
}
EXPORT_SYMBOL_GPL(mc146818_avoid_UIP);
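/*
 * Illustrative usage sketch (not part of this file; the function and
 * parameter names below are invented for the example): snapshot the
 * seconds counter from a UIP-free window.
 */
#if 0
static void sample_seconds(unsigned char seconds, void *param)
{
	/* Runs under rtc_lock, between the two UIP checks */
	*(unsigned char *)param = seconds;
}

static int read_seconds_safely(unsigned char *out)
{
	/* false means no UIP-free window within the ~10ms retry budget */
	return mc146818_avoid_UIP(sample_seconds, out) ? 0 : -EIO;
}
#endif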
/*
 * If the UIP (Update-in-progress) bit of the RTC is set for more than
 * 10ms, the RTC is apparently broken or not present.
*/
bool mc146818_does_rtc_work(void)
{
return mc146818_avoid_UIP(NULL, NULL);
}
EXPORT_SYMBOL_GPL(mc146818_does_rtc_work);
struct mc146818_get_time_callback_param {
struct rtc_time *time;
unsigned char ctrl;
#ifdef CONFIG_ACPI
unsigned char century;
#endif
#ifdef CONFIG_MACH_DECSTATION
unsigned int real_year;
#endif
};
static void mc146818_get_time_callback(unsigned char seconds, void *param_in)
{
struct mc146818_get_time_callback_param *p = param_in;
/*
* Only the values that we read from the RTC are set. We leave
* tm_wday, tm_yday and tm_isdst untouched. Even though the
* RTC has RTC_DAY_OF_WEEK, we ignore it, as it is only updated
* by the RTC when initially set to a non-zero value.
*/
p->time->tm_sec = seconds;
p->time->tm_min = CMOS_READ(RTC_MINUTES);
p->time->tm_hour = CMOS_READ(RTC_HOURS);
p->time->tm_mday = CMOS_READ(RTC_DAY_OF_MONTH);
p->time->tm_mon = CMOS_READ(RTC_MONTH);
p->time->tm_year = CMOS_READ(RTC_YEAR);
#ifdef CONFIG_MACH_DECSTATION
p->real_year = CMOS_READ(RTC_DEC_YEAR);
#endif
#ifdef CONFIG_ACPI
if (acpi_gbl_FADT.header.revision >= FADT2_REVISION_ID &&
acpi_gbl_FADT.century) {
p->century = CMOS_READ(acpi_gbl_FADT.century);
} else {
p->century = 0;
}
#endif
p->ctrl = CMOS_READ(RTC_CONTROL);
}
int mc146818_get_time(struct rtc_time *time)
{
struct mc146818_get_time_callback_param p = {
.time = time
};
if (!mc146818_avoid_UIP(mc146818_get_time_callback, &p)) {
memset(time, 0, sizeof(*time));
return -EIO;
}
	if (!(p.ctrl & RTC_DM_BINARY) || RTC_ALWAYS_BCD) {
time->tm_sec = bcd2bin(time->tm_sec);
time->tm_min = bcd2bin(time->tm_min);
time->tm_hour = bcd2bin(time->tm_hour);
time->tm_mday = bcd2bin(time->tm_mday);
time->tm_mon = bcd2bin(time->tm_mon);
time->tm_year = bcd2bin(time->tm_year);
#ifdef CONFIG_ACPI
p.century = bcd2bin(p.century);
#endif
}
#ifdef CONFIG_MACH_DECSTATION
time->tm_year += p.real_year - 72;
#endif
#ifdef CONFIG_ACPI
if (p.century > 19)
time->tm_year += (p.century - 19) * 100;
#endif
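	/*
	 * Worked example: an ACPI century byte of 20 with a year register
	 * of 23 yields tm_year = 23 + (20 - 19) * 100 = 123, i.e. 2023
	 * once the 1900 epoch of struct rtc_time is applied.
	 */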
/*
* Account for differences between how the RTC uses the values
 * and how they are defined in a struct rtc_time.
*/
if (time->tm_year <= 69)
time->tm_year += 100;
time->tm_mon--;
return 0;
}
EXPORT_SYMBOL_GPL(mc146818_get_time);
/* AMD systems don't allow access to AltCentury with DV1 */
static bool apply_amd_register_a_behavior(void)
{
#ifdef CONFIG_X86
if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD ||
boot_cpu_data.x86_vendor == X86_VENDOR_HYGON)
return true;
#endif
return false;
}
/* Set the current date and time in the real time clock. */
int mc146818_set_time(struct rtc_time *time)
{
unsigned long flags;
unsigned char mon, day, hrs, min, sec;
unsigned char save_control, save_freq_select;
unsigned int yrs;
#ifdef CONFIG_MACH_DECSTATION
unsigned int real_yrs, leap_yr;
#endif
unsigned char century = 0;
yrs = time->tm_year;
mon = time->tm_mon + 1; /* tm_mon starts at zero */
day = time->tm_mday;
hrs = time->tm_hour;
min = time->tm_min;
sec = time->tm_sec;
if (yrs > 255) /* They are unsigned */
return -EINVAL;
#ifdef CONFIG_MACH_DECSTATION
real_yrs = yrs;
leap_yr = ((!((yrs + 1900) % 4) && ((yrs + 1900) % 100)) ||
!((yrs + 1900) % 400));
yrs = 72;
/*
* We want to keep the year set to 73 until March
	 * for non-leap years, so that Feb 29th is handled
* correctly.
*/
if (!leap_yr && mon < 3) {
real_yrs--;
yrs = 73;
}
#endif
#ifdef CONFIG_ACPI
if (acpi_gbl_FADT.header.revision >= FADT2_REVISION_ID &&
acpi_gbl_FADT.century) {
century = (yrs + 1900) / 100;
yrs %= 100;
}
#endif
/* These limits and adjustments are independent of
* whether the chip is in binary mode or not.
*/
if (yrs > 169)
return -EINVAL;
if (yrs >= 100)
yrs -= 100;
spin_lock_irqsave(&rtc_lock, flags);
save_control = CMOS_READ(RTC_CONTROL);
spin_unlock_irqrestore(&rtc_lock, flags);
if (!(save_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD) {
sec = bin2bcd(sec);
min = bin2bcd(min);
hrs = bin2bcd(hrs);
day = bin2bcd(day);
mon = bin2bcd(mon);
yrs = bin2bcd(yrs);
century = bin2bcd(century);
}
spin_lock_irqsave(&rtc_lock, flags);
save_control = CMOS_READ(RTC_CONTROL);
CMOS_WRITE((save_control|RTC_SET), RTC_CONTROL);
save_freq_select = CMOS_READ(RTC_FREQ_SELECT);
if (apply_amd_register_a_behavior())
CMOS_WRITE((save_freq_select & ~RTC_AMD_BANK_SELECT), RTC_FREQ_SELECT);
else
CMOS_WRITE((save_freq_select|RTC_DIV_RESET2), RTC_FREQ_SELECT);
#ifdef CONFIG_MACH_DECSTATION
CMOS_WRITE(real_yrs, RTC_DEC_YEAR);
#endif
CMOS_WRITE(yrs, RTC_YEAR);
CMOS_WRITE(mon, RTC_MONTH);
CMOS_WRITE(day, RTC_DAY_OF_MONTH);
CMOS_WRITE(hrs, RTC_HOURS);
CMOS_WRITE(min, RTC_MINUTES);
CMOS_WRITE(sec, RTC_SECONDS);
#ifdef CONFIG_ACPI
if (acpi_gbl_FADT.header.revision >= FADT2_REVISION_ID &&
acpi_gbl_FADT.century)
CMOS_WRITE(century, acpi_gbl_FADT.century);
#endif
CMOS_WRITE(save_control, RTC_CONTROL);
CMOS_WRITE(save_freq_select, RTC_FREQ_SELECT);
spin_unlock_irqrestore(&rtc_lock, flags);
return 0;
}
EXPORT_SYMBOL_GPL(mc146818_set_time);
| linux-master | drivers/rtc/rtc-mc146818-lib.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* An SPI driver for the Philips PCF2123 RTC
* Copyright 2009 Cyber Switching, Inc.
*
* Author: Chris Verges <[email protected]>
* Maintainers: http://www.cyberswitching.com
*
* based on the RS5C348 driver in this same directory.
*
* Thanks to Christian Pellegrin <[email protected]> for
* the sysfs contributions to this driver.
*
* Please note that the CS is active high, so platform data
* should look something like:
*
* static struct spi_board_info ek_spi_devices[] = {
* ...
* {
* .modalias = "rtc-pcf2123",
* .chip_select = 1,
* .controller_data = (void *)AT91_PIN_PA10,
* .max_speed_hz = 1000 * 1000,
* .mode = SPI_CS_HIGH,
* .bus_num = 0,
* },
* ...
*};
*/
#include <linux/bcd.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/rtc.h>
#include <linux/spi/spi.h>
#include <linux/module.h>
#include <linux/regmap.h>
/* REGISTERS */
#define PCF2123_REG_CTRL1 (0x00) /* Control Register 1 */
#define PCF2123_REG_CTRL2 (0x01) /* Control Register 2 */
#define PCF2123_REG_SC (0x02) /* datetime */
#define PCF2123_REG_MN (0x03)
#define PCF2123_REG_HR (0x04)
#define PCF2123_REG_DM (0x05)
#define PCF2123_REG_DW (0x06)
#define PCF2123_REG_MO (0x07)
#define PCF2123_REG_YR (0x08)
#define PCF2123_REG_ALRM_MN (0x09) /* Alarm Registers */
#define PCF2123_REG_ALRM_HR (0x0a)
#define PCF2123_REG_ALRM_DM (0x0b)
#define PCF2123_REG_ALRM_DW (0x0c)
#define PCF2123_REG_OFFSET (0x0d) /* Clock Rate Offset Register */
#define PCF2123_REG_TMR_CLKOUT (0x0e) /* Timer Registers */
#define PCF2123_REG_CTDWN_TMR (0x0f)
/* PCF2123_REG_CTRL1 BITS */
#define CTRL1_CLEAR (0) /* Clear */
#define CTRL1_CORR_INT BIT(1) /* Correction irq enable */
#define CTRL1_12_HOUR BIT(2) /* 12 hour time */
#define CTRL1_SW_RESET (BIT(3) | BIT(4) | BIT(6)) /* Software reset */
#define CTRL1_STOP BIT(5) /* Stop the clock */
#define CTRL1_EXT_TEST BIT(7) /* External clock test mode */
/* PCF2123_REG_CTRL2 BITS */
#define CTRL2_TIE BIT(0) /* Countdown timer irq enable */
#define CTRL2_AIE BIT(1) /* Alarm irq enable */
#define CTRL2_TF BIT(2) /* Countdown timer flag */
#define CTRL2_AF BIT(3) /* Alarm flag */
#define CTRL2_TI_TP BIT(4) /* Irq pin generates pulse */
#define CTRL2_MSF BIT(5) /* Minute or second irq flag */
#define CTRL2_SI BIT(6) /* Second irq enable */
#define CTRL2_MI BIT(7) /* Minute irq enable */
/* PCF2123_REG_SC BITS */
#define OSC_HAS_STOPPED BIT(7) /* Clock has been stopped */
/* PCF2123_REG_ALRM_XX BITS */
#define ALRM_DISABLE BIT(7) /* MN, HR, DM, or DW alarm matching */
/* PCF2123_REG_TMR_CLKOUT BITS */
#define CD_TMR_4096KHZ (0) /* 4096 KHz countdown timer */
#define CD_TMR_64HZ (1) /* 64 Hz countdown timer */
#define CD_TMR_1HZ (2) /* 1 Hz countdown timer */
#define CD_TMR_60th_HZ (3) /* 60th Hz countdown timer */
#define CD_TMR_TE BIT(3) /* Countdown timer enable */
/* PCF2123_REG_OFFSET BITS */
#define OFFSET_SIGN_BIT 6 /* 2's complement sign bit */
#define OFFSET_COARSE BIT(7) /* Coarse mode offset */
#define OFFSET_STEP (2170) /* Offset step in parts per billion */
#define OFFSET_MASK GENMASK(6, 0) /* Offset value */
/* READ/WRITE ADDRESS BITS */
#define PCF2123_WRITE BIT(4)
#define PCF2123_READ (BIT(4) | BIT(7))
static struct spi_driver pcf2123_driver;
struct pcf2123_data {
struct rtc_device *rtc;
struct regmap *map;
};
static const struct regmap_config pcf2123_regmap_config = {
.reg_bits = 8,
.val_bits = 8,
.read_flag_mask = PCF2123_READ,
.write_flag_mask = PCF2123_WRITE,
.max_register = PCF2123_REG_CTDWN_TMR,
};
static int pcf2123_read_offset(struct device *dev, long *offset)
{
struct pcf2123_data *pcf2123 = dev_get_drvdata(dev);
int ret, val;
unsigned int reg;
	ret = regmap_read(pcf2123->map, PCF2123_REG_OFFSET, &reg);
if (ret)
return ret;
val = sign_extend32((reg & OFFSET_MASK), OFFSET_SIGN_BIT);
if (reg & OFFSET_COARSE)
val *= 2;
*offset = ((long)val) * OFFSET_STEP;
return 0;
}
/*
 * The offset register holds a 7-bit signed value, with a coarse-mode
 * flag in bit 7. In normal (fine) mode, the offset adjusts the first
 * second of n minutes every other hour, with 61, 62 and 63 being
 * shoved into the 60th minute; coarse mode applies the same adjustment
 * every hour. The two modes overlap, with every even normal offset
 * value corresponding to a coarse offset. Based on this, coarse mode
 * is the better fit for the overlapping values, despite its name.
 */
static int pcf2123_set_offset(struct device *dev, long offset)
{
struct pcf2123_data *pcf2123 = dev_get_drvdata(dev);
s8 reg;
if (offset > OFFSET_STEP * 127)
reg = 127;
else if (offset < OFFSET_STEP * -128)
reg = -128;
else
reg = DIV_ROUND_CLOSEST(offset, OFFSET_STEP);
/* choose fine offset only for odd values in the normal range */
if (reg & 1 && reg <= 63 && reg >= -64) {
/* Normal offset. Clear the coarse bit */
reg &= ~OFFSET_COARSE;
} else {
/* Coarse offset. Divide by 2 and set the coarse bit */
reg >>= 1;
reg |= OFFSET_COARSE;
}
return regmap_write(pcf2123->map, PCF2123_REG_OFFSET, (unsigned int)reg);
}
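/*
 * Worked example for the mapping above: a requested offset of
 * +100000 ppb rounds to reg = DIV_ROUND_CLOSEST(100000, 2170) = 46;
 * 46 is even, so it is halved to 23 and written with OFFSET_COARSE
 * set, while an odd in-range value such as 45 would be written as-is
 * in fine mode.
 */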
static int pcf2123_rtc_read_time(struct device *dev, struct rtc_time *tm)
{
struct pcf2123_data *pcf2123 = dev_get_drvdata(dev);
u8 rxbuf[7];
int ret;
ret = regmap_bulk_read(pcf2123->map, PCF2123_REG_SC, rxbuf,
sizeof(rxbuf));
if (ret)
return ret;
if (rxbuf[0] & OSC_HAS_STOPPED) {
dev_info(dev, "clock was stopped. Time is not valid\n");
return -EINVAL;
}
tm->tm_sec = bcd2bin(rxbuf[0] & 0x7F);
tm->tm_min = bcd2bin(rxbuf[1] & 0x7F);
tm->tm_hour = bcd2bin(rxbuf[2] & 0x3F); /* rtc hr 0-23 */
tm->tm_mday = bcd2bin(rxbuf[3] & 0x3F);
tm->tm_wday = rxbuf[4] & 0x07;
tm->tm_mon = bcd2bin(rxbuf[5] & 0x1F) - 1; /* rtc mn 1-12 */
tm->tm_year = bcd2bin(rxbuf[6]) + 100;
dev_dbg(dev, "%s: tm is %ptR\n", __func__, tm);
return 0;
}
static int pcf2123_rtc_set_time(struct device *dev, struct rtc_time *tm)
{
struct pcf2123_data *pcf2123 = dev_get_drvdata(dev);
u8 txbuf[7];
int ret;
dev_dbg(dev, "%s: tm is %ptR\n", __func__, tm);
/* Stop the counter first */
ret = regmap_write(pcf2123->map, PCF2123_REG_CTRL1, CTRL1_STOP);
if (ret)
return ret;
/* Set the new time */
txbuf[0] = bin2bcd(tm->tm_sec & 0x7F);
txbuf[1] = bin2bcd(tm->tm_min & 0x7F);
txbuf[2] = bin2bcd(tm->tm_hour & 0x3F);
txbuf[3] = bin2bcd(tm->tm_mday & 0x3F);
txbuf[4] = tm->tm_wday & 0x07;
txbuf[5] = bin2bcd((tm->tm_mon + 1) & 0x1F); /* rtc mn 1-12 */
txbuf[6] = bin2bcd(tm->tm_year - 100);
ret = regmap_bulk_write(pcf2123->map, PCF2123_REG_SC, txbuf,
sizeof(txbuf));
if (ret)
return ret;
/* Start the counter */
ret = regmap_write(pcf2123->map, PCF2123_REG_CTRL1, CTRL1_CLEAR);
if (ret)
return ret;
return 0;
}
static int pcf2123_rtc_alarm_irq_enable(struct device *dev, unsigned int en)
{
struct pcf2123_data *pcf2123 = dev_get_drvdata(dev);
return regmap_update_bits(pcf2123->map, PCF2123_REG_CTRL2, CTRL2_AIE,
en ? CTRL2_AIE : 0);
}
static int pcf2123_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alm)
{
struct pcf2123_data *pcf2123 = dev_get_drvdata(dev);
u8 rxbuf[4];
int ret;
unsigned int val = 0;
ret = regmap_bulk_read(pcf2123->map, PCF2123_REG_ALRM_MN, rxbuf,
sizeof(rxbuf));
if (ret)
return ret;
alm->time.tm_min = bcd2bin(rxbuf[0] & 0x7F);
alm->time.tm_hour = bcd2bin(rxbuf[1] & 0x3F);
alm->time.tm_mday = bcd2bin(rxbuf[2] & 0x3F);
alm->time.tm_wday = bcd2bin(rxbuf[3] & 0x07);
dev_dbg(dev, "%s: alm is %ptR\n", __func__, &alm->time);
ret = regmap_read(pcf2123->map, PCF2123_REG_CTRL2, &val);
if (ret)
return ret;
alm->enabled = !!(val & CTRL2_AIE);
return 0;
}
static int pcf2123_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alm)
{
struct pcf2123_data *pcf2123 = dev_get_drvdata(dev);
u8 txbuf[4];
int ret;
dev_dbg(dev, "%s: alm is %ptR\n", __func__, &alm->time);
/* Disable alarm interrupt */
ret = regmap_update_bits(pcf2123->map, PCF2123_REG_CTRL2, CTRL2_AIE, 0);
if (ret)
return ret;
/* Ensure alarm flag is clear */
ret = regmap_update_bits(pcf2123->map, PCF2123_REG_CTRL2, CTRL2_AF, 0);
if (ret)
return ret;
/* Set new alarm */
txbuf[0] = bin2bcd(alm->time.tm_min & 0x7F);
txbuf[1] = bin2bcd(alm->time.tm_hour & 0x3F);
txbuf[2] = bin2bcd(alm->time.tm_mday & 0x3F);
txbuf[3] = ALRM_DISABLE;
ret = regmap_bulk_write(pcf2123->map, PCF2123_REG_ALRM_MN, txbuf,
sizeof(txbuf));
if (ret)
return ret;
return pcf2123_rtc_alarm_irq_enable(dev, alm->enabled);
}
static irqreturn_t pcf2123_rtc_irq(int irq, void *dev)
{
struct pcf2123_data *pcf2123 = dev_get_drvdata(dev);
unsigned int val = 0;
int ret = IRQ_NONE;
rtc_lock(pcf2123->rtc);
regmap_read(pcf2123->map, PCF2123_REG_CTRL2, &val);
/* Alarm? */
if (val & CTRL2_AF) {
ret = IRQ_HANDLED;
/* Clear alarm flag */
regmap_update_bits(pcf2123->map, PCF2123_REG_CTRL2, CTRL2_AF, 0);
rtc_update_irq(pcf2123->rtc, 1, RTC_IRQF | RTC_AF);
}
rtc_unlock(pcf2123->rtc);
return ret;
}
static int pcf2123_reset(struct device *dev)
{
struct pcf2123_data *pcf2123 = dev_get_drvdata(dev);
int ret;
unsigned int val = 0;
ret = regmap_write(pcf2123->map, PCF2123_REG_CTRL1, CTRL1_SW_RESET);
if (ret)
return ret;
/* Stop the counter */
dev_dbg(dev, "stopping RTC\n");
ret = regmap_write(pcf2123->map, PCF2123_REG_CTRL1, CTRL1_STOP);
if (ret)
return ret;
/* See if the counter was actually stopped */
dev_dbg(dev, "checking for presence of RTC\n");
ret = regmap_read(pcf2123->map, PCF2123_REG_CTRL1, &val);
if (ret)
return ret;
dev_dbg(dev, "received data from RTC (0x%08X)\n", val);
if (!(val & CTRL1_STOP))
return -ENODEV;
/* Start the counter */
ret = regmap_write(pcf2123->map, PCF2123_REG_CTRL1, CTRL1_CLEAR);
if (ret)
return ret;
return 0;
}
static const struct rtc_class_ops pcf2123_rtc_ops = {
.read_time = pcf2123_rtc_read_time,
.set_time = pcf2123_rtc_set_time,
.read_offset = pcf2123_read_offset,
.set_offset = pcf2123_set_offset,
.read_alarm = pcf2123_rtc_read_alarm,
.set_alarm = pcf2123_rtc_set_alarm,
.alarm_irq_enable = pcf2123_rtc_alarm_irq_enable,
};
static int pcf2123_probe(struct spi_device *spi)
{
struct rtc_device *rtc;
struct rtc_time tm;
struct pcf2123_data *pcf2123;
int ret = 0;
pcf2123 = devm_kzalloc(&spi->dev, sizeof(struct pcf2123_data),
GFP_KERNEL);
if (!pcf2123)
return -ENOMEM;
dev_set_drvdata(&spi->dev, pcf2123);
pcf2123->map = devm_regmap_init_spi(spi, &pcf2123_regmap_config);
if (IS_ERR(pcf2123->map)) {
dev_err(&spi->dev, "regmap init failed.\n");
return PTR_ERR(pcf2123->map);
}
ret = pcf2123_rtc_read_time(&spi->dev, &tm);
if (ret < 0) {
ret = pcf2123_reset(&spi->dev);
if (ret < 0) {
dev_err(&spi->dev, "chip not found\n");
return ret;
}
}
	dev_info(&spi->dev, "spiclk %u kHz.\n",
			(spi->max_speed_hz + 500) / 1000);
/* Finalize the initialization */
rtc = devm_rtc_allocate_device(&spi->dev);
if (IS_ERR(rtc))
return PTR_ERR(rtc);
pcf2123->rtc = rtc;
/* Register alarm irq */
if (spi->irq > 0) {
unsigned long irqflags = IRQF_TRIGGER_LOW;
if (dev_fwnode(&spi->dev))
irqflags = 0;
ret = devm_request_threaded_irq(&spi->dev, spi->irq, NULL,
pcf2123_rtc_irq,
irqflags | IRQF_ONESHOT,
pcf2123_driver.driver.name, &spi->dev);
if (!ret)
device_init_wakeup(&spi->dev, true);
else
dev_err(&spi->dev, "could not request irq.\n");
}
/* The PCF2123's alarm only has minute accuracy. Must add timer
* support to this driver to generate interrupts more than once
* per minute.
*/
set_bit(RTC_FEATURE_ALARM_RES_MINUTE, rtc->features);
clear_bit(RTC_FEATURE_UPDATE_INTERRUPT, rtc->features);
rtc->ops = &pcf2123_rtc_ops;
rtc->range_min = RTC_TIMESTAMP_BEGIN_2000;
rtc->range_max = RTC_TIMESTAMP_END_2099;
rtc->set_start_time = true;
ret = devm_rtc_register_device(rtc);
if (ret)
return ret;
return 0;
}
#ifdef CONFIG_OF
static const struct of_device_id pcf2123_dt_ids[] = {
{ .compatible = "nxp,pcf2123", },
{ .compatible = "microcrystal,rv2123", },
/* Deprecated, do not use */
{ .compatible = "nxp,rtc-pcf2123", },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, pcf2123_dt_ids);
#endif
static const struct spi_device_id pcf2123_spi_ids[] = {
{ .name = "pcf2123", },
{ .name = "rv2123", },
{ .name = "rtc-pcf2123", },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(spi, pcf2123_spi_ids);
static struct spi_driver pcf2123_driver = {
.driver = {
.name = "rtc-pcf2123",
.of_match_table = of_match_ptr(pcf2123_dt_ids),
},
.probe = pcf2123_probe,
.id_table = pcf2123_spi_ids,
};
module_spi_driver(pcf2123_driver);
MODULE_AUTHOR("Chris Verges <[email protected]>");
MODULE_DESCRIPTION("NXP PCF2123 RTC driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/rtc/rtc-pcf2123.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Haoyu HYM8563 RTC driver
*
* Copyright (C) 2013 MundoReader S.L.
* Author: Heiko Stuebner <[email protected]>
*
* based on rtc-HYM8563
* Copyright (C) 2010 ROCKCHIP, Inc.
*/
#include <linux/module.h>
#include <linux/clk-provider.h>
#include <linux/i2c.h>
#include <linux/bcd.h>
#include <linux/rtc.h>
#define HYM8563_CTL1 0x00
#define HYM8563_CTL1_TEST BIT(7)
#define HYM8563_CTL1_STOP BIT(5)
#define HYM8563_CTL1_TESTC BIT(3)
#define HYM8563_CTL2 0x01
#define HYM8563_CTL2_TI_TP BIT(4)
#define HYM8563_CTL2_AF BIT(3)
#define HYM8563_CTL2_TF BIT(2)
#define HYM8563_CTL2_AIE BIT(1)
#define HYM8563_CTL2_TIE BIT(0)
#define HYM8563_SEC 0x02
#define HYM8563_SEC_VL BIT(7)
#define HYM8563_SEC_MASK 0x7f
#define HYM8563_MIN 0x03
#define HYM8563_MIN_MASK 0x7f
#define HYM8563_HOUR 0x04
#define HYM8563_HOUR_MASK 0x3f
#define HYM8563_DAY 0x05
#define HYM8563_DAY_MASK 0x3f
#define HYM8563_WEEKDAY 0x06
#define HYM8563_WEEKDAY_MASK 0x07
#define HYM8563_MONTH 0x07
#define HYM8563_MONTH_CENTURY BIT(7)
#define HYM8563_MONTH_MASK 0x1f
#define HYM8563_YEAR 0x08
#define HYM8563_ALM_MIN 0x09
#define HYM8563_ALM_HOUR 0x0a
#define HYM8563_ALM_DAY 0x0b
#define HYM8563_ALM_WEEK 0x0c
/* Each alarm check can be disabled by setting this bit in the register */
#define HYM8563_ALM_BIT_DISABLE BIT(7)
#define HYM8563_CLKOUT 0x0d
#define HYM8563_CLKOUT_ENABLE BIT(7)
#define HYM8563_CLKOUT_32768 0
#define HYM8563_CLKOUT_1024 1
#define HYM8563_CLKOUT_32 2
#define HYM8563_CLKOUT_1 3
#define HYM8563_CLKOUT_MASK 3
#define HYM8563_TMR_CTL 0x0e
#define HYM8563_TMR_CTL_ENABLE BIT(7)
#define HYM8563_TMR_CTL_4096 0
#define HYM8563_TMR_CTL_64 1
#define HYM8563_TMR_CTL_1 2
#define HYM8563_TMR_CTL_1_60 3
#define HYM8563_TMR_CTL_MASK 3
#define HYM8563_TMR_CNT 0x0f
struct hym8563 {
struct i2c_client *client;
struct rtc_device *rtc;
#ifdef CONFIG_COMMON_CLK
struct clk_hw clkout_hw;
#endif
};
/*
* RTC handling
*/
static int hym8563_rtc_read_time(struct device *dev, struct rtc_time *tm)
{
struct i2c_client *client = to_i2c_client(dev);
u8 buf[7];
int ret;
ret = i2c_smbus_read_i2c_block_data(client, HYM8563_SEC, 7, buf);
if (ret < 0)
return ret;
if (buf[0] & HYM8563_SEC_VL) {
dev_warn(&client->dev,
"no valid clock/calendar values available\n");
return -EINVAL;
}
tm->tm_sec = bcd2bin(buf[0] & HYM8563_SEC_MASK);
tm->tm_min = bcd2bin(buf[1] & HYM8563_MIN_MASK);
tm->tm_hour = bcd2bin(buf[2] & HYM8563_HOUR_MASK);
tm->tm_mday = bcd2bin(buf[3] & HYM8563_DAY_MASK);
tm->tm_wday = bcd2bin(buf[4] & HYM8563_WEEKDAY_MASK); /* 0 = Sun */
tm->tm_mon = bcd2bin(buf[5] & HYM8563_MONTH_MASK) - 1; /* 0 = Jan */
tm->tm_year = bcd2bin(buf[6]) + 100;
return 0;
}
static int hym8563_rtc_set_time(struct device *dev, struct rtc_time *tm)
{
struct i2c_client *client = to_i2c_client(dev);
u8 buf[7];
int ret;
	/* Years >= 2100 are too far in the future, 19XX is too early */
if (tm->tm_year < 100 || tm->tm_year >= 200)
return -EINVAL;
buf[0] = bin2bcd(tm->tm_sec);
buf[1] = bin2bcd(tm->tm_min);
buf[2] = bin2bcd(tm->tm_hour);
buf[3] = bin2bcd(tm->tm_mday);
buf[4] = bin2bcd(tm->tm_wday);
buf[5] = bin2bcd(tm->tm_mon + 1);
/*
* While the HYM8563 has a century flag in the month register,
* it does not seem to carry it over a subsequent write/read.
	 * So we'll limit ourselves to 100 years, starting at 2000 for now.
*/
buf[6] = bin2bcd(tm->tm_year - 100);
/*
* CTL1 only contains TEST-mode bits apart from stop,
* so no need to read the value first
*/
ret = i2c_smbus_write_byte_data(client, HYM8563_CTL1,
HYM8563_CTL1_STOP);
if (ret < 0)
return ret;
ret = i2c_smbus_write_i2c_block_data(client, HYM8563_SEC, 7, buf);
if (ret < 0)
return ret;
ret = i2c_smbus_write_byte_data(client, HYM8563_CTL1, 0);
if (ret < 0)
return ret;
return 0;
}
static int hym8563_rtc_alarm_irq_enable(struct device *dev,
unsigned int enabled)
{
struct i2c_client *client = to_i2c_client(dev);
int data;
data = i2c_smbus_read_byte_data(client, HYM8563_CTL2);
if (data < 0)
return data;
if (enabled)
data |= HYM8563_CTL2_AIE;
else
data &= ~HYM8563_CTL2_AIE;
return i2c_smbus_write_byte_data(client, HYM8563_CTL2, data);
};
static int hym8563_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alm)
{
struct i2c_client *client = to_i2c_client(dev);
struct rtc_time *alm_tm = &alm->time;
u8 buf[4];
int ret;
ret = i2c_smbus_read_i2c_block_data(client, HYM8563_ALM_MIN, 4, buf);
if (ret < 0)
return ret;
/* The alarm only has a minute accuracy */
alm_tm->tm_sec = 0;
alm_tm->tm_min = (buf[0] & HYM8563_ALM_BIT_DISABLE) ?
-1 :
bcd2bin(buf[0] & HYM8563_MIN_MASK);
alm_tm->tm_hour = (buf[1] & HYM8563_ALM_BIT_DISABLE) ?
-1 :
bcd2bin(buf[1] & HYM8563_HOUR_MASK);
alm_tm->tm_mday = (buf[2] & HYM8563_ALM_BIT_DISABLE) ?
-1 :
bcd2bin(buf[2] & HYM8563_DAY_MASK);
alm_tm->tm_wday = (buf[3] & HYM8563_ALM_BIT_DISABLE) ?
-1 :
bcd2bin(buf[3] & HYM8563_WEEKDAY_MASK);
ret = i2c_smbus_read_byte_data(client, HYM8563_CTL2);
if (ret < 0)
return ret;
if (ret & HYM8563_CTL2_AIE)
alm->enabled = 1;
return 0;
}
static int hym8563_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alm)
{
struct i2c_client *client = to_i2c_client(dev);
struct rtc_time *alm_tm = &alm->time;
u8 buf[4];
int ret;
ret = i2c_smbus_read_byte_data(client, HYM8563_CTL2);
if (ret < 0)
return ret;
ret &= ~HYM8563_CTL2_AIE;
ret = i2c_smbus_write_byte_data(client, HYM8563_CTL2, ret);
if (ret < 0)
return ret;
buf[0] = (alm_tm->tm_min < 60 && alm_tm->tm_min >= 0) ?
bin2bcd(alm_tm->tm_min) : HYM8563_ALM_BIT_DISABLE;
buf[1] = (alm_tm->tm_hour < 24 && alm_tm->tm_hour >= 0) ?
bin2bcd(alm_tm->tm_hour) : HYM8563_ALM_BIT_DISABLE;
buf[2] = (alm_tm->tm_mday <= 31 && alm_tm->tm_mday >= 1) ?
bin2bcd(alm_tm->tm_mday) : HYM8563_ALM_BIT_DISABLE;
buf[3] = (alm_tm->tm_wday < 7 && alm_tm->tm_wday >= 0) ?
bin2bcd(alm_tm->tm_wday) : HYM8563_ALM_BIT_DISABLE;
ret = i2c_smbus_write_i2c_block_data(client, HYM8563_ALM_MIN, 4, buf);
if (ret < 0)
return ret;
return hym8563_rtc_alarm_irq_enable(dev, alm->enabled);
}
static const struct rtc_class_ops hym8563_rtc_ops = {
.read_time = hym8563_rtc_read_time,
.set_time = hym8563_rtc_set_time,
.alarm_irq_enable = hym8563_rtc_alarm_irq_enable,
.read_alarm = hym8563_rtc_read_alarm,
.set_alarm = hym8563_rtc_set_alarm,
};
/*
* Handling of the clkout
*/
#ifdef CONFIG_COMMON_CLK
#define clkout_hw_to_hym8563(_hw) container_of(_hw, struct hym8563, clkout_hw)
static int clkout_rates[] = {
32768,
1024,
32,
1,
};
static unsigned long hym8563_clkout_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
struct hym8563 *hym8563 = clkout_hw_to_hym8563(hw);
struct i2c_client *client = hym8563->client;
int ret = i2c_smbus_read_byte_data(client, HYM8563_CLKOUT);
if (ret < 0)
return 0;
ret &= HYM8563_CLKOUT_MASK;
return clkout_rates[ret];
}
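/*
 * Round down to the highest supported rate that does not exceed the
 * request: the table above is sorted in descending order, so e.g. a
 * 2000 Hz request yields 1024 Hz, and anything below 1 Hz falls
 * through to 0 (behavioural sketch of the loop below).
 */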
static long hym8563_clkout_round_rate(struct clk_hw *hw, unsigned long rate,
unsigned long *prate)
{
int i;
for (i = 0; i < ARRAY_SIZE(clkout_rates); i++)
if (clkout_rates[i] <= rate)
return clkout_rates[i];
return 0;
}
static int hym8563_clkout_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate)
{
struct hym8563 *hym8563 = clkout_hw_to_hym8563(hw);
struct i2c_client *client = hym8563->client;
int ret = i2c_smbus_read_byte_data(client, HYM8563_CLKOUT);
int i;
if (ret < 0)
return ret;
for (i = 0; i < ARRAY_SIZE(clkout_rates); i++)
if (clkout_rates[i] == rate) {
ret &= ~HYM8563_CLKOUT_MASK;
ret |= i;
return i2c_smbus_write_byte_data(client,
HYM8563_CLKOUT, ret);
}
return -EINVAL;
}
static int hym8563_clkout_control(struct clk_hw *hw, bool enable)
{
struct hym8563 *hym8563 = clkout_hw_to_hym8563(hw);
struct i2c_client *client = hym8563->client;
int ret = i2c_smbus_read_byte_data(client, HYM8563_CLKOUT);
if (ret < 0)
return ret;
if (enable)
ret |= HYM8563_CLKOUT_ENABLE;
else
ret &= ~HYM8563_CLKOUT_ENABLE;
return i2c_smbus_write_byte_data(client, HYM8563_CLKOUT, ret);
}
static int hym8563_clkout_prepare(struct clk_hw *hw)
{
return hym8563_clkout_control(hw, 1);
}
static void hym8563_clkout_unprepare(struct clk_hw *hw)
{
hym8563_clkout_control(hw, 0);
}
static int hym8563_clkout_is_prepared(struct clk_hw *hw)
{
struct hym8563 *hym8563 = clkout_hw_to_hym8563(hw);
struct i2c_client *client = hym8563->client;
int ret = i2c_smbus_read_byte_data(client, HYM8563_CLKOUT);
if (ret < 0)
return ret;
return !!(ret & HYM8563_CLKOUT_ENABLE);
}
static const struct clk_ops hym8563_clkout_ops = {
.prepare = hym8563_clkout_prepare,
.unprepare = hym8563_clkout_unprepare,
.is_prepared = hym8563_clkout_is_prepared,
.recalc_rate = hym8563_clkout_recalc_rate,
.round_rate = hym8563_clkout_round_rate,
.set_rate = hym8563_clkout_set_rate,
};
static struct clk *hym8563_clkout_register_clk(struct hym8563 *hym8563)
{
struct i2c_client *client = hym8563->client;
struct device_node *node = client->dev.of_node;
struct clk *clk;
struct clk_init_data init;
int ret;
ret = i2c_smbus_write_byte_data(client, HYM8563_CLKOUT,
0);
if (ret < 0)
return ERR_PTR(ret);
init.name = "hym8563-clkout";
init.ops = &hym8563_clkout_ops;
init.flags = 0;
init.parent_names = NULL;
init.num_parents = 0;
hym8563->clkout_hw.init = &init;
	/* optional override of the clock name */
of_property_read_string(node, "clock-output-names", &init.name);
/* register the clock */
clk = clk_register(&client->dev, &hym8563->clkout_hw);
if (!IS_ERR(clk))
of_clk_add_provider(node, of_clk_src_simple_get, clk);
return clk;
}
#endif
/*
* The alarm interrupt is implemented as a level-low interrupt in the
* hym8563, while the timer interrupt uses a falling edge.
* We don't use the timer at all, so the interrupt is requested to
* use the level-low trigger.
*/
static irqreturn_t hym8563_irq(int irq, void *dev_id)
{
struct hym8563 *hym8563 = (struct hym8563 *)dev_id;
struct i2c_client *client = hym8563->client;
int data, ret;
rtc_lock(hym8563->rtc);
/* Clear the alarm flag */
data = i2c_smbus_read_byte_data(client, HYM8563_CTL2);
if (data < 0) {
dev_err(&client->dev, "%s: error reading i2c data %d\n",
__func__, data);
goto out;
}
data &= ~HYM8563_CTL2_AF;
ret = i2c_smbus_write_byte_data(client, HYM8563_CTL2, data);
if (ret < 0) {
dev_err(&client->dev, "%s: error writing i2c data %d\n",
__func__, ret);
}
out:
rtc_unlock(hym8563->rtc);
return IRQ_HANDLED;
}
static int hym8563_init_device(struct i2c_client *client)
{
int ret;
/* Clear stop flag if present */
ret = i2c_smbus_write_byte_data(client, HYM8563_CTL1, 0);
if (ret < 0)
return ret;
ret = i2c_smbus_read_byte_data(client, HYM8563_CTL2);
if (ret < 0)
return ret;
/* Disable alarm and timer interrupts */
ret &= ~HYM8563_CTL2_AIE;
ret &= ~HYM8563_CTL2_TIE;
/* Clear any pending alarm and timer flags */
if (ret & HYM8563_CTL2_AF)
ret &= ~HYM8563_CTL2_AF;
if (ret & HYM8563_CTL2_TF)
ret &= ~HYM8563_CTL2_TF;
ret &= ~HYM8563_CTL2_TI_TP;
return i2c_smbus_write_byte_data(client, HYM8563_CTL2, ret);
}
#ifdef CONFIG_PM_SLEEP
static int hym8563_suspend(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
int ret;
if (device_may_wakeup(dev)) {
ret = enable_irq_wake(client->irq);
if (ret) {
dev_err(dev, "enable_irq_wake failed, %d\n", ret);
return ret;
}
}
return 0;
}
static int hym8563_resume(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
if (device_may_wakeup(dev))
disable_irq_wake(client->irq);
return 0;
}
#endif
static SIMPLE_DEV_PM_OPS(hym8563_pm_ops, hym8563_suspend, hym8563_resume);
static int hym8563_probe(struct i2c_client *client)
{
struct hym8563 *hym8563;
int ret;
hym8563 = devm_kzalloc(&client->dev, sizeof(*hym8563), GFP_KERNEL);
if (!hym8563)
return -ENOMEM;
hym8563->rtc = devm_rtc_allocate_device(&client->dev);
if (IS_ERR(hym8563->rtc))
return PTR_ERR(hym8563->rtc);
hym8563->client = client;
i2c_set_clientdata(client, hym8563);
ret = hym8563_init_device(client);
if (ret) {
dev_err(&client->dev, "could not init device, %d\n", ret);
return ret;
}
if (client->irq > 0) {
unsigned long irqflags = IRQF_TRIGGER_LOW;
if (dev_fwnode(&client->dev))
irqflags = 0;
ret = devm_request_threaded_irq(&client->dev, client->irq,
NULL, hym8563_irq,
irqflags | IRQF_ONESHOT,
client->name, hym8563);
if (ret < 0) {
dev_err(&client->dev, "irq %d request failed, %d\n",
client->irq, ret);
return ret;
}
}
if (client->irq > 0 ||
device_property_read_bool(&client->dev, "wakeup-source")) {
device_init_wakeup(&client->dev, true);
}
/* check state of calendar information */
ret = i2c_smbus_read_byte_data(client, HYM8563_SEC);
if (ret < 0)
return ret;
dev_dbg(&client->dev, "rtc information is %s\n",
(ret & HYM8563_SEC_VL) ? "invalid" : "valid");
hym8563->rtc->ops = &hym8563_rtc_ops;
set_bit(RTC_FEATURE_ALARM_RES_MINUTE, hym8563->rtc->features);
clear_bit(RTC_FEATURE_UPDATE_INTERRUPT, hym8563->rtc->features);
#ifdef CONFIG_COMMON_CLK
hym8563_clkout_register_clk(hym8563);
#endif
return devm_rtc_register_device(hym8563->rtc);
}
static const struct i2c_device_id hym8563_id[] = {
{ "hym8563", 0 },
{},
};
MODULE_DEVICE_TABLE(i2c, hym8563_id);
static const struct of_device_id hym8563_dt_idtable[] = {
{ .compatible = "haoyu,hym8563" },
{},
};
MODULE_DEVICE_TABLE(of, hym8563_dt_idtable);
static struct i2c_driver hym8563_driver = {
.driver = {
.name = "rtc-hym8563",
.pm = &hym8563_pm_ops,
.of_match_table = hym8563_dt_idtable,
},
.probe = hym8563_probe,
.id_table = hym8563_id,
};
module_i2c_driver(hym8563_driver);
MODULE_AUTHOR("Heiko Stuebner <[email protected]>");
MODULE_DESCRIPTION("HYM8563 RTC driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/rtc/rtc-hym8563.c |
// SPDX-License-Identifier: GPL-2.0
/*
* RTC subsystem, proc interface
*
* Copyright (C) 2005-06 Tower Technologies
* Author: Alessandro Zummo <[email protected]>
*
* based on arch/arm/common/rtctime.c
*/
#include <linux/module.h>
#include <linux/rtc.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include "rtc-core.h"
#define NAME_SIZE 10
#if defined(CONFIG_RTC_HCTOSYS_DEVICE)
static bool is_rtc_hctosys(struct rtc_device *rtc)
{
int size;
char name[NAME_SIZE];
size = snprintf(name, NAME_SIZE, "rtc%d", rtc->id);
if (size >= NAME_SIZE)
return false;
return !strncmp(name, CONFIG_RTC_HCTOSYS_DEVICE, NAME_SIZE);
}
#else
static bool is_rtc_hctosys(struct rtc_device *rtc)
{
return (rtc->id == 0);
}
#endif
static int rtc_proc_show(struct seq_file *seq, void *offset)
{
int err;
struct rtc_device *rtc = seq->private;
const struct rtc_class_ops *ops = rtc->ops;
struct rtc_wkalrm alrm;
struct rtc_time tm;
err = rtc_read_time(rtc, &tm);
if (err == 0) {
seq_printf(seq,
"rtc_time\t: %ptRt\n"
"rtc_date\t: %ptRd\n",
&tm, &tm);
}
err = rtc_read_alarm(rtc, &alrm);
if (err == 0) {
seq_printf(seq, "alrm_time\t: %ptRt\n", &alrm.time);
seq_printf(seq, "alrm_date\t: %ptRd\n", &alrm.time);
seq_printf(seq, "alarm_IRQ\t: %s\n",
alrm.enabled ? "yes" : "no");
seq_printf(seq, "alrm_pending\t: %s\n",
alrm.pending ? "yes" : "no");
seq_printf(seq, "update IRQ enabled\t: %s\n",
(rtc->uie_rtctimer.enabled) ? "yes" : "no");
seq_printf(seq, "periodic IRQ enabled\t: %s\n",
(rtc->pie_enabled) ? "yes" : "no");
seq_printf(seq, "periodic IRQ frequency\t: %d\n",
rtc->irq_freq);
seq_printf(seq, "max user IRQ frequency\t: %d\n",
rtc->max_user_freq);
}
seq_printf(seq, "24hr\t\t: yes\n");
if (ops->proc)
ops->proc(rtc->dev.parent, seq);
return 0;
}
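/*
 * Example of the resulting /proc/driver/rtc layout (field values are
 * illustrative only; the alarm block is printed only when
 * rtc_read_alarm() succeeds):
 *
 *	rtc_time	: 12:34:56
 *	rtc_date	: 2023-01-02
 *	alrm_time	: 00:00:00
 *	alrm_date	: 2023-01-03
 *	alarm_IRQ	: no
 *	alrm_pending	: no
 *	update IRQ enabled	: no
 *	periodic IRQ enabled	: no
 *	periodic IRQ frequency	: 1
 *	max user IRQ frequency	: 64
 *	24hr		: yes
 */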
void rtc_proc_add_device(struct rtc_device *rtc)
{
if (is_rtc_hctosys(rtc))
proc_create_single_data("driver/rtc", 0, NULL, rtc_proc_show,
rtc);
}
void rtc_proc_del_device(struct rtc_device *rtc)
{
if (is_rtc_hctosys(rtc))
remove_proc_entry("driver/rtc", NULL);
}
| linux-master | drivers/rtc/proc.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* An I2C driver for the Intersil ISL 12026
*
* Copyright (c) 2018 Cavium, Inc.
*/
#include <linux/bcd.h>
#include <linux/delay.h>
#include <linux/i2c.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/nvmem-provider.h>
#include <linux/of.h>
#include <linux/rtc.h>
#include <linux/slab.h>
/* register offsets */
#define ISL12026_REG_PWR 0x14
# define ISL12026_REG_PWR_BSW BIT(6)
# define ISL12026_REG_PWR_SBIB BIT(7)
#define ISL12026_REG_SC 0x30
#define ISL12026_REG_HR 0x32
# define ISL12026_REG_HR_MIL BIT(7) /* military or 24 hour time */
#define ISL12026_REG_SR 0x3f
# define ISL12026_REG_SR_RTCF BIT(0)
# define ISL12026_REG_SR_WEL BIT(1)
# define ISL12026_REG_SR_RWEL BIT(2)
# define ISL12026_REG_SR_MBZ BIT(3)
# define ISL12026_REG_SR_OSCF BIT(4)
/* The EEPROM array responds at i2c address 0x57 */
#define ISL12026_EEPROM_ADDR 0x57
#define ISL12026_PAGESIZE 16
#define ISL12026_NVMEM_WRITE_TIME 20
struct isl12026 {
struct rtc_device *rtc;
struct i2c_client *nvm_client;
};
static int isl12026_read_reg(struct i2c_client *client, int reg)
{
	u8 addr[] = {0, reg};	/* 2-byte register address, MSB first */
u8 val;
int ret;
struct i2c_msg msgs[] = {
{
.addr = client->addr,
.flags = 0,
.len = sizeof(addr),
.buf = addr
}, {
.addr = client->addr,
.flags = I2C_M_RD,
.len = 1,
.buf = &val
}
};
ret = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs));
if (ret != ARRAY_SIZE(msgs)) {
dev_err(&client->dev, "read reg error, ret=%d\n", ret);
ret = ret < 0 ? ret : -EIO;
} else {
ret = val;
}
return ret;
}
static int isl12026_arm_write(struct i2c_client *client)
{
int ret;
u8 op[3];
struct i2c_msg msg = {
.addr = client->addr,
.flags = 0,
.len = 1,
.buf = op
};
/* Set SR.WEL */
op[0] = 0;
op[1] = ISL12026_REG_SR;
op[2] = ISL12026_REG_SR_WEL;
msg.len = 3;
ret = i2c_transfer(client->adapter, &msg, 1);
if (ret != 1) {
dev_err(&client->dev, "write error SR.WEL, ret=%d\n", ret);
ret = ret < 0 ? ret : -EIO;
goto out;
}
/* Set SR.WEL and SR.RWEL */
op[2] = ISL12026_REG_SR_WEL | ISL12026_REG_SR_RWEL;
msg.len = 3;
ret = i2c_transfer(client->adapter, &msg, 1);
if (ret != 1) {
dev_err(&client->dev,
"write error SR.WEL|SR.RWEL, ret=%d\n", ret);
ret = ret < 0 ? ret : -EIO;
goto out;
} else {
ret = 0;
}
out:
return ret;
}
static int isl12026_disarm_write(struct i2c_client *client)
{
int ret;
u8 op[3] = {0, ISL12026_REG_SR, 0};
struct i2c_msg msg = {
.addr = client->addr,
.flags = 0,
.len = sizeof(op),
.buf = op
};
ret = i2c_transfer(client->adapter, &msg, 1);
if (ret != 1) {
dev_err(&client->dev,
"write error SR, ret=%d\n", ret);
ret = ret < 0 ? ret : -EIO;
} else {
ret = 0;
}
return ret;
}
static int isl12026_write_reg(struct i2c_client *client, int reg, u8 val)
{
int ret;
u8 op[3] = {0, reg, val};
struct i2c_msg msg = {
.addr = client->addr,
.flags = 0,
.len = sizeof(op),
.buf = op
};
ret = isl12026_arm_write(client);
if (ret)
return ret;
ret = i2c_transfer(client->adapter, &msg, 1);
if (ret != 1) {
dev_err(&client->dev, "write error CCR, ret=%d\n", ret);
ret = ret < 0 ? ret : -EIO;
goto out;
}
	msleep(ISL12026_NVMEM_WRITE_TIME);	/* wait out the nonvolatile write cycle */
ret = isl12026_disarm_write(client);
out:
return ret;
}
static int isl12026_rtc_set_time(struct device *dev, struct rtc_time *tm)
{
struct i2c_client *client = to_i2c_client(dev);
int ret;
u8 op[10];
struct i2c_msg msg = {
.addr = client->addr,
.flags = 0,
.len = sizeof(op),
.buf = op
};
ret = isl12026_arm_write(client);
if (ret)
return ret;
/* Set the CCR registers */
op[0] = 0;
op[1] = ISL12026_REG_SC;
op[2] = bin2bcd(tm->tm_sec); /* SC */
op[3] = bin2bcd(tm->tm_min); /* MN */
op[4] = bin2bcd(tm->tm_hour) | ISL12026_REG_HR_MIL; /* HR */
op[5] = bin2bcd(tm->tm_mday); /* DT */
op[6] = bin2bcd(tm->tm_mon + 1); /* MO */
op[7] = bin2bcd(tm->tm_year % 100); /* YR */
op[8] = bin2bcd(tm->tm_wday & 7); /* DW */
op[9] = bin2bcd(tm->tm_year >= 100 ? 20 : 19); /* Y2K */
ret = i2c_transfer(client->adapter, &msg, 1);
if (ret != 1) {
dev_err(&client->dev, "write error CCR, ret=%d\n", ret);
ret = ret < 0 ? ret : -EIO;
goto out;
}
ret = isl12026_disarm_write(client);
out:
return ret;
}
static int isl12026_rtc_read_time(struct device *dev, struct rtc_time *tm)
{
struct i2c_client *client = to_i2c_client(dev);
u8 ccr[8];
u8 addr[2];
u8 sr;
int ret;
struct i2c_msg msgs[] = {
{
.addr = client->addr,
.flags = 0,
.len = sizeof(addr),
.buf = addr
}, {
.addr = client->addr,
.flags = I2C_M_RD,
}
};
/* First, read SR */
addr[0] = 0;
addr[1] = ISL12026_REG_SR;
msgs[1].len = 1;
msgs[1].buf = &sr;
ret = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs));
if (ret != ARRAY_SIZE(msgs)) {
dev_err(&client->dev, "read error, ret=%d\n", ret);
ret = ret < 0 ? ret : -EIO;
goto out;
}
if (sr & ISL12026_REG_SR_RTCF)
dev_warn(&client->dev, "Real-Time Clock Failure on read\n");
if (sr & ISL12026_REG_SR_OSCF)
dev_warn(&client->dev, "Oscillator Failure on read\n");
/* Second, CCR regs */
addr[0] = 0;
addr[1] = ISL12026_REG_SC;
msgs[1].len = sizeof(ccr);
msgs[1].buf = ccr;
ret = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs));
if (ret != ARRAY_SIZE(msgs)) {
dev_err(&client->dev, "read error, ret=%d\n", ret);
ret = ret < 0 ? ret : -EIO;
goto out;
}
tm->tm_sec = bcd2bin(ccr[0] & 0x7F);
tm->tm_min = bcd2bin(ccr[1] & 0x7F);
if (ccr[2] & ISL12026_REG_HR_MIL)
tm->tm_hour = bcd2bin(ccr[2] & 0x3F);
else
tm->tm_hour = bcd2bin(ccr[2] & 0x1F) +
((ccr[2] & 0x20) ? 12 : 0);
tm->tm_mday = bcd2bin(ccr[3] & 0x3F);
tm->tm_mon = bcd2bin(ccr[4] & 0x1F) - 1;
tm->tm_year = bcd2bin(ccr[5]);
if (bcd2bin(ccr[7]) == 20)
tm->tm_year += 100;
tm->tm_wday = ccr[6] & 0x07;
ret = 0;
out:
return ret;
}
static const struct rtc_class_ops isl12026_rtc_ops = {
.read_time = isl12026_rtc_read_time,
.set_time = isl12026_rtc_set_time,
};
static int isl12026_nvm_read(void *p, unsigned int offset,
void *val, size_t bytes)
{
struct isl12026 *priv = p;
int ret;
u8 addr[2];
struct i2c_msg msgs[] = {
{
.addr = priv->nvm_client->addr,
.flags = 0,
.len = sizeof(addr),
.buf = addr
}, {
.addr = priv->nvm_client->addr,
.flags = I2C_M_RD,
.buf = val
}
};
/*
* offset and bytes checked and limited by nvmem core, so
* proceed without further checks.
*/
ret = mutex_lock_interruptible(&priv->rtc->ops_lock);
if (ret)
return ret;
/* 2 bytes of address, most significant first */
addr[0] = offset >> 8;
addr[1] = offset;
msgs[1].len = bytes;
ret = i2c_transfer(priv->nvm_client->adapter, msgs, ARRAY_SIZE(msgs));
mutex_unlock(&priv->rtc->ops_lock);
if (ret != ARRAY_SIZE(msgs)) {
dev_err(&priv->nvm_client->dev,
"nvmem read error, ret=%d\n", ret);
return ret < 0 ? ret : -EIO;
}
return 0;
}
static int isl12026_nvm_write(void *p, unsigned int offset,
void *val, size_t bytes)
{
struct isl12026 *priv = p;
int ret;
u8 *v = val;
size_t chunk_size, num_written;
u8 payload[ISL12026_PAGESIZE + 2]; /* page + 2 address bytes */
struct i2c_msg msgs[] = {
{
.addr = priv->nvm_client->addr,
.flags = 0,
.buf = payload
}
};
/*
* offset and bytes checked and limited by nvmem core, so
* proceed without further checks.
*/
ret = mutex_lock_interruptible(&priv->rtc->ops_lock);
if (ret)
return ret;
num_written = 0;
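	/*
	 * Size each pass so the burst stays inside one 16-byte EEPROM
	 * page: e.g. a write starting at offset 20 gets a first chunk of
	 * round_down(20, 16) + 16 - 20 = 12 bytes, after which writes
	 * are page aligned.
	 */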
while (bytes) {
chunk_size = round_down(offset, ISL12026_PAGESIZE) +
ISL12026_PAGESIZE - offset;
chunk_size = min(bytes, chunk_size);
/*
* 2 bytes of address, most significant first, followed
* by page data bytes
*/
memcpy(payload + 2, v + num_written, chunk_size);
payload[0] = offset >> 8;
payload[1] = offset;
msgs[0].len = chunk_size + 2;
ret = i2c_transfer(priv->nvm_client->adapter,
msgs, ARRAY_SIZE(msgs));
if (ret != ARRAY_SIZE(msgs)) {
dev_err(&priv->nvm_client->dev,
"nvmem write error, ret=%d\n", ret);
ret = ret < 0 ? ret : -EIO;
break;
}
ret = 0;
bytes -= chunk_size;
offset += chunk_size;
num_written += chunk_size;
msleep(ISL12026_NVMEM_WRITE_TIME);
}
mutex_unlock(&priv->rtc->ops_lock);
return ret;
}
static void isl12026_force_power_modes(struct i2c_client *client)
{
int ret;
int pwr, requested_pwr;
u32 bsw_val, sbib_val;
bool set_bsw, set_sbib;
/*
* If we can read the of_property, set the specified value.
* If there is an error reading the of_property (likely
* because it does not exist), keep the current value.
*/
ret = of_property_read_u32(client->dev.of_node,
"isil,pwr-bsw", &bsw_val);
set_bsw = (ret == 0);
ret = of_property_read_u32(client->dev.of_node,
"isil,pwr-sbib", &sbib_val);
set_sbib = (ret == 0);
/* Check if PWR.BSW and/or PWR.SBIB need specified values */
if (!set_bsw && !set_sbib)
return;
pwr = isl12026_read_reg(client, ISL12026_REG_PWR);
if (pwr < 0) {
dev_warn(&client->dev, "Error: Failed to read PWR %d\n", pwr);
return;
}
requested_pwr = pwr;
if (set_bsw) {
if (bsw_val)
requested_pwr |= ISL12026_REG_PWR_BSW;
else
requested_pwr &= ~ISL12026_REG_PWR_BSW;
} /* else keep current BSW */
if (set_sbib) {
if (sbib_val)
requested_pwr |= ISL12026_REG_PWR_SBIB;
else
requested_pwr &= ~ISL12026_REG_PWR_SBIB;
} /* else keep current SBIB */
if (pwr >= 0 && pwr != requested_pwr) {
dev_dbg(&client->dev, "PWR: %02x\n", pwr);
dev_dbg(&client->dev, "Updating PWR to: %02x\n", requested_pwr);
isl12026_write_reg(client, ISL12026_REG_PWR, requested_pwr);
}
}
static int isl12026_probe(struct i2c_client *client)
{
struct isl12026 *priv;
int ret;
struct nvmem_config nvm_cfg = {
.name = "isl12026-",
.base_dev = &client->dev,
.stride = 1,
.word_size = 1,
.size = 512,
.reg_read = isl12026_nvm_read,
.reg_write = isl12026_nvm_write,
};
if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C))
return -ENODEV;
priv = devm_kzalloc(&client->dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
i2c_set_clientdata(client, priv);
isl12026_force_power_modes(client);
priv->nvm_client = i2c_new_dummy_device(client->adapter, ISL12026_EEPROM_ADDR);
if (IS_ERR(priv->nvm_client))
return PTR_ERR(priv->nvm_client);
priv->rtc = devm_rtc_allocate_device(&client->dev);
ret = PTR_ERR_OR_ZERO(priv->rtc);
if (ret)
return ret;
priv->rtc->ops = &isl12026_rtc_ops;
nvm_cfg.priv = priv;
ret = devm_rtc_nvmem_register(priv->rtc, &nvm_cfg);
if (ret)
return ret;
return devm_rtc_register_device(priv->rtc);
}
static void isl12026_remove(struct i2c_client *client)
{
struct isl12026 *priv = i2c_get_clientdata(client);
i2c_unregister_device(priv->nvm_client);
}
static const struct of_device_id isl12026_dt_match[] = {
{ .compatible = "isil,isl12026" },
{ }
};
MODULE_DEVICE_TABLE(of, isl12026_dt_match);
static struct i2c_driver isl12026_driver = {
.driver = {
.name = "rtc-isl12026",
.of_match_table = isl12026_dt_match,
},
.probe = isl12026_probe,
.remove = isl12026_remove,
};
module_i2c_driver(isl12026_driver);
MODULE_DESCRIPTION("ISL 12026 RTC driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/rtc/rtc-isl12026.c |
// SPDX-License-Identifier: GPL-2.0-only
/* rtc-ds1343.c
*
* Driver for Dallas Semiconductor DS1343 Low Current, SPI Compatible
* Real Time Clock
*
* Author : Raghavendra Chandra Ganiga <[email protected]>
* Ankur Srivastava <[email protected]> : DS1343 Nvram Support
*/
#include <linux/init.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/device.h>
#include <linux/spi/spi.h>
#include <linux/regmap.h>
#include <linux/rtc.h>
#include <linux/bcd.h>
#include <linux/pm.h>
#include <linux/pm_wakeirq.h>
#include <linux/slab.h>
#define DALLAS_MAXIM_DS1343 0
#define DALLAS_MAXIM_DS1344 1
/* RTC DS1343 Registers */
#define DS1343_SECONDS_REG 0x00
#define DS1343_MINUTES_REG 0x01
#define DS1343_HOURS_REG 0x02
#define DS1343_DAY_REG 0x03
#define DS1343_DATE_REG 0x04
#define DS1343_MONTH_REG 0x05
#define DS1343_YEAR_REG 0x06
#define DS1343_ALM0_SEC_REG 0x07
#define DS1343_ALM0_MIN_REG 0x08
#define DS1343_ALM0_HOUR_REG 0x09
#define DS1343_ALM0_DAY_REG 0x0A
#define DS1343_ALM1_SEC_REG 0x0B
#define DS1343_ALM1_MIN_REG 0x0C
#define DS1343_ALM1_HOUR_REG 0x0D
#define DS1343_ALM1_DAY_REG 0x0E
#define DS1343_CONTROL_REG 0x0F
#define DS1343_STATUS_REG 0x10
#define DS1343_TRICKLE_REG 0x11
#define DS1343_NVRAM 0x20
#define DS1343_NVRAM_LEN 96
/* DS1343 Control Register bits */
#define DS1343_EOSC 0x80
#define DS1343_DOSF 0x20
#define DS1343_EGFIL 0x10
#define DS1343_SQW 0x08
#define DS1343_INTCN 0x04
#define DS1343_A1IE 0x02
#define DS1343_A0IE 0x01
/* DS1343 Status Register bits */
#define DS1343_OSF 0x80
#define DS1343_IRQF1 0x02
#define DS1343_IRQF0 0x01
/* DS1343 Trickle Charger Register bits */
#define DS1343_TRICKLE_MAGIC 0xa0
#define DS1343_TRICKLE_DS1 0x08
#define DS1343_TRICKLE_1K 0x01
#define DS1343_TRICKLE_2K 0x02
#define DS1343_TRICKLE_4K 0x03
static const struct spi_device_id ds1343_id[] = {
{ "ds1343", DALLAS_MAXIM_DS1343 },
{ "ds1344", DALLAS_MAXIM_DS1344 },
{ }
};
MODULE_DEVICE_TABLE(spi, ds1343_id);
struct ds1343_priv {
struct rtc_device *rtc;
struct regmap *map;
int irq;
};
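/*
 * The sysfs attributes below hang off the rtc class device, whose
 * parent is the SPI device carrying the driver data; hence the
 * dev_get_drvdata(dev->parent) in the handlers.
 */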
static ssize_t ds1343_show_glitchfilter(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct ds1343_priv *priv = dev_get_drvdata(dev->parent);
int glitch_filt_status, data;
int res;
res = regmap_read(priv->map, DS1343_CONTROL_REG, &data);
if (res)
return res;
glitch_filt_status = !!(data & DS1343_EGFIL);
if (glitch_filt_status)
return sprintf(buf, "enabled\n");
else
return sprintf(buf, "disabled\n");
}
static ssize_t ds1343_store_glitchfilter(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct ds1343_priv *priv = dev_get_drvdata(dev->parent);
int data = 0;
int res;
if (strncmp(buf, "enabled", 7) == 0)
data = DS1343_EGFIL;
else if (strncmp(buf, "disabled", 8))
return -EINVAL;
res = regmap_update_bits(priv->map, DS1343_CONTROL_REG,
DS1343_EGFIL, data);
if (res)
return res;
return count;
}
static DEVICE_ATTR(glitch_filter, S_IRUGO | S_IWUSR, ds1343_show_glitchfilter,
ds1343_store_glitchfilter);
static int ds1343_nvram_write(void *priv, unsigned int off, void *val,
size_t bytes)
{
struct ds1343_priv *ds1343 = priv;
return regmap_bulk_write(ds1343->map, DS1343_NVRAM + off, val, bytes);
}
static int ds1343_nvram_read(void *priv, unsigned int off, void *val,
size_t bytes)
{
struct ds1343_priv *ds1343 = priv;
return regmap_bulk_read(ds1343->map, DS1343_NVRAM + off, val, bytes);
}
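/*
 * The trickle charger register only takes effect when its upper
 * nibble holds the magic pattern; the low bits then select whether a
 * diode is in series and which current-limiting resistor is used.
 */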
static ssize_t ds1343_show_tricklecharger(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct ds1343_priv *priv = dev_get_drvdata(dev->parent);
int res, data;
char *diodes = "disabled", *resistors = " ";
res = regmap_read(priv->map, DS1343_TRICKLE_REG, &data);
if (res)
return res;
if ((data & 0xf0) == DS1343_TRICKLE_MAGIC) {
switch (data & 0x0c) {
case DS1343_TRICKLE_DS1:
diodes = "one diode,";
break;
default:
diodes = "no diode,";
break;
}
switch (data & 0x03) {
case DS1343_TRICKLE_1K:
resistors = "1k Ohm";
break;
case DS1343_TRICKLE_2K:
resistors = "2k Ohm";
break;
case DS1343_TRICKLE_4K:
resistors = "4k Ohm";
break;
default:
diodes = "disabled";
break;
}
}
return sprintf(buf, "%s %s\n", diodes, resistors);
}
static DEVICE_ATTR(trickle_charger, S_IRUGO, ds1343_show_tricklecharger, NULL);
static struct attribute *ds1343_attrs[] = {
&dev_attr_glitch_filter.attr,
&dev_attr_trickle_charger.attr,
NULL
};
static const struct attribute_group ds1343_attr_group = {
.attrs = ds1343_attrs,
};
static int ds1343_read_time(struct device *dev, struct rtc_time *dt)
{
struct ds1343_priv *priv = dev_get_drvdata(dev);
unsigned char buf[7];
int res;
res = regmap_bulk_read(priv->map, DS1343_SECONDS_REG, buf, 7);
if (res)
return res;
dt->tm_sec = bcd2bin(buf[0]);
dt->tm_min = bcd2bin(buf[1]);
dt->tm_hour = bcd2bin(buf[2] & 0x3F);
dt->tm_wday = bcd2bin(buf[3]) - 1;
dt->tm_mday = bcd2bin(buf[4]);
dt->tm_mon = bcd2bin(buf[5] & 0x1F) - 1;
dt->tm_year = bcd2bin(buf[6]) + 100; /* year offset from 1900 */
return 0;
}
static int ds1343_set_time(struct device *dev, struct rtc_time *dt)
{
struct ds1343_priv *priv = dev_get_drvdata(dev);
u8 buf[7];
buf[0] = bin2bcd(dt->tm_sec);
buf[1] = bin2bcd(dt->tm_min);
buf[2] = bin2bcd(dt->tm_hour) & 0x3F;
buf[3] = bin2bcd(dt->tm_wday + 1);
buf[4] = bin2bcd(dt->tm_mday);
buf[5] = bin2bcd(dt->tm_mon + 1);
buf[6] = bin2bcd(dt->tm_year - 100);
return regmap_bulk_write(priv->map, DS1343_SECONDS_REG,
buf, sizeof(buf));
}
static int ds1343_read_alarm(struct device *dev, struct rtc_wkalrm *alarm)
{
struct ds1343_priv *priv = dev_get_drvdata(dev);
unsigned char buf[4];
unsigned int val;
int res;
if (priv->irq <= 0)
return -EINVAL;
res = regmap_read(priv->map, DS1343_STATUS_REG, &val);
if (res)
return res;
alarm->pending = !!(val & DS1343_IRQF0);
res = regmap_read(priv->map, DS1343_CONTROL_REG, &val);
if (res)
return res;
alarm->enabled = !!(val & DS1343_A0IE);
res = regmap_bulk_read(priv->map, DS1343_ALM0_SEC_REG, buf, 4);
if (res)
return res;
alarm->time.tm_sec = bcd2bin(buf[0]) & 0x7f;
alarm->time.tm_min = bcd2bin(buf[1]) & 0x7f;
alarm->time.tm_hour = bcd2bin(buf[2]) & 0x3f;
alarm->time.tm_mday = bcd2bin(buf[3]) & 0x3f;
return 0;
}
static int ds1343_set_alarm(struct device *dev, struct rtc_wkalrm *alarm)
{
struct ds1343_priv *priv = dev_get_drvdata(dev);
unsigned char buf[4];
int res = 0;
if (priv->irq <= 0)
return -EINVAL;
res = regmap_update_bits(priv->map, DS1343_CONTROL_REG, DS1343_A0IE, 0);
if (res)
return res;
buf[0] = bin2bcd(alarm->time.tm_sec);
buf[1] = bin2bcd(alarm->time.tm_min);
buf[2] = bin2bcd(alarm->time.tm_hour);
buf[3] = bin2bcd(alarm->time.tm_mday);
res = regmap_bulk_write(priv->map, DS1343_ALM0_SEC_REG, buf, 4);
if (res)
return res;
if (alarm->enabled)
res = regmap_update_bits(priv->map, DS1343_CONTROL_REG,
DS1343_A0IE, DS1343_A0IE);
return res;
}
static int ds1343_alarm_irq_enable(struct device *dev, unsigned int enabled)
{
struct ds1343_priv *priv = dev_get_drvdata(dev);
if (priv->irq <= 0)
return -EINVAL;
return regmap_update_bits(priv->map, DS1343_CONTROL_REG,
DS1343_A0IE, enabled ? DS1343_A0IE : 0);
}
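/*
 * Threaded alarm IRQ handler: acknowledge IRQF0, report the alarm to
 * the RTC core, then clear A0IE so the alarm behaves as a one-shot
 * until it is armed again.
 */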
static irqreturn_t ds1343_thread(int irq, void *dev_id)
{
struct ds1343_priv *priv = dev_id;
unsigned int stat;
int res = 0;
rtc_lock(priv->rtc);
res = regmap_read(priv->map, DS1343_STATUS_REG, &stat);
if (res)
goto out;
if (stat & DS1343_IRQF0) {
stat &= ~DS1343_IRQF0;
regmap_write(priv->map, DS1343_STATUS_REG, stat);
rtc_update_irq(priv->rtc, 1, RTC_AF | RTC_IRQF);
regmap_update_bits(priv->map, DS1343_CONTROL_REG,
DS1343_A0IE, 0);
}
out:
rtc_unlock(priv->rtc);
return IRQ_HANDLED;
}
static const struct rtc_class_ops ds1343_rtc_ops = {
.read_time = ds1343_read_time,
.set_time = ds1343_set_time,
.read_alarm = ds1343_read_alarm,
.set_alarm = ds1343_set_alarm,
.alarm_irq_enable = ds1343_alarm_irq_enable,
};
static int ds1343_probe(struct spi_device *spi)
{
struct ds1343_priv *priv;
struct regmap_config config = { .reg_bits = 8, .val_bits = 8,
.write_flag_mask = 0x80, };
unsigned int data;
int res;
struct nvmem_config nvmem_cfg = {
.name = "ds1343-",
.word_size = 1,
.stride = 1,
.size = DS1343_NVRAM_LEN,
.reg_read = ds1343_nvram_read,
.reg_write = ds1343_nvram_write,
};
priv = devm_kzalloc(&spi->dev, sizeof(struct ds1343_priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
/*
 * The DS1343 works in SPI mode 3 and its chip select is active
 * high. Active high is requested as "inverse polarity" because
 * GPIO-based chip selects can be logically active high yet
 * inverted by the GPIO library.
 */
spi->mode |= SPI_MODE_3;
spi->mode ^= SPI_CS_HIGH;
spi->bits_per_word = 8;
res = spi_setup(spi);
if (res)
return res;
spi_set_drvdata(spi, priv);
priv->map = devm_regmap_init_spi(spi, &config);
if (IS_ERR(priv->map)) {
dev_err(&spi->dev, "spi regmap init failed for rtc ds1343\n");
return PTR_ERR(priv->map);
}
res = regmap_read(priv->map, DS1343_SECONDS_REG, &data);
if (res)
return res;
regmap_read(priv->map, DS1343_CONTROL_REG, &data);
data |= DS1343_INTCN;
data &= ~(DS1343_EOSC | DS1343_A1IE | DS1343_A0IE);
regmap_write(priv->map, DS1343_CONTROL_REG, data);
regmap_read(priv->map, DS1343_STATUS_REG, &data);
data &= ~(DS1343_OSF | DS1343_IRQF1 | DS1343_IRQF0);
regmap_write(priv->map, DS1343_STATUS_REG, data);
priv->rtc = devm_rtc_allocate_device(&spi->dev);
if (IS_ERR(priv->rtc))
return PTR_ERR(priv->rtc);
priv->rtc->ops = &ds1343_rtc_ops;
priv->rtc->range_min = RTC_TIMESTAMP_BEGIN_2000;
priv->rtc->range_max = RTC_TIMESTAMP_END_2099;
res = rtc_add_group(priv->rtc, &ds1343_attr_group);
if (res)
dev_err(&spi->dev,
"unable to create sysfs entries for rtc ds1343\n");
res = devm_rtc_register_device(priv->rtc);
if (res)
return res;
nvmem_cfg.priv = priv;
devm_rtc_nvmem_register(priv->rtc, &nvmem_cfg);
priv->irq = spi->irq;
if (priv->irq >= 0) {
res = devm_request_threaded_irq(&spi->dev, spi->irq, NULL,
ds1343_thread, IRQF_ONESHOT,
"ds1343", priv);
if (res) {
priv->irq = -1;
dev_err(&spi->dev,
"unable to request irq for rtc ds1343\n");
} else {
device_init_wakeup(&spi->dev, true);
dev_pm_set_wake_irq(&spi->dev, spi->irq);
}
}
return 0;
}
static void ds1343_remove(struct spi_device *spi)
{
dev_pm_clear_wake_irq(&spi->dev);
}
#ifdef CONFIG_PM_SLEEP
static int ds1343_suspend(struct device *dev)
{
struct spi_device *spi = to_spi_device(dev);
if (spi->irq >= 0 && device_may_wakeup(dev))
enable_irq_wake(spi->irq);
return 0;
}
static int ds1343_resume(struct device *dev)
{
struct spi_device *spi = to_spi_device(dev);
if (spi->irq >= 0 && device_may_wakeup(dev))
disable_irq_wake(spi->irq);
return 0;
}
#endif
static SIMPLE_DEV_PM_OPS(ds1343_pm, ds1343_suspend, ds1343_resume);
static struct spi_driver ds1343_driver = {
.driver = {
.name = "ds1343",
.pm = &ds1343_pm,
},
.probe = ds1343_probe,
.remove = ds1343_remove,
.id_table = ds1343_id,
};
module_spi_driver(ds1343_driver);
MODULE_DESCRIPTION("DS1343 RTC SPI Driver");
MODULE_AUTHOR("Raghavendra Chandra Ganiga <[email protected]>,"
"Ankur Srivastava <[email protected]>");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/rtc/rtc-ds1343.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* An i2c driver for the Xicor/Intersil X1205 RTC
* Copyright 2004 Karen Spearel
* Copyright 2005 Alessandro Zummo
*
* please send all reports to:
* Karen Spearel <kas111 at gmail dot com>
* Alessandro Zummo <[email protected]>
*
* based on a lot of other RTC drivers.
*
* Information and datasheet:
* http://www.intersil.com/cda/deviceinfo/0,1477,X1205,00.html
*/
#include <linux/i2c.h>
#include <linux/bcd.h>
#include <linux/rtc.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/bitops.h>
/* offsets into CCR area */
#define CCR_SEC 0
#define CCR_MIN 1
#define CCR_HOUR 2
#define CCR_MDAY 3
#define CCR_MONTH 4
#define CCR_YEAR 5
#define CCR_WDAY 6
#define CCR_Y2K 7
#define X1205_REG_SR 0x3F /* status register */
#define X1205_REG_Y2K 0x37
#define X1205_REG_DW 0x36
#define X1205_REG_YR 0x35
#define X1205_REG_MO 0x34
#define X1205_REG_DT 0x33
#define X1205_REG_HR 0x32
#define X1205_REG_MN 0x31
#define X1205_REG_SC 0x30
#define X1205_REG_DTR 0x13
#define X1205_REG_ATR 0x12
#define X1205_REG_INT 0x11
#define X1205_REG_0 0x10
#define X1205_REG_Y2K1 0x0F
#define X1205_REG_DWA1 0x0E
#define X1205_REG_YRA1 0x0D
#define X1205_REG_MOA1 0x0C
#define X1205_REG_DTA1 0x0B
#define X1205_REG_HRA1 0x0A
#define X1205_REG_MNA1 0x09
#define X1205_REG_SCA1 0x08
#define X1205_REG_Y2K0 0x07
#define X1205_REG_DWA0 0x06
#define X1205_REG_YRA0 0x05
#define X1205_REG_MOA0 0x04
#define X1205_REG_DTA0 0x03
#define X1205_REG_HRA0 0x02
#define X1205_REG_MNA0 0x01
#define X1205_REG_SCA0 0x00
#define X1205_CCR_BASE 0x30 /* Base address of CCR */
#define X1205_ALM0_BASE 0x00 /* Base address of ALARM0 */
#define X1205_SR_RTCF 0x01 /* Clock failure */
#define X1205_SR_WEL 0x02 /* Write Enable Latch */
#define X1205_SR_RWEL 0x04 /* Register Write Enable */
#define X1205_SR_AL0 0x20 /* Alarm 0 match */
#define X1205_DTR_DTR0 0x01
#define X1205_DTR_DTR1 0x02
#define X1205_DTR_DTR2 0x04
#define X1205_HR_MIL 0x80 /* Set in ccr.hour for 24 hr mode */
#define X1205_INT_AL0E 0x20 /* Alarm 0 enable */
static struct i2c_driver x1205_driver;
/*
 * In the routines that deal directly with the x1205 hardware, we use
 * rtc_time -- month 0-11, hour 0-23, yr = calendar year - epoch.
 * The epoch is initialized as 2000. Time is kept in UTC.
 */
static int x1205_get_datetime(struct i2c_client *client, struct rtc_time *tm,
unsigned char reg_base)
{
unsigned char dt_addr[2] = { 0, reg_base };
unsigned char buf[8];
int i;
struct i2c_msg msgs[] = {
{/* setup read ptr */
.addr = client->addr,
.len = 2,
.buf = dt_addr
},
{/* read date */
.addr = client->addr,
.flags = I2C_M_RD,
.len = 8,
.buf = buf
},
};
/* read date registers */
if (i2c_transfer(client->adapter, &msgs[0], 2) != 2) {
dev_err(&client->dev, "%s: read error\n", __func__);
return -EIO;
}
dev_dbg(&client->dev,
"%s: raw read data - sec=%02x, min=%02x, hr=%02x, "
"mday=%02x, mon=%02x, year=%02x, wday=%02x, y2k=%02x\n",
__func__,
buf[0], buf[1], buf[2], buf[3],
buf[4], buf[5], buf[6], buf[7]);
/* Mask out the enable bits if these are alarm registers */
if (reg_base < X1205_CCR_BASE)
for (i = 0; i <= 4; i++)
buf[i] &= 0x7F;
tm->tm_sec = bcd2bin(buf[CCR_SEC]);
tm->tm_min = bcd2bin(buf[CCR_MIN]);
tm->tm_hour = bcd2bin(buf[CCR_HOUR] & 0x3F); /* hr is 0-23 */
tm->tm_mday = bcd2bin(buf[CCR_MDAY]);
tm->tm_mon = bcd2bin(buf[CCR_MONTH]) - 1; /* mon is 0-11 */
tm->tm_year = bcd2bin(buf[CCR_YEAR])
+ (bcd2bin(buf[CCR_Y2K]) * 100) - 1900;
tm->tm_wday = buf[CCR_WDAY];
dev_dbg(&client->dev, "%s: tm is secs=%d, mins=%d, hours=%d, "
"mday=%d, mon=%d, year=%d, wday=%d\n",
__func__,
tm->tm_sec, tm->tm_min, tm->tm_hour,
tm->tm_mday, tm->tm_mon, tm->tm_year, tm->tm_wday);
return 0;
}
static int x1205_get_status(struct i2c_client *client, unsigned char *sr)
{
static unsigned char sr_addr[2] = { 0, X1205_REG_SR };
struct i2c_msg msgs[] = {
{ /* setup read ptr */
.addr = client->addr,
.len = 2,
.buf = sr_addr
},
{ /* read status */
.addr = client->addr,
.flags = I2C_M_RD,
.len = 1,
.buf = sr
},
};
/* read status register */
if (i2c_transfer(client->adapter, &msgs[0], 2) != 2) {
dev_err(&client->dev, "%s: read error\n", __func__);
return -EIO;
}
return 0;
}
static int x1205_set_datetime(struct i2c_client *client, struct rtc_time *tm,
u8 reg_base, unsigned char alm_enable)
{
int i, xfer;
unsigned char rdata[10] = { 0, reg_base };
unsigned char *buf = rdata + 2;
static const unsigned char wel[3] = { 0, X1205_REG_SR,
X1205_SR_WEL };
static const unsigned char rwel[3] = { 0, X1205_REG_SR,
X1205_SR_WEL | X1205_SR_RWEL };
static const unsigned char diswe[3] = { 0, X1205_REG_SR, 0 };
dev_dbg(&client->dev,
"%s: sec=%d min=%d hour=%d mday=%d mon=%d year=%d wday=%d\n",
__func__, tm->tm_sec, tm->tm_min, tm->tm_hour, tm->tm_mday,
tm->tm_mon, tm->tm_year, tm->tm_wday);
buf[CCR_SEC] = bin2bcd(tm->tm_sec);
buf[CCR_MIN] = bin2bcd(tm->tm_min);
/* set hour and 24hr bit */
buf[CCR_HOUR] = bin2bcd(tm->tm_hour) | X1205_HR_MIL;
buf[CCR_MDAY] = bin2bcd(tm->tm_mday);
/* month, 1 - 12 */
buf[CCR_MONTH] = bin2bcd(tm->tm_mon + 1);
/* year, since the rtc epoch*/
buf[CCR_YEAR] = bin2bcd(tm->tm_year % 100);
buf[CCR_WDAY] = tm->tm_wday & 0x07;
buf[CCR_Y2K] = bin2bcd((tm->tm_year + 1900) / 100);
/* If writing alarm registers, set compare bits on registers 0-4 */
if (reg_base < X1205_CCR_BASE)
for (i = 0; i <= 4; i++)
buf[i] |= 0x80;
/* this sequence is required to unlock the chip */
xfer = i2c_master_send(client, wel, 3);
if (xfer != 3) {
dev_err(&client->dev, "%s: wel - %d\n", __func__, xfer);
return -EIO;
}
xfer = i2c_master_send(client, rwel, 3);
if (xfer != 3) {
dev_err(&client->dev, "%s: rwel - %d\n", __func__, xfer);
return -EIO;
}
xfer = i2c_master_send(client, rdata, sizeof(rdata));
if (xfer != sizeof(rdata)) {
dev_err(&client->dev,
"%s: result=%d addr=%02x, data=%02x\n",
__func__,
xfer, rdata[1], rdata[2]);
return -EIO;
}
/* If we wrote to the nonvolatile region, wait 10 msec for the write cycle */
if (reg_base < X1205_CCR_BASE) {
unsigned char al0e[3] = { 0, X1205_REG_INT, 0 };
msleep(10);
/* ...and set or clear the AL0E bit in the INT register */
/* Need to set RWEL again as the write has cleared it */
xfer = i2c_master_send(client, rwel, 3);
if (xfer != 3) {
dev_err(&client->dev,
"%s: aloe rwel - %d\n",
__func__,
xfer);
return -EIO;
}
if (alm_enable)
al0e[2] = X1205_INT_AL0E;
xfer = i2c_master_send(client, al0e, 3);
if (xfer != 3) {
dev_err(&client->dev,
"%s: al0e - %d\n",
__func__,
xfer);
return -EIO;
}
/* and wait 10msec again for this write to complete */
msleep(10);
}
/* disable further writes */
xfer = i2c_master_send(client, diswe, 3);
if (xfer != 3) {
dev_err(&client->dev, "%s: diswe - %d\n", __func__, xfer);
return -EIO;
}
return 0;
}
static int x1205_fix_osc(struct i2c_client *client)
{
int err;
struct rtc_time tm;
memset(&tm, 0, sizeof(tm));
err = x1205_set_datetime(client, &tm, X1205_CCR_BASE, 0);
if (err < 0)
dev_err(&client->dev, "unable to restart the oscillator\n");
return err;
}
static int x1205_get_dtrim(struct i2c_client *client, int *trim)
{
unsigned char dtr;
static unsigned char dtr_addr[2] = { 0, X1205_REG_DTR };
struct i2c_msg msgs[] = {
{ /* setup read ptr */
.addr = client->addr,
.len = 2,
.buf = dtr_addr
},
{ /* read dtr */
.addr = client->addr,
.flags = I2C_M_RD,
.len = 1,
.buf = &dtr
},
};
/* read dtr register */
if (i2c_transfer(client->adapter, &msgs[0], 2) != 2) {
dev_err(&client->dev, "%s: read error\n", __func__);
return -EIO;
}
dev_dbg(&client->dev, "%s: raw dtr=%x\n", __func__, dtr);
*trim = 0;
if (dtr & X1205_DTR_DTR0)
*trim += 20;
if (dtr & X1205_DTR_DTR1)
*trim += 10;
if (dtr & X1205_DTR_DTR2)
*trim = -*trim;
return 0;
}
static int x1205_get_atrim(struct i2c_client *client, int *trim)
{
s8 atr;
static unsigned char atr_addr[2] = { 0, X1205_REG_ATR };
struct i2c_msg msgs[] = {
{/* setup read ptr */
.addr = client->addr,
.len = 2,
.buf = atr_addr
},
{/* read atr */
.addr = client->addr,
.flags = I2C_M_RD,
.len = 1,
.buf = &atr
},
};
/* read atr register */
if (i2c_transfer(client->adapter, &msgs[0], 2) != 2) {
dev_err(&client->dev, "%s: read error\n", __func__);
return -EIO;
}
dev_dbg(&client->dev, "%s: raw atr=%x\n", __func__, atr);
/* atr is a two's complement value on 6 bits,
* perform sign extension. The formula is
* Catr = (atr * 0.25pF) + 11.00pF.
*/
atr = sign_extend32(atr, 5);
dev_dbg(&client->dev, "%s: raw atr=%x (%d)\n", __func__, atr, atr);
*trim = (atr * 250) + 11000;
dev_dbg(&client->dev, "%s: real=%d\n", __func__, *trim);
return 0;
}
struct x1205_limit {
unsigned char reg, mask, min, max;
};
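/*
 * Heuristic probe: with no dedicated ID register to check, read back
 * a set of registers and require that reserved bits are zero and
 * that the BCD date/time fields stay within plausible limits.
 */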
static int x1205_validate_client(struct i2c_client *client)
{
int i, xfer;
/* Probe array. We will read the register at the specified
* address and check if the given bits are zero.
*/
static const unsigned char probe_zero_pattern[] = {
/* register, mask */
X1205_REG_SR, 0x18,
X1205_REG_DTR, 0xF8,
X1205_REG_ATR, 0xC0,
X1205_REG_INT, 0x18,
X1205_REG_0, 0xFF,
};
static const struct x1205_limit probe_limits_pattern[] = {
/* register, mask, min, max */
{ X1205_REG_Y2K, 0xFF, 19, 20 },
{ X1205_REG_DW, 0xFF, 0, 6 },
{ X1205_REG_YR, 0xFF, 0, 99 },
{ X1205_REG_MO, 0xFF, 0, 12 },
{ X1205_REG_DT, 0xFF, 0, 31 },
{ X1205_REG_HR, 0x7F, 0, 23 },
{ X1205_REG_MN, 0xFF, 0, 59 },
{ X1205_REG_SC, 0xFF, 0, 59 },
{ X1205_REG_Y2K1, 0xFF, 19, 20 },
{ X1205_REG_Y2K0, 0xFF, 19, 20 },
};
/* check that registers have bits a 0 where expected */
for (i = 0; i < ARRAY_SIZE(probe_zero_pattern); i += 2) {
unsigned char buf;
unsigned char addr[2] = { 0, probe_zero_pattern[i] };
struct i2c_msg msgs[2] = {
{
.addr = client->addr,
.len = 2,
.buf = addr
},
{
.addr = client->addr,
.flags = I2C_M_RD,
.len = 1,
.buf = &buf
},
};
xfer = i2c_transfer(client->adapter, msgs, 2);
if (xfer != 2) {
dev_err(&client->dev,
"%s: could not read register %x\n",
__func__, probe_zero_pattern[i]);
return -EIO;
}
if ((buf & probe_zero_pattern[i+1]) != 0) {
dev_err(&client->dev,
"%s: register=%02x, zero pattern=%d, value=%x\n",
__func__, probe_zero_pattern[i], i, buf);
return -ENODEV;
}
}
/* check limits (only registers with bcd values) */
for (i = 0; i < ARRAY_SIZE(probe_limits_pattern); i++) {
unsigned char reg, value;
unsigned char addr[2] = { 0, probe_limits_pattern[i].reg };
struct i2c_msg msgs[2] = {
{
.addr = client->addr,
.len = 2,
.buf = addr
},
{
.addr = client->addr,
.flags = I2C_M_RD,
.len = 1,
.buf = &reg
},
};
xfer = i2c_transfer(client->adapter, msgs, 2);
if (xfer != 2) {
dev_err(&client->dev,
"%s: could not read register %x\n",
__func__, probe_limits_pattern[i].reg);
return -EIO;
}
value = bcd2bin(reg & probe_limits_pattern[i].mask);
if (value > probe_limits_pattern[i].max ||
value < probe_limits_pattern[i].min) {
dev_dbg(&client->dev,
"%s: register=%x, lim pattern=%d, value=%d\n",
__func__, probe_limits_pattern[i].reg,
i, value);
return -ENODEV;
}
}
return 0;
}
static int x1205_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
{
int err;
unsigned char intreg, status;
static unsigned char int_addr[2] = { 0, X1205_REG_INT };
struct i2c_client *client = to_i2c_client(dev);
struct i2c_msg msgs[] = {
{ /* setup read ptr */
.addr = client->addr,
.len = 2,
.buf = int_addr
},
{/* read INT register */
.addr = client->addr,
.flags = I2C_M_RD,
.len = 1,
.buf = &intreg
},
};
/* read interrupt register and status register */
if (i2c_transfer(client->adapter, &msgs[0], 2) != 2) {
dev_err(&client->dev, "%s: read error\n", __func__);
return -EIO;
}
err = x1205_get_status(client, &status);
if (err == 0) {
alrm->pending = (status & X1205_SR_AL0) ? 1 : 0;
alrm->enabled = (intreg & X1205_INT_AL0E) ? 1 : 0;
err = x1205_get_datetime(client, &alrm->time, X1205_ALM0_BASE);
}
return err;
}
static int x1205_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
{
return x1205_set_datetime(to_i2c_client(dev),
&alrm->time, X1205_ALM0_BASE, alrm->enabled);
}
static int x1205_rtc_read_time(struct device *dev, struct rtc_time *tm)
{
return x1205_get_datetime(to_i2c_client(dev),
tm, X1205_CCR_BASE);
}
static int x1205_rtc_set_time(struct device *dev, struct rtc_time *tm)
{
return x1205_set_datetime(to_i2c_client(dev),
tm, X1205_CCR_BASE, 0);
}
static int x1205_rtc_proc(struct device *dev, struct seq_file *seq)
{
int err, dtrim, atrim;
err = x1205_get_dtrim(to_i2c_client(dev), &dtrim);
if (!err)
seq_printf(seq, "digital_trim\t: %d ppm\n", dtrim);
err = x1205_get_atrim(to_i2c_client(dev), &atrim);
if (!err)
seq_printf(seq, "analog_trim\t: %d.%02d pF\n",
atrim / 1000, atrim % 1000);
return 0;
}
static const struct rtc_class_ops x1205_rtc_ops = {
.proc = x1205_rtc_proc,
.read_time = x1205_rtc_read_time,
.set_time = x1205_rtc_set_time,
.read_alarm = x1205_rtc_read_alarm,
.set_alarm = x1205_rtc_set_alarm,
};
static ssize_t x1205_sysfs_show_atrim(struct device *dev,
struct device_attribute *attr, char *buf)
{
int err, atrim;
err = x1205_get_atrim(to_i2c_client(dev), &atrim);
if (err)
return err;
return sprintf(buf, "%d.%02d pF\n", atrim / 1000, atrim % 1000);
}
static DEVICE_ATTR(atrim, S_IRUGO, x1205_sysfs_show_atrim, NULL);
static ssize_t x1205_sysfs_show_dtrim(struct device *dev,
struct device_attribute *attr, char *buf)
{
int err, dtrim;
err = x1205_get_dtrim(to_i2c_client(dev), &dtrim);
if (err)
return err;
return sprintf(buf, "%d ppm\n", dtrim);
}
static DEVICE_ATTR(dtrim, S_IRUGO, x1205_sysfs_show_dtrim, NULL);
static int x1205_sysfs_register(struct device *dev)
{
int err;
err = device_create_file(dev, &dev_attr_atrim);
if (err)
return err;
err = device_create_file(dev, &dev_attr_dtrim);
if (err)
device_remove_file(dev, &dev_attr_atrim);
return err;
}
static void x1205_sysfs_unregister(struct device *dev)
{
device_remove_file(dev, &dev_attr_atrim);
device_remove_file(dev, &dev_attr_dtrim);
}
static int x1205_probe(struct i2c_client *client)
{
int err = 0;
unsigned char sr;
struct rtc_device *rtc;
dev_dbg(&client->dev, "%s\n", __func__);
if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C))
return -ENODEV;
if (x1205_validate_client(client) < 0)
return -ENODEV;
rtc = devm_rtc_device_register(&client->dev, x1205_driver.driver.name,
&x1205_rtc_ops, THIS_MODULE);
if (IS_ERR(rtc))
return PTR_ERR(rtc);
i2c_set_clientdata(client, rtc);
/* Check for power failures and eventually enable the osc */
err = x1205_get_status(client, &sr);
if (!err) {
if (sr & X1205_SR_RTCF) {
dev_err(&client->dev,
"power failure detected, "
"please set the clock\n");
udelay(50);
x1205_fix_osc(client);
}
} else {
dev_err(&client->dev, "couldn't read status\n");
}
err = x1205_sysfs_register(&client->dev);
if (err)
dev_err(&client->dev, "Unable to create sysfs entries\n");
return 0;
}
static void x1205_remove(struct i2c_client *client)
{
x1205_sysfs_unregister(&client->dev);
}
static const struct i2c_device_id x1205_id[] = {
{ "x1205", 0 },
{ }
};
MODULE_DEVICE_TABLE(i2c, x1205_id);
static const struct of_device_id x1205_dt_ids[] = {
{ .compatible = "xircom,x1205", },
{},
};
MODULE_DEVICE_TABLE(of, x1205_dt_ids);
static struct i2c_driver x1205_driver = {
.driver = {
.name = "rtc-x1205",
.of_match_table = x1205_dt_ids,
},
.probe = x1205_probe,
.remove = x1205_remove,
.id_table = x1205_id,
};
module_i2c_driver(x1205_driver);
MODULE_AUTHOR(
"Karen Spearel <kas111 at gmail dot com>, "
"Alessandro Zummo <[email protected]>");
MODULE_DESCRIPTION("Xicor/Intersil X1205 RTC driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/rtc/rtc-x1205.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* rtc-as3722.c - Real Time Clock driver for ams AS3722 PMICs
*
* Copyright (C) 2013 ams AG
* Copyright (c) 2013, NVIDIA Corporation. All rights reserved.
*
* Author: Florian Lobmaier <[email protected]>
* Author: Laxman Dewangan <[email protected]>
*/
#include <linux/bcd.h>
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/ioctl.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mfd/as3722.h>
#include <linux/platform_device.h>
#include <linux/rtc.h>
#include <linux/time.h>
#define AS3722_RTC_START_YEAR 2000
struct as3722_rtc {
struct rtc_device *rtc;
struct device *dev;
struct as3722 *as3722;
int alarm_irq;
bool irq_enable;
};
static void as3722_time_to_reg(u8 *rbuff, struct rtc_time *tm)
{
rbuff[0] = bin2bcd(tm->tm_sec);
rbuff[1] = bin2bcd(tm->tm_min);
rbuff[2] = bin2bcd(tm->tm_hour);
rbuff[3] = bin2bcd(tm->tm_mday);
rbuff[4] = bin2bcd(tm->tm_mon + 1);
rbuff[5] = bin2bcd(tm->tm_year - (AS3722_RTC_START_YEAR - 1900));
}
static void as3722_reg_to_time(u8 *rbuff, struct rtc_time *tm)
{
tm->tm_sec = bcd2bin(rbuff[0] & 0x7F);
tm->tm_min = bcd2bin(rbuff[1] & 0x7F);
tm->tm_hour = bcd2bin(rbuff[2] & 0x3F);
tm->tm_mday = bcd2bin(rbuff[3] & 0x3F);
tm->tm_mon = bcd2bin(rbuff[4] & 0x1F) - 1;
tm->tm_year = (AS3722_RTC_START_YEAR - 1900) + bcd2bin(rbuff[5] & 0x7F);
}
static int as3722_rtc_read_time(struct device *dev, struct rtc_time *tm)
{
struct as3722_rtc *as3722_rtc = dev_get_drvdata(dev);
struct as3722 *as3722 = as3722_rtc->as3722;
u8 as_time_array[6];
int ret;
ret = as3722_block_read(as3722, AS3722_RTC_SECOND_REG,
6, as_time_array);
if (ret < 0) {
dev_err(dev, "RTC_SECOND reg block read failed %d\n", ret);
return ret;
}
as3722_reg_to_time(as_time_array, tm);
return 0;
}
static int as3722_rtc_set_time(struct device *dev, struct rtc_time *tm)
{
struct as3722_rtc *as3722_rtc = dev_get_drvdata(dev);
struct as3722 *as3722 = as3722_rtc->as3722;
u8 as_time_array[6];
int ret;
if (tm->tm_year < (AS3722_RTC_START_YEAR - 1900))
return -EINVAL;
as3722_time_to_reg(as_time_array, tm);
ret = as3722_block_write(as3722, AS3722_RTC_SECOND_REG, 6,
as_time_array);
if (ret < 0)
dev_err(dev, "RTC_SECOND reg block write failed %d\n", ret);
return ret;
}
static int as3722_rtc_alarm_irq_enable(struct device *dev,
unsigned int enabled)
{
struct as3722_rtc *as3722_rtc = dev_get_drvdata(dev);
if (enabled && !as3722_rtc->irq_enable) {
enable_irq(as3722_rtc->alarm_irq);
as3722_rtc->irq_enable = true;
} else if (!enabled && as3722_rtc->irq_enable) {
disable_irq(as3722_rtc->alarm_irq);
as3722_rtc->irq_enable = false;
}
return 0;
}
static int as3722_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
{
struct as3722_rtc *as3722_rtc = dev_get_drvdata(dev);
struct as3722 *as3722 = as3722_rtc->as3722;
u8 as_time_array[6];
int ret;
ret = as3722_block_read(as3722, AS3722_RTC_ALARM_SECOND_REG, 6,
as_time_array);
if (ret < 0) {
dev_err(dev, "RTC_ALARM_SECOND block read failed %d\n", ret);
return ret;
}
as3722_reg_to_time(as_time_array, &alrm->time);
return 0;
}
static int as3722_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
{
struct as3722_rtc *as3722_rtc = dev_get_drvdata(dev);
struct as3722 *as3722 = as3722_rtc->as3722;
u8 as_time_array[6];
int ret;
if (alrm->time.tm_year < (AS3722_RTC_START_YEAR - 1900))
return -EINVAL;
ret = as3722_rtc_alarm_irq_enable(dev, 0);
if (ret < 0) {
dev_err(dev, "Disable RTC alarm failed\n");
return ret;
}
as3722_time_to_reg(as_time_array, &alrm->time);
ret = as3722_block_write(as3722, AS3722_RTC_ALARM_SECOND_REG, 6,
as_time_array);
if (ret < 0) {
dev_err(dev, "RTC_ALARM_SECOND block write failed %d\n", ret);
return ret;
}
if (alrm->enabled)
ret = as3722_rtc_alarm_irq_enable(dev, alrm->enabled);
return ret;
}
static irqreturn_t as3722_alarm_irq(int irq, void *data)
{
struct as3722_rtc *as3722_rtc = data;
rtc_update_irq(as3722_rtc->rtc, 1, RTC_IRQF | RTC_AF);
return IRQ_HANDLED;
}
static const struct rtc_class_ops as3722_rtc_ops = {
.read_time = as3722_rtc_read_time,
.set_time = as3722_rtc_set_time,
.read_alarm = as3722_rtc_read_alarm,
.set_alarm = as3722_rtc_set_alarm,
.alarm_irq_enable = as3722_rtc_alarm_irq_enable,
};
static int as3722_rtc_probe(struct platform_device *pdev)
{
struct as3722 *as3722 = dev_get_drvdata(pdev->dev.parent);
struct as3722_rtc *as3722_rtc;
int ret;
as3722_rtc = devm_kzalloc(&pdev->dev, sizeof(*as3722_rtc), GFP_KERNEL);
if (!as3722_rtc)
return -ENOMEM;
as3722_rtc->as3722 = as3722;
as3722_rtc->dev = &pdev->dev;
platform_set_drvdata(pdev, as3722_rtc);
/* Enable the RTC to make sure it is running. */
ret = as3722_update_bits(as3722, AS3722_RTC_CONTROL_REG,
AS3722_RTC_ON | AS3722_RTC_ALARM_WAKEUP_EN,
AS3722_RTC_ON | AS3722_RTC_ALARM_WAKEUP_EN);
if (ret < 0) {
dev_err(&pdev->dev, "RTC_CONTROL reg write failed: %d\n", ret);
return ret;
}
device_init_wakeup(&pdev->dev, 1);
as3722_rtc->rtc = devm_rtc_device_register(&pdev->dev, "as3722-rtc",
&as3722_rtc_ops, THIS_MODULE);
if (IS_ERR(as3722_rtc->rtc)) {
ret = PTR_ERR(as3722_rtc->rtc);
dev_err(&pdev->dev, "RTC register failed: %d\n", ret);
return ret;
}
as3722_rtc->alarm_irq = platform_get_irq(pdev, 0);
dev_info(&pdev->dev, "RTC interrupt %d\n", as3722_rtc->alarm_irq);
ret = devm_request_threaded_irq(&pdev->dev, as3722_rtc->alarm_irq, NULL,
as3722_alarm_irq, IRQF_ONESHOT,
"rtc-alarm", as3722_rtc);
if (ret < 0) {
dev_err(&pdev->dev, "Failed to request alarm IRQ %d: %d\n",
as3722_rtc->alarm_irq, ret);
return ret;
}
disable_irq(as3722_rtc->alarm_irq);
return 0;
}
#ifdef CONFIG_PM_SLEEP
static int as3722_rtc_suspend(struct device *dev)
{
struct as3722_rtc *as3722_rtc = dev_get_drvdata(dev);
if (device_may_wakeup(dev))
enable_irq_wake(as3722_rtc->alarm_irq);
return 0;
}
static int as3722_rtc_resume(struct device *dev)
{
struct as3722_rtc *as3722_rtc = dev_get_drvdata(dev);
if (device_may_wakeup(dev))
disable_irq_wake(as3722_rtc->alarm_irq);
return 0;
}
#endif
static SIMPLE_DEV_PM_OPS(as3722_rtc_pm_ops, as3722_rtc_suspend,
as3722_rtc_resume);
static struct platform_driver as3722_rtc_driver = {
.probe = as3722_rtc_probe,
.driver = {
.name = "as3722-rtc",
.pm = &as3722_rtc_pm_ops,
},
};
module_platform_driver(as3722_rtc_driver);
MODULE_DESCRIPTION("RTC driver for AS3722 PMICs");
MODULE_ALIAS("platform:as3722-rtc");
MODULE_AUTHOR("Florian Lobmaier <[email protected]>");
MODULE_AUTHOR("Laxman Dewangan <[email protected]>");
MODULE_LICENSE("GPL");
| linux-master | drivers/rtc/rtc-as3722.c |
// SPDX-License-Identifier: GPL-2.0
/* drivers/rtc/rtc-goldfish.c
*
* Copyright (C) 2007 Google, Inc.
* Copyright (C) 2017 Imagination Technologies Ltd.
*/
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/rtc.h>
#include <linux/goldfish.h>
#include <clocksource/timer-goldfish.h>
struct goldfish_rtc {
void __iomem *base;
int irq;
struct rtc_device *rtc;
};
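/*
 * The goldfish timer device keeps time and alarms as 64-bit
 * nanosecond counts exposed through 32-bit high/low register pairs;
 * the helpers below convert to and from whole seconds.
 */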
static int goldfish_rtc_read_alarm(struct device *dev,
struct rtc_wkalrm *alrm)
{
u64 rtc_alarm;
u64 rtc_alarm_low;
u64 rtc_alarm_high;
void __iomem *base;
struct goldfish_rtc *rtcdrv;
rtcdrv = dev_get_drvdata(dev);
base = rtcdrv->base;
rtc_alarm_low = gf_ioread32(base + TIMER_ALARM_LOW);
rtc_alarm_high = gf_ioread32(base + TIMER_ALARM_HIGH);
rtc_alarm = (rtc_alarm_high << 32) | rtc_alarm_low;
do_div(rtc_alarm, NSEC_PER_SEC);
memset(alrm, 0, sizeof(struct rtc_wkalrm));
rtc_time64_to_tm(rtc_alarm, &alrm->time);
if (gf_ioread32(base + TIMER_ALARM_STATUS))
alrm->enabled = 1;
else
alrm->enabled = 0;
return 0;
}
static int goldfish_rtc_set_alarm(struct device *dev,
struct rtc_wkalrm *alrm)
{
struct goldfish_rtc *rtcdrv;
u64 rtc_alarm64;
u64 rtc_status_reg;
void __iomem *base;
rtcdrv = dev_get_drvdata(dev);
base = rtcdrv->base;
if (alrm->enabled) {
rtc_alarm64 = rtc_tm_to_time64(&alrm->time) * NSEC_PER_SEC;
gf_iowrite32((rtc_alarm64 >> 32), base + TIMER_ALARM_HIGH);
gf_iowrite32(rtc_alarm64, base + TIMER_ALARM_LOW);
gf_iowrite32(1, base + TIMER_IRQ_ENABLED);
} else {
/*
* if this function was called with enabled=0
* then it could mean that the application is
* trying to cancel an ongoing alarm
*/
rtc_status_reg = gf_ioread32(base + TIMER_ALARM_STATUS);
if (rtc_status_reg)
gf_iowrite32(1, base + TIMER_CLEAR_ALARM);
}
return 0;
}
static int goldfish_rtc_alarm_irq_enable(struct device *dev,
unsigned int enabled)
{
void __iomem *base;
struct goldfish_rtc *rtcdrv;
rtcdrv = dev_get_drvdata(dev);
base = rtcdrv->base;
if (enabled)
gf_iowrite32(1, base + TIMER_IRQ_ENABLED);
else
gf_iowrite32(0, base + TIMER_IRQ_ENABLED);
return 0;
}
static irqreturn_t goldfish_rtc_interrupt(int irq, void *dev_id)
{
struct goldfish_rtc *rtcdrv = dev_id;
void __iomem *base = rtcdrv->base;
gf_iowrite32(1, base + TIMER_CLEAR_INTERRUPT);
rtc_update_irq(rtcdrv->rtc, 1, RTC_IRQF | RTC_AF);
return IRQ_HANDLED;
}
static int goldfish_rtc_read_time(struct device *dev, struct rtc_time *tm)
{
struct goldfish_rtc *rtcdrv;
void __iomem *base;
u64 time_high;
u64 time_low;
u64 time;
rtcdrv = dev_get_drvdata(dev);
base = rtcdrv->base;
time_low = gf_ioread32(base + TIMER_TIME_LOW);
time_high = gf_ioread32(base + TIMER_TIME_HIGH);
time = (time_high << 32) | time_low;
do_div(time, NSEC_PER_SEC);
rtc_time64_to_tm(time, tm);
return 0;
}
static int goldfish_rtc_set_time(struct device *dev, struct rtc_time *tm)
{
struct goldfish_rtc *rtcdrv;
void __iomem *base;
u64 now64;
rtcdrv = dev_get_drvdata(dev);
base = rtcdrv->base;
now64 = rtc_tm_to_time64(tm) * NSEC_PER_SEC;
gf_iowrite32((now64 >> 32), base + TIMER_TIME_HIGH);
gf_iowrite32(now64, base + TIMER_TIME_LOW);
return 0;
}
static const struct rtc_class_ops goldfish_rtc_ops = {
.read_time = goldfish_rtc_read_time,
.set_time = goldfish_rtc_set_time,
.read_alarm = goldfish_rtc_read_alarm,
.set_alarm = goldfish_rtc_set_alarm,
.alarm_irq_enable = goldfish_rtc_alarm_irq_enable
};
static int goldfish_rtc_probe(struct platform_device *pdev)
{
struct goldfish_rtc *rtcdrv;
int err;
rtcdrv = devm_kzalloc(&pdev->dev, sizeof(*rtcdrv), GFP_KERNEL);
if (!rtcdrv)
return -ENOMEM;
platform_set_drvdata(pdev, rtcdrv);
rtcdrv->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(rtcdrv->base))
return PTR_ERR(rtcdrv->base);
rtcdrv->irq = platform_get_irq(pdev, 0);
if (rtcdrv->irq < 0)
return -ENODEV;
rtcdrv->rtc = devm_rtc_allocate_device(&pdev->dev);
if (IS_ERR(rtcdrv->rtc))
return PTR_ERR(rtcdrv->rtc);
rtcdrv->rtc->ops = &goldfish_rtc_ops;
rtcdrv->rtc->range_max = U64_MAX / NSEC_PER_SEC;
err = devm_request_irq(&pdev->dev, rtcdrv->irq,
goldfish_rtc_interrupt,
0, pdev->name, rtcdrv);
if (err)
return err;
return devm_rtc_register_device(rtcdrv->rtc);
}
static const struct of_device_id goldfish_rtc_of_match[] = {
{ .compatible = "google,goldfish-rtc", },
{},
};
MODULE_DEVICE_TABLE(of, goldfish_rtc_of_match);
static struct platform_driver goldfish_rtc = {
.probe = goldfish_rtc_probe,
.driver = {
.name = "goldfish_rtc",
.of_match_table = goldfish_rtc_of_match,
}
};
module_platform_driver(goldfish_rtc);
MODULE_LICENSE("GPL v2");
| linux-master | drivers/rtc/rtc-goldfish.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Driver for ST M41T94 SPI RTC
*
* Copyright (C) 2008 Kim B. Heino
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/rtc.h>
#include <linux/spi/spi.h>
#include <linux/bcd.h>
#define M41T94_REG_SECONDS 0x01
#define M41T94_REG_MINUTES 0x02
#define M41T94_REG_HOURS 0x03
#define M41T94_REG_WDAY 0x04
#define M41T94_REG_DAY 0x05
#define M41T94_REG_MONTH 0x06
#define M41T94_REG_YEAR 0x07
#define M41T94_REG_HT 0x0c
#define M41T94_BIT_HALT 0x40
#define M41T94_BIT_STOP 0x80
#define M41T94_BIT_CB 0x40
#define M41T94_BIT_CEB 0x80
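/*
 * Century handling: CEB enables the century bit and CB encodes it.
 * The driver always sets CEB and uses CB to distinguish 19xx from
 * 20xx when converting to and from the 1900-based tm_year.
 */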
static int m41t94_set_time(struct device *dev, struct rtc_time *tm)
{
struct spi_device *spi = to_spi_device(dev);
u8 buf[8]; /* write cmd + 7 registers */
dev_dbg(dev, "%s secs=%d, mins=%d, "
"hours=%d, mday=%d, mon=%d, year=%d, wday=%d\n",
"write", tm->tm_sec, tm->tm_min,
tm->tm_hour, tm->tm_mday,
tm->tm_mon, tm->tm_year, tm->tm_wday);
buf[0] = 0x80 | M41T94_REG_SECONDS; /* write time + date */
buf[M41T94_REG_SECONDS] = bin2bcd(tm->tm_sec);
buf[M41T94_REG_MINUTES] = bin2bcd(tm->tm_min);
buf[M41T94_REG_HOURS] = bin2bcd(tm->tm_hour);
buf[M41T94_REG_WDAY] = bin2bcd(tm->tm_wday + 1);
buf[M41T94_REG_DAY] = bin2bcd(tm->tm_mday);
buf[M41T94_REG_MONTH] = bin2bcd(tm->tm_mon + 1);
buf[M41T94_REG_HOURS] |= M41T94_BIT_CEB;
if (tm->tm_year >= 100)
buf[M41T94_REG_HOURS] |= M41T94_BIT_CB;
buf[M41T94_REG_YEAR] = bin2bcd(tm->tm_year % 100);
return spi_write(spi, buf, 8);
}
static int m41t94_read_time(struct device *dev, struct rtc_time *tm)
{
struct spi_device *spi = to_spi_device(dev);
u8 buf[2];
int ret, hour;
/* clear halt update bit */
ret = spi_w8r8(spi, M41T94_REG_HT);
if (ret < 0)
return ret;
if (ret & M41T94_BIT_HALT) {
buf[0] = 0x80 | M41T94_REG_HT;
buf[1] = ret & ~M41T94_BIT_HALT;
spi_write(spi, buf, 2);
}
/* clear stop bit */
ret = spi_w8r8(spi, M41T94_REG_SECONDS);
if (ret < 0)
return ret;
if (ret & M41T94_BIT_STOP) {
buf[0] = 0x80 | M41T94_REG_SECONDS;
buf[1] = ret & ~M41T94_BIT_STOP;
spi_write(spi, buf, 2);
}
tm->tm_sec = bcd2bin(spi_w8r8(spi, M41T94_REG_SECONDS));
tm->tm_min = bcd2bin(spi_w8r8(spi, M41T94_REG_MINUTES));
hour = spi_w8r8(spi, M41T94_REG_HOURS);
tm->tm_hour = bcd2bin(hour & 0x3f);
tm->tm_wday = bcd2bin(spi_w8r8(spi, M41T94_REG_WDAY)) - 1;
tm->tm_mday = bcd2bin(spi_w8r8(spi, M41T94_REG_DAY));
tm->tm_mon = bcd2bin(spi_w8r8(spi, M41T94_REG_MONTH)) - 1;
tm->tm_year = bcd2bin(spi_w8r8(spi, M41T94_REG_YEAR));
if ((hour & M41T94_BIT_CB) || !(hour & M41T94_BIT_CEB))
tm->tm_year += 100;
dev_dbg(dev, "%s secs=%d, mins=%d, "
"hours=%d, mday=%d, mon=%d, year=%d, wday=%d\n",
"read", tm->tm_sec, tm->tm_min,
tm->tm_hour, tm->tm_mday,
tm->tm_mon, tm->tm_year, tm->tm_wday);
return 0;
}
static const struct rtc_class_ops m41t94_rtc_ops = {
.read_time = m41t94_read_time,
.set_time = m41t94_set_time,
};
static struct spi_driver m41t94_driver;
static int m41t94_probe(struct spi_device *spi)
{
struct rtc_device *rtc;
int res;
spi->bits_per_word = 8;
spi_setup(spi);
res = spi_w8r8(spi, M41T94_REG_SECONDS);
if (res < 0) {
dev_err(&spi->dev, "not found.\n");
return res;
}
rtc = devm_rtc_device_register(&spi->dev, m41t94_driver.driver.name,
&m41t94_rtc_ops, THIS_MODULE);
if (IS_ERR(rtc))
return PTR_ERR(rtc);
spi_set_drvdata(spi, rtc);
return 0;
}
static struct spi_driver m41t94_driver = {
.driver = {
.name = "rtc-m41t94",
},
.probe = m41t94_probe,
};
module_spi_driver(m41t94_driver);
MODULE_AUTHOR("Kim B. Heino <[email protected]>");
MODULE_DESCRIPTION("Driver for ST M41T94 SPI RTC");
MODULE_LICENSE("GPL");
MODULE_ALIAS("spi:rtc-m41t94");
| linux-master | drivers/rtc/rtc-m41t94.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* An RTC driver for Allwinner A31/A23
*
* Copyright (c) 2014, Chen-Yu Tsai <[email protected]>
*
* based on rtc-sunxi.c
*
* An RTC driver for Allwinner A10/A20
*
* Copyright (c) 2013, Carlo Caione <[email protected]>
*/
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/clk/sunxi-ng.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/rtc.h>
#include <linux/slab.h>
#include <linux/types.h>
/* Control register */
#define SUN6I_LOSC_CTRL 0x0000
#define SUN6I_LOSC_CTRL_KEY (0x16aa << 16)
#define SUN6I_LOSC_CTRL_AUTO_SWT_BYPASS BIT(15)
#define SUN6I_LOSC_CTRL_ALM_DHMS_ACC BIT(9)
#define SUN6I_LOSC_CTRL_RTC_HMS_ACC BIT(8)
#define SUN6I_LOSC_CTRL_RTC_YMD_ACC BIT(7)
#define SUN6I_LOSC_CTRL_EXT_LOSC_EN BIT(4)
#define SUN6I_LOSC_CTRL_EXT_OSC BIT(0)
#define SUN6I_LOSC_CTRL_ACC_MASK GENMASK(9, 7)
#define SUN6I_LOSC_CLK_PRESCAL 0x0008
/* RTC */
#define SUN6I_RTC_YMD 0x0010
#define SUN6I_RTC_HMS 0x0014
/* Alarm 0 (counter) */
#define SUN6I_ALRM_COUNTER 0x0020
/* This holds the remaining alarm seconds on older SoCs (current value) */
#define SUN6I_ALRM_COUNTER_HMS 0x0024
#define SUN6I_ALRM_EN 0x0028
#define SUN6I_ALRM_EN_CNT_EN BIT(0)
#define SUN6I_ALRM_IRQ_EN 0x002c
#define SUN6I_ALRM_IRQ_EN_CNT_IRQ_EN BIT(0)
#define SUN6I_ALRM_IRQ_STA 0x0030
#define SUN6I_ALRM_IRQ_STA_CNT_IRQ_PEND BIT(0)
/* Alarm 1 (wall clock) */
#define SUN6I_ALRM1_EN 0x0044
#define SUN6I_ALRM1_IRQ_EN 0x0048
#define SUN6I_ALRM1_IRQ_STA 0x004c
#define SUN6I_ALRM1_IRQ_STA_WEEK_IRQ_PEND BIT(0)
/* Alarm config */
#define SUN6I_ALARM_CONFIG 0x0050
#define SUN6I_ALARM_CONFIG_WAKEUP BIT(0)
#define SUN6I_LOSC_OUT_GATING 0x0060
#define SUN6I_LOSC_OUT_GATING_EN_OFFSET 0
/* General-purpose data */
#define SUN6I_GP_DATA 0x0100
#define SUN6I_GP_DATA_SIZE 0x20
/*
* Get date values
*/
#define SUN6I_DATE_GET_DAY_VALUE(x) ((x) & 0x0000001f)
#define SUN6I_DATE_GET_MON_VALUE(x) (((x) & 0x00000f00) >> 8)
#define SUN6I_DATE_GET_YEAR_VALUE(x) (((x) & 0x003f0000) >> 16)
#define SUN6I_LEAP_GET_VALUE(x) (((x) & 0x00400000) >> 22)
/*
* Get time values
*/
#define SUN6I_TIME_GET_SEC_VALUE(x) ((x) & 0x0000003f)
#define SUN6I_TIME_GET_MIN_VALUE(x) (((x) & 0x00003f00) >> 8)
#define SUN6I_TIME_GET_HOUR_VALUE(x) (((x) & 0x001f0000) >> 16)
/*
* Set date values
*/
#define SUN6I_DATE_SET_DAY_VALUE(x) ((x) & 0x0000001f)
#define SUN6I_DATE_SET_MON_VALUE(x) ((x) << 8 & 0x00000f00)
#define SUN6I_DATE_SET_YEAR_VALUE(x) ((x) << 16 & 0x003f0000)
#define SUN6I_LEAP_SET_VALUE(x) ((x) << 22 & 0x00400000)
/*
* Set time values
*/
#define SUN6I_TIME_SET_SEC_VALUE(x) ((x) & 0x0000003f)
#define SUN6I_TIME_SET_MIN_VALUE(x) ((x) << 8 & 0x00003f00)
#define SUN6I_TIME_SET_HOUR_VALUE(x) ((x) << 16 & 0x001f0000)
/*
* The year parameter passed to the driver is usually an offset relative to
* the year 1900. This macro is used to convert this offset to another one
* relative to the minimum year allowed by the hardware.
*
* The year range is 1970 - 2033. This range is selected to match Allwinner's
* driver, even though it is somewhat limited.
*/
#define SUN6I_YEAR_MIN 1970
#define SUN6I_YEAR_OFF (SUN6I_YEAR_MIN - 1900)
#define SECS_PER_DAY (24 * 3600ULL)
/*
* There are other differences between models, including:
*
* - number of GPIO pins that can be configured to hold a certain level
* - crypto-key related registers (H5, H6)
* - boot process related (super standby, secondary processor entry address)
* registers (R40, H6)
* - SYS power domain controls (R40)
* - DCXO controls (H6)
* - RC oscillator calibration (H6)
*
* These functions are not covered by this driver.
*/
struct sun6i_rtc_clk_data {
unsigned long rc_osc_rate;
unsigned int fixed_prescaler : 16;
unsigned int has_prescaler : 1;
unsigned int has_out_clk : 1;
unsigned int has_losc_en : 1;
unsigned int has_auto_swt : 1;
};
#define RTC_LINEAR_DAY BIT(0)
struct sun6i_rtc_dev {
struct rtc_device *rtc;
const struct sun6i_rtc_clk_data *data;
void __iomem *base;
int irq;
time64_t alarm;
unsigned long flags;
struct clk_hw hw;
struct clk_hw *int_osc;
struct clk *losc;
struct clk *ext_losc;
spinlock_t lock;
};
static struct sun6i_rtc_dev *sun6i_rtc;
static unsigned long sun6i_rtc_osc_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
struct sun6i_rtc_dev *rtc = container_of(hw, struct sun6i_rtc_dev, hw);
u32 val = 0;
val = readl(rtc->base + SUN6I_LOSC_CTRL);
if (val & SUN6I_LOSC_CTRL_EXT_OSC)
return parent_rate;
if (rtc->data->fixed_prescaler)
parent_rate /= rtc->data->fixed_prescaler;
if (rtc->data->has_prescaler) {
val = readl(rtc->base + SUN6I_LOSC_CLK_PRESCAL);
val &= GENMASK(4, 0);
}
return parent_rate / (val + 1);
}
static u8 sun6i_rtc_osc_get_parent(struct clk_hw *hw)
{
struct sun6i_rtc_dev *rtc = container_of(hw, struct sun6i_rtc_dev, hw);
return readl(rtc->base + SUN6I_LOSC_CTRL) & SUN6I_LOSC_CTRL_EXT_OSC;
}
static int sun6i_rtc_osc_set_parent(struct clk_hw *hw, u8 index)
{
struct sun6i_rtc_dev *rtc = container_of(hw, struct sun6i_rtc_dev, hw);
unsigned long flags;
u32 val;
if (index > 1)
return -EINVAL;
spin_lock_irqsave(&rtc->lock, flags);
val = readl(rtc->base + SUN6I_LOSC_CTRL);
val &= ~SUN6I_LOSC_CTRL_EXT_OSC;
val |= SUN6I_LOSC_CTRL_KEY;
val |= index ? SUN6I_LOSC_CTRL_EXT_OSC : 0;
if (rtc->data->has_losc_en) {
val &= ~SUN6I_LOSC_CTRL_EXT_LOSC_EN;
val |= index ? SUN6I_LOSC_CTRL_EXT_LOSC_EN : 0;
}
writel(val, rtc->base + SUN6I_LOSC_CTRL);
spin_unlock_irqrestore(&rtc->lock, flags);
return 0;
}
static const struct clk_ops sun6i_rtc_osc_ops = {
.recalc_rate = sun6i_rtc_osc_recalc_rate,
.determine_rate = clk_hw_determine_rate_no_reparent,
.get_parent = sun6i_rtc_osc_get_parent,
.set_parent = sun6i_rtc_osc_set_parent,
};
static void __init sun6i_rtc_clk_init(struct device_node *node,
const struct sun6i_rtc_clk_data *data)
{
struct clk_hw_onecell_data *clk_data;
struct sun6i_rtc_dev *rtc;
struct clk_init_data init = {
.ops = &sun6i_rtc_osc_ops,
.name = "losc",
};
const char *iosc_name = "rtc-int-osc";
const char *clkout_name = "osc32k-out";
const char *parents[2];
u32 reg;
rtc = kzalloc(sizeof(*rtc), GFP_KERNEL);
if (!rtc)
return;
rtc->data = data;
clk_data = kzalloc(struct_size(clk_data, hws, 3), GFP_KERNEL);
if (!clk_data) {
kfree(rtc);
return;
}
spin_lock_init(&rtc->lock);
rtc->base = of_io_request_and_map(node, 0, of_node_full_name(node));
if (IS_ERR(rtc->base)) {
pr_crit("Can't map RTC registers");
goto err;
}
reg = SUN6I_LOSC_CTRL_KEY;
if (rtc->data->has_auto_swt) {
/* Bypass auto-switch to int osc, on ext losc failure */
reg |= SUN6I_LOSC_CTRL_AUTO_SWT_BYPASS;
writel(reg, rtc->base + SUN6I_LOSC_CTRL);
}
/* Switch to the external, more precise, oscillator, if present */
if (of_property_present(node, "clocks")) {
reg |= SUN6I_LOSC_CTRL_EXT_OSC;
if (rtc->data->has_losc_en)
reg |= SUN6I_LOSC_CTRL_EXT_LOSC_EN;
}
writel(reg, rtc->base + SUN6I_LOSC_CTRL);
/* Yes, I know, this is ugly. */
sun6i_rtc = rtc;
of_property_read_string_index(node, "clock-output-names", 2,
&iosc_name);
rtc->int_osc = clk_hw_register_fixed_rate_with_accuracy(NULL,
iosc_name,
NULL, 0,
rtc->data->rc_osc_rate,
300000000);
if (IS_ERR(rtc->int_osc)) {
pr_crit("Couldn't register the internal oscillator\n");
goto err;
}
parents[0] = clk_hw_get_name(rtc->int_osc);
/* If there is no external oscillator, this will be NULL and ... */
parents[1] = of_clk_get_parent_name(node, 0);
rtc->hw.init = &init;
init.parent_names = parents;
/* ... number of clock parents will be 1. */
init.num_parents = of_clk_get_parent_count(node) + 1;
of_property_read_string_index(node, "clock-output-names", 0,
&init.name);
rtc->losc = clk_register(NULL, &rtc->hw);
if (IS_ERR(rtc->losc)) {
pr_crit("Couldn't register the LOSC clock\n");
goto err_register;
}
of_property_read_string_index(node, "clock-output-names", 1,
&clkout_name);
rtc->ext_losc = clk_register_gate(NULL, clkout_name, init.name,
0, rtc->base + SUN6I_LOSC_OUT_GATING,
SUN6I_LOSC_OUT_GATING_EN_OFFSET, 0,
&rtc->lock);
if (IS_ERR(rtc->ext_losc)) {
pr_crit("Couldn't register the LOSC external gate\n");
goto err_register;
}
clk_data->num = 3;
clk_data->hws[0] = &rtc->hw;
clk_data->hws[1] = __clk_get_hw(rtc->ext_losc);
clk_data->hws[2] = rtc->int_osc;
of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
return;
err_register:
clk_hw_unregister_fixed_rate(rtc->int_osc);
err:
kfree(clk_data);
}
static const struct sun6i_rtc_clk_data sun6i_a31_rtc_data = {
.rc_osc_rate = 667000, /* datasheet says 600 ~ 700 kHz */
.has_prescaler = 1,
};
static void __init sun6i_a31_rtc_clk_init(struct device_node *node)
{
sun6i_rtc_clk_init(node, &sun6i_a31_rtc_data);
}
CLK_OF_DECLARE_DRIVER(sun6i_a31_rtc_clk, "allwinner,sun6i-a31-rtc",
sun6i_a31_rtc_clk_init);
static const struct sun6i_rtc_clk_data sun8i_a23_rtc_data = {
.rc_osc_rate = 667000, /* datasheet says 600 ~ 700 kHz */
.has_prescaler = 1,
.has_out_clk = 1,
};
static void __init sun8i_a23_rtc_clk_init(struct device_node *node)
{
sun6i_rtc_clk_init(node, &sun8i_a23_rtc_data);
}
CLK_OF_DECLARE_DRIVER(sun8i_a23_rtc_clk, "allwinner,sun8i-a23-rtc",
sun8i_a23_rtc_clk_init);
static const struct sun6i_rtc_clk_data sun8i_h3_rtc_data = {
.rc_osc_rate = 16000000,
.fixed_prescaler = 32,
.has_prescaler = 1,
.has_out_clk = 1,
};
static void __init sun8i_h3_rtc_clk_init(struct device_node *node)
{
sun6i_rtc_clk_init(node, &sun8i_h3_rtc_data);
}
CLK_OF_DECLARE_DRIVER(sun8i_h3_rtc_clk, "allwinner,sun8i-h3-rtc",
sun8i_h3_rtc_clk_init);
/* As far as we are concerned, clocks for H5 are the same as H3 */
CLK_OF_DECLARE_DRIVER(sun50i_h5_rtc_clk, "allwinner,sun50i-h5-rtc",
sun8i_h3_rtc_clk_init);
static const struct sun6i_rtc_clk_data sun50i_h6_rtc_data = {
.rc_osc_rate = 16000000,
.fixed_prescaler = 32,
.has_prescaler = 1,
.has_out_clk = 1,
.has_losc_en = 1,
.has_auto_swt = 1,
};
static void __init sun50i_h6_rtc_clk_init(struct device_node *node)
{
sun6i_rtc_clk_init(node, &sun50i_h6_rtc_data);
}
CLK_OF_DECLARE_DRIVER(sun50i_h6_rtc_clk, "allwinner,sun50i-h6-rtc",
sun50i_h6_rtc_clk_init);
/*
* The R40 user manual is self-conflicting on whether the prescaler is
* fixed or configurable. The clock diagram shows it as fixed, but there
* is also a configurable divider in the RTC block.
*/
static const struct sun6i_rtc_clk_data sun8i_r40_rtc_data = {
.rc_osc_rate = 16000000,
.fixed_prescaler = 512,
};
static void __init sun8i_r40_rtc_clk_init(struct device_node *node)
{
sun6i_rtc_clk_init(node, &sun8i_r40_rtc_data);
}
CLK_OF_DECLARE_DRIVER(sun8i_r40_rtc_clk, "allwinner,sun8i-r40-rtc",
sun8i_r40_rtc_clk_init);
static const struct sun6i_rtc_clk_data sun8i_v3_rtc_data = {
.rc_osc_rate = 32000,
.has_out_clk = 1,
};
static void __init sun8i_v3_rtc_clk_init(struct device_node *node)
{
sun6i_rtc_clk_init(node, &sun8i_v3_rtc_data);
}
CLK_OF_DECLARE_DRIVER(sun8i_v3_rtc_clk, "allwinner,sun8i-v3-rtc",
sun8i_v3_rtc_clk_init);
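/*
 * Alarm 0 (counter) interrupt handler: acknowledge the pending bit
 * by writing it back and forward the event to the RTC core.
 */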
static irqreturn_t sun6i_rtc_alarmirq(int irq, void *id)
{
struct sun6i_rtc_dev *chip = (struct sun6i_rtc_dev *) id;
irqreturn_t ret = IRQ_NONE;
u32 val;
spin_lock(&chip->lock);
val = readl(chip->base + SUN6I_ALRM_IRQ_STA);
if (val & SUN6I_ALRM_IRQ_STA_CNT_IRQ_PEND) {
val |= SUN6I_ALRM_IRQ_STA_CNT_IRQ_PEND;
writel(val, chip->base + SUN6I_ALRM_IRQ_STA);
rtc_update_irq(chip->rtc, 1, RTC_AF | RTC_IRQF);
ret = IRQ_HANDLED;
}
spin_unlock(&chip->lock);
return ret;
}
static void sun6i_rtc_setaie(int to, struct sun6i_rtc_dev *chip)
{
u32 alrm_val = 0;
u32 alrm_irq_val = 0;
u32 alrm_wake_val = 0;
unsigned long flags;
if (to) {
alrm_val = SUN6I_ALRM_EN_CNT_EN;
alrm_irq_val = SUN6I_ALRM_IRQ_EN_CNT_IRQ_EN;
alrm_wake_val = SUN6I_ALARM_CONFIG_WAKEUP;
} else {
writel(SUN6I_ALRM_IRQ_STA_CNT_IRQ_PEND,
chip->base + SUN6I_ALRM_IRQ_STA);
}
spin_lock_irqsave(&chip->lock, flags);
writel(alrm_val, chip->base + SUN6I_ALRM_EN);
writel(alrm_irq_val, chip->base + SUN6I_ALRM_IRQ_EN);
writel(alrm_wake_val, chip->base + SUN6I_ALARM_CONFIG);
spin_unlock_irqrestore(&chip->lock, flags);
}
static int sun6i_rtc_gettime(struct device *dev, struct rtc_time *rtc_tm)
{
struct sun6i_rtc_dev *chip = dev_get_drvdata(dev);
u32 date, time;
/*
 * Re-read until two consecutive samples agree, in case the date or
 * time rolls over between the YMD and HMS reads.
 */
do {
date = readl(chip->base + SUN6I_RTC_YMD);
time = readl(chip->base + SUN6I_RTC_HMS);
} while ((date != readl(chip->base + SUN6I_RTC_YMD)) ||
(time != readl(chip->base + SUN6I_RTC_HMS)));
if (chip->flags & RTC_LINEAR_DAY) {
/*
* Newer chips store a linear day number, the manual
* does not mandate any epoch base. The BSP driver uses
* the UNIX epoch, let's just copy that, as it's the
* easiest anyway.
*/
rtc_time64_to_tm((date & 0xffff) * SECS_PER_DAY, rtc_tm);
} else {
rtc_tm->tm_mday = SUN6I_DATE_GET_DAY_VALUE(date);
rtc_tm->tm_mon = SUN6I_DATE_GET_MON_VALUE(date) - 1;
rtc_tm->tm_year = SUN6I_DATE_GET_YEAR_VALUE(date);
/*
* switch from (data_year->min)-relative offset to
* a (1900)-relative one
*/
rtc_tm->tm_year += SUN6I_YEAR_OFF;
}
rtc_tm->tm_sec = SUN6I_TIME_GET_SEC_VALUE(time);
rtc_tm->tm_min = SUN6I_TIME_GET_MIN_VALUE(time);
rtc_tm->tm_hour = SUN6I_TIME_GET_HOUR_VALUE(time);
return 0;
}
static int sun6i_rtc_getalarm(struct device *dev, struct rtc_wkalrm *wkalrm)
{
struct sun6i_rtc_dev *chip = dev_get_drvdata(dev);
unsigned long flags;
u32 alrm_st;
u32 alrm_en;
spin_lock_irqsave(&chip->lock, flags);
alrm_en = readl(chip->base + SUN6I_ALRM_IRQ_EN);
alrm_st = readl(chip->base + SUN6I_ALRM_IRQ_STA);
spin_unlock_irqrestore(&chip->lock, flags);
wkalrm->enabled = !!(alrm_en & SUN6I_ALRM_EN_CNT_EN);
wkalrm->pending = !!(alrm_st & SUN6I_ALRM_EN_CNT_EN);
rtc_time64_to_tm(chip->alarm, &wkalrm->time);
return 0;
}
static int sun6i_rtc_setalarm(struct device *dev, struct rtc_wkalrm *wkalrm)
{
struct sun6i_rtc_dev *chip = dev_get_drvdata(dev);
struct rtc_time *alrm_tm = &wkalrm->time;
struct rtc_time tm_now;
time64_t time_set;
u32 counter_val, counter_val_hms;
int ret;
time_set = rtc_tm_to_time64(alrm_tm);
if (chip->flags & RTC_LINEAR_DAY) {
/*
* The alarm registers hold the actual alarm time, encoded
* in the same way (linear day + HMS) as the current time.
*/
counter_val_hms = SUN6I_TIME_SET_SEC_VALUE(alrm_tm->tm_sec) |
SUN6I_TIME_SET_MIN_VALUE(alrm_tm->tm_min) |
SUN6I_TIME_SET_HOUR_VALUE(alrm_tm->tm_hour);
/* The division will cut off the H:M:S part of alrm_tm. */
counter_val = div_u64(rtc_tm_to_time64(alrm_tm), SECS_PER_DAY);
} else {
/* The alarm register holds the number of seconds left. */
time64_t time_now;
ret = sun6i_rtc_gettime(dev, &tm_now);
if (ret < 0) {
dev_err(dev, "Error in getting time\n");
return -EINVAL;
}
time_now = rtc_tm_to_time64(&tm_now);
if (time_set <= time_now) {
dev_err(dev, "Date to set in the past\n");
return -EINVAL;
}
if ((time_set - time_now) > U32_MAX) {
dev_err(dev, "Date too far in the future\n");
return -EINVAL;
}
counter_val = time_set - time_now;
}
sun6i_rtc_setaie(0, chip);
writel(0, chip->base + SUN6I_ALRM_COUNTER);
if (chip->flags & RTC_LINEAR_DAY)
writel(0, chip->base + SUN6I_ALRM_COUNTER_HMS);
usleep_range(100, 300);
writel(counter_val, chip->base + SUN6I_ALRM_COUNTER);
if (chip->flags & RTC_LINEAR_DAY)
writel(counter_val_hms, chip->base + SUN6I_ALRM_COUNTER_HMS);
chip->alarm = time_set;
sun6i_rtc_setaie(wkalrm->enabled, chip);
return 0;
}
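/*
 * Note on the two alarm encodings handled above: RTC_LINEAR_DAY parts
 * latch an absolute alarm time (linear day number plus an H:M:S word),
 * while older parts are loaded with the number of seconds from now.
 * For example, an alarm 90 seconds ahead on an older part simply
 * writes 90 to SUN6I_ALRM_COUNTER.
 */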
static int sun6i_rtc_wait(struct sun6i_rtc_dev *chip, int offset,
unsigned int mask, unsigned int ms_timeout)
{
const unsigned long timeout = jiffies + msecs_to_jiffies(ms_timeout);
u32 reg;
do {
reg = readl(chip->base + offset);
reg &= mask;
if (!reg)
return 0;
} while (time_before(jiffies, timeout));
return -ETIMEDOUT;
}
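/*
 * sun6i_rtc_wait() is essentially an open-coded poll loop; a sketch of
 * the same wait using readl_poll_timeout() from <linux/iopoll.h> (not
 * part of the driver, 'reg' is hypothetical):
 *
 *	u32 reg;
 *	return readl_poll_timeout(chip->base + offset, reg,
 *				  !(reg & mask), 0, ms_timeout * 1000);
 */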
static int sun6i_rtc_settime(struct device *dev, struct rtc_time *rtc_tm)
{
struct sun6i_rtc_dev *chip = dev_get_drvdata(dev);
u32 date = 0;
u32 time = 0;
time = SUN6I_TIME_SET_SEC_VALUE(rtc_tm->tm_sec) |
SUN6I_TIME_SET_MIN_VALUE(rtc_tm->tm_min) |
SUN6I_TIME_SET_HOUR_VALUE(rtc_tm->tm_hour);
if (chip->flags & RTC_LINEAR_DAY) {
/* The division will cut off the H:M:S part of rtc_tm. */
date = div_u64(rtc_tm_to_time64(rtc_tm), SECS_PER_DAY);
} else {
rtc_tm->tm_year -= SUN6I_YEAR_OFF;
rtc_tm->tm_mon += 1;
date = SUN6I_DATE_SET_DAY_VALUE(rtc_tm->tm_mday) |
SUN6I_DATE_SET_MON_VALUE(rtc_tm->tm_mon) |
SUN6I_DATE_SET_YEAR_VALUE(rtc_tm->tm_year);
if (is_leap_year(rtc_tm->tm_year + SUN6I_YEAR_MIN))
date |= SUN6I_LEAP_SET_VALUE(1);
}
/* Check whether registers are writable */
if (sun6i_rtc_wait(chip, SUN6I_LOSC_CTRL,
SUN6I_LOSC_CTRL_ACC_MASK, 50)) {
dev_err(dev, "rtc is still busy.\n");
return -EBUSY;
}
writel(time, chip->base + SUN6I_RTC_HMS);
/*
* After writing the RTC HH-MM-SS register, the
* SUN6I_LOSC_CTRL_RTC_HMS_ACC bit is set and it will not
* be cleared until the real writing operation is finished
*/
if (sun6i_rtc_wait(chip, SUN6I_LOSC_CTRL,
SUN6I_LOSC_CTRL_RTC_HMS_ACC, 50)) {
dev_err(dev, "Failed to set rtc time.\n");
return -ETIMEDOUT;
}
writel(date, chip->base + SUN6I_RTC_YMD);
/*
* After writing the RTC YY-MM-DD register, the
* SUN6I_LOSC_CTRL_RTC_YMD_ACC bit is set and it will not
* be cleared until the real writing operation is finished
*/
if (sun6i_rtc_wait(chip, SUN6I_LOSC_CTRL,
SUN6I_LOSC_CTRL_RTC_YMD_ACC, 50)) {
dev_err(dev, "Failed to set rtc time.\n");
return -ETIMEDOUT;
}
return 0;
}
static int sun6i_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
{
struct sun6i_rtc_dev *chip = dev_get_drvdata(dev);
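	/* Enabling is done in sun6i_rtc_setalarm(); only handle disable here. */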
if (!enabled)
sun6i_rtc_setaie(enabled, chip);
return 0;
}
static const struct rtc_class_ops sun6i_rtc_ops = {
.read_time = sun6i_rtc_gettime,
.set_time = sun6i_rtc_settime,
.read_alarm = sun6i_rtc_getalarm,
.set_alarm = sun6i_rtc_setalarm,
.alarm_irq_enable = sun6i_rtc_alarm_irq_enable
};
static int sun6i_rtc_nvmem_read(void *priv, unsigned int offset, void *_val, size_t bytes)
{
struct sun6i_rtc_dev *chip = priv;
u32 *val = _val;
int i;
for (i = 0; i < bytes / 4; ++i)
val[i] = readl(chip->base + SUN6I_GP_DATA + offset + 4 * i);
return 0;
}
static int sun6i_rtc_nvmem_write(void *priv, unsigned int offset, void *_val, size_t bytes)
{
struct sun6i_rtc_dev *chip = priv;
u32 *val = _val;
int i;
for (i = 0; i < bytes / 4; ++i)
writel(val[i], chip->base + SUN6I_GP_DATA + offset + 4 * i);
return 0;
}
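/*
 * Both accessors move whole 32-bit words, which is why the nvmem
 * config below advertises a word_size and stride of 4.
 */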
static struct nvmem_config sun6i_rtc_nvmem_cfg = {
.type = NVMEM_TYPE_BATTERY_BACKED,
.reg_read = sun6i_rtc_nvmem_read,
.reg_write = sun6i_rtc_nvmem_write,
.size = SUN6I_GP_DATA_SIZE,
.word_size = 4,
.stride = 4,
};
#ifdef CONFIG_PM_SLEEP
/* Enable IRQ wake on suspend, so the RTC alarm can wake the system. */
static int sun6i_rtc_suspend(struct device *dev)
{
struct sun6i_rtc_dev *chip = dev_get_drvdata(dev);
if (device_may_wakeup(dev))
enable_irq_wake(chip->irq);
return 0;
}
/* Disable IRQ wake on resume. */
static int sun6i_rtc_resume(struct device *dev)
{
struct sun6i_rtc_dev *chip = dev_get_drvdata(dev);
if (device_may_wakeup(dev))
disable_irq_wake(chip->irq);
return 0;
}
#endif
static SIMPLE_DEV_PM_OPS(sun6i_rtc_pm_ops,
sun6i_rtc_suspend, sun6i_rtc_resume);
static void sun6i_rtc_bus_clk_cleanup(void *data)
{
struct clk *bus_clk = data;
clk_disable_unprepare(bus_clk);
}
static int sun6i_rtc_probe(struct platform_device *pdev)
{
struct sun6i_rtc_dev *chip = sun6i_rtc;
struct device *dev = &pdev->dev;
struct clk *bus_clk;
int ret;
bus_clk = devm_clk_get_optional(dev, "bus");
if (IS_ERR(bus_clk))
return PTR_ERR(bus_clk);
if (bus_clk) {
ret = clk_prepare_enable(bus_clk);
if (ret)
return ret;
ret = devm_add_action_or_reset(dev, sun6i_rtc_bus_clk_cleanup,
bus_clk);
if (ret)
return ret;
}
if (!chip) {
chip = devm_kzalloc(&pdev->dev, sizeof(*chip), GFP_KERNEL);
if (!chip)
return -ENOMEM;
spin_lock_init(&chip->lock);
chip->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(chip->base))
return PTR_ERR(chip->base);
if (IS_REACHABLE(CONFIG_SUN6I_RTC_CCU)) {
ret = sun6i_rtc_ccu_probe(dev, chip->base);
if (ret)
return ret;
}
}
platform_set_drvdata(pdev, chip);
chip->flags = (unsigned long)of_device_get_match_data(&pdev->dev);
chip->irq = platform_get_irq(pdev, 0);
if (chip->irq < 0)
return chip->irq;
ret = devm_request_irq(&pdev->dev, chip->irq, sun6i_rtc_alarmirq,
0, dev_name(&pdev->dev), chip);
if (ret) {
dev_err(&pdev->dev, "Could not request IRQ\n");
return ret;
}
/* clear the alarm counter value */
writel(0, chip->base + SUN6I_ALRM_COUNTER);
/* disable counter alarm */
writel(0, chip->base + SUN6I_ALRM_EN);
/* disable counter alarm interrupt */
writel(0, chip->base + SUN6I_ALRM_IRQ_EN);
/* disable week alarm */
writel(0, chip->base + SUN6I_ALRM1_EN);
/* disable week alarm interrupt */
writel(0, chip->base + SUN6I_ALRM1_IRQ_EN);
/* clear counter alarm pending interrupts */
writel(SUN6I_ALRM_IRQ_STA_CNT_IRQ_PEND,
chip->base + SUN6I_ALRM_IRQ_STA);
/* clear week alarm pending interrupts */
writel(SUN6I_ALRM1_IRQ_STA_WEEK_IRQ_PEND,
chip->base + SUN6I_ALRM1_IRQ_STA);
/* disable alarm wakeup */
writel(0, chip->base + SUN6I_ALARM_CONFIG);
clk_prepare_enable(chip->losc);
device_init_wakeup(&pdev->dev, 1);
chip->rtc = devm_rtc_allocate_device(&pdev->dev);
if (IS_ERR(chip->rtc))
return PTR_ERR(chip->rtc);
chip->rtc->ops = &sun6i_rtc_ops;
if (chip->flags & RTC_LINEAR_DAY)
chip->rtc->range_max = (65536 * SECS_PER_DAY) - 1;
else
chip->rtc->range_max = 2019686399LL; /* 2033-12-31 23:59:59 */
ret = devm_rtc_register_device(chip->rtc);
if (ret)
return ret;
sun6i_rtc_nvmem_cfg.priv = chip;
ret = devm_rtc_nvmem_register(chip->rtc, &sun6i_rtc_nvmem_cfg);
if (ret)
return ret;
return 0;
}
/*
* As far as RTC functionality goes, all models are the same. The
* datasheets claim that different models have different number of
* registers available for non-volatile storage, but experiments show
* that all SoCs have 16 registers available for this purpose.
*/
static const struct of_device_id sun6i_rtc_dt_ids[] = {
{ .compatible = "allwinner,sun6i-a31-rtc" },
{ .compatible = "allwinner,sun8i-a23-rtc" },
{ .compatible = "allwinner,sun8i-h3-rtc" },
{ .compatible = "allwinner,sun8i-r40-rtc" },
{ .compatible = "allwinner,sun8i-v3-rtc" },
{ .compatible = "allwinner,sun50i-h5-rtc" },
{ .compatible = "allwinner,sun50i-h6-rtc" },
{ .compatible = "allwinner,sun50i-h616-rtc",
.data = (void *)RTC_LINEAR_DAY },
{ .compatible = "allwinner,sun50i-r329-rtc",
.data = (void *)RTC_LINEAR_DAY },
{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, sun6i_rtc_dt_ids);
static struct platform_driver sun6i_rtc_driver = {
.probe = sun6i_rtc_probe,
.driver = {
.name = "sun6i-rtc",
.of_match_table = sun6i_rtc_dt_ids,
.pm = &sun6i_rtc_pm_ops,
},
};
builtin_platform_driver(sun6i_rtc_driver);
| linux-master | drivers/rtc/rtc-sun6i.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Driver for Epson RTC-9701JE
*
* Copyright (C) 2008 Magnus Damm
*
* Based on rtc-max6902.c
*
* Copyright (C) 2006 8D Technologies inc.
* Copyright (C) 2004 Compulab Ltd.
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/rtc.h>
#include <linux/spi/spi.h>
#include <linux/bcd.h>
#include <linux/delay.h>
#include <linux/bitops.h>
#define RSECCNT 0x00 /* Second Counter */
#define RMINCNT 0x01 /* Minute Counter */
#define RHRCNT 0x02 /* Hour Counter */
#define RWKCNT 0x03 /* Week Counter */
#define RDAYCNT 0x04 /* Day Counter */
#define RMONCNT 0x05 /* Month Counter */
#define RYRCNT 0x06 /* Year Counter */
#define R100CNT 0x07 /* Y100 Counter */
#define RMINAR 0x08 /* Minute Alarm */
#define RHRAR 0x09 /* Hour Alarm */
#define RWKAR 0x0a /* Week/Day Alarm */
#define RTIMCNT 0x0c /* Interval Timer */
#define REXT 0x0d /* Extension Register */
#define RFLAG 0x0e /* RTC Flag Register */
#define RCR 0x0f /* RTC Control Register */
static int write_reg(struct device *dev, int address, unsigned char data)
{
struct spi_device *spi = to_spi_device(dev);
unsigned char buf[2];
buf[0] = address & 0x7f;
buf[1] = data;
return spi_write(spi, buf, ARRAY_SIZE(buf));
}
static int read_regs(struct device *dev, unsigned char *regs, int no_regs)
{
struct spi_device *spi = to_spi_device(dev);
u8 txbuf[1], rxbuf[1];
int k, ret;
ret = 0;
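	/* No burst mode: fetch one register per transfer; bit 7 set marks a read. */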
for (k = 0; ret == 0 && k < no_regs; k++) {
txbuf[0] = 0x80 | regs[k];
ret = spi_write_then_read(spi, txbuf, 1, rxbuf, 1);
regs[k] = rxbuf[0];
}
return ret;
}
static int r9701_get_datetime(struct device *dev, struct rtc_time *dt)
{
int ret;
unsigned char buf[] = { RSECCNT, RMINCNT, RHRCNT,
RDAYCNT, RMONCNT, RYRCNT };
ret = read_regs(dev, buf, ARRAY_SIZE(buf));
if (ret)
return ret;
dt->tm_sec = bcd2bin(buf[0]); /* RSECCNT */
dt->tm_min = bcd2bin(buf[1]); /* RMINCNT */
dt->tm_hour = bcd2bin(buf[2]); /* RHRCNT */
dt->tm_mday = bcd2bin(buf[3]); /* RDAYCNT */
dt->tm_mon = bcd2bin(buf[4]) - 1; /* RMONCNT */
dt->tm_year = bcd2bin(buf[5]) + 100; /* RYRCNT */
return 0;
}
static int r9701_set_datetime(struct device *dev, struct rtc_time *dt)
{
int ret;
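	/* Chained writes: after the first failure the remaining writes are skipped. */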
ret = write_reg(dev, RHRCNT, bin2bcd(dt->tm_hour));
ret = ret ? ret : write_reg(dev, RMINCNT, bin2bcd(dt->tm_min));
ret = ret ? ret : write_reg(dev, RSECCNT, bin2bcd(dt->tm_sec));
ret = ret ? ret : write_reg(dev, RDAYCNT, bin2bcd(dt->tm_mday));
ret = ret ? ret : write_reg(dev, RMONCNT, bin2bcd(dt->tm_mon + 1));
ret = ret ? ret : write_reg(dev, RYRCNT, bin2bcd(dt->tm_year - 100));
return ret;
}
static const struct rtc_class_ops r9701_rtc_ops = {
.read_time = r9701_get_datetime,
.set_time = r9701_set_datetime,
};
static int r9701_probe(struct spi_device *spi)
{
struct rtc_device *rtc;
unsigned char tmp;
int res;
tmp = R100CNT;
res = read_regs(&spi->dev, &tmp, 1);
if (res || tmp != 0x20) {
dev_err(&spi->dev, "cannot read RTC register\n");
return -ENODEV;
}
rtc = devm_rtc_allocate_device(&spi->dev);
if (IS_ERR(rtc))
return PTR_ERR(rtc);
spi_set_drvdata(spi, rtc);
rtc->ops = &r9701_rtc_ops;
rtc->range_min = RTC_TIMESTAMP_BEGIN_2000;
rtc->range_max = RTC_TIMESTAMP_END_2099;
return devm_rtc_register_device(rtc);
}
static struct spi_driver r9701_driver = {
.driver = {
.name = "rtc-r9701",
},
.probe = r9701_probe,
};
module_spi_driver(r9701_driver);
MODULE_DESCRIPTION("r9701 spi RTC driver");
MODULE_AUTHOR("Magnus Damm <[email protected]>");
MODULE_LICENSE("GPL");
MODULE_ALIAS("spi:rtc-r9701");
| linux-master | drivers/rtc/rtc-r9701.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Driver for the Epson RTC module RX-8010 SJ
*
* Copyright(C) Timesys Corporation 2015
* Copyright(C) General Electric Company 2015
*/
#include <linux/bcd.h>
#include <linux/bitops.h>
#include <linux/i2c.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/regmap.h>
#include <linux/rtc.h>
#define RX8010_SEC 0x10
#define RX8010_MIN 0x11
#define RX8010_HOUR 0x12
#define RX8010_WDAY 0x13
#define RX8010_MDAY 0x14
#define RX8010_MONTH 0x15
#define RX8010_YEAR 0x16
#define RX8010_RESV17 0x17
#define RX8010_ALMIN 0x18
#define RX8010_ALHOUR 0x19
#define RX8010_ALWDAY 0x1A
#define RX8010_TCOUNT0 0x1B
#define RX8010_TCOUNT1 0x1C
#define RX8010_EXT 0x1D
#define RX8010_FLAG 0x1E
#define RX8010_CTRL 0x1F
/* 0x20 to 0x2F are user registers */
#define RX8010_RESV30 0x30
#define RX8010_RESV31 0x31
#define RX8010_IRQ 0x32
#define RX8010_EXT_WADA BIT(3)
#define RX8010_FLAG_VLF BIT(1)
#define RX8010_FLAG_AF BIT(3)
#define RX8010_FLAG_TF BIT(4)
#define RX8010_FLAG_UF BIT(5)
#define RX8010_CTRL_AIE BIT(3)
#define RX8010_CTRL_UIE BIT(5)
#define RX8010_CTRL_STOP BIT(6)
#define RX8010_CTRL_TEST BIT(7)
#define RX8010_ALARM_AE BIT(7)
static const struct i2c_device_id rx8010_id[] = {
{ "rx8010", 0 },
{ }
};
MODULE_DEVICE_TABLE(i2c, rx8010_id);
static const __maybe_unused struct of_device_id rx8010_of_match[] = {
{ .compatible = "epson,rx8010" },
{ }
};
MODULE_DEVICE_TABLE(of, rx8010_of_match);
struct rx8010_data {
struct regmap *regs;
struct rtc_device *rtc;
u8 ctrlreg;
};
static irqreturn_t rx8010_irq_1_handler(int irq, void *dev_id)
{
struct i2c_client *client = dev_id;
struct rx8010_data *rx8010 = i2c_get_clientdata(client);
int flagreg, err;
rtc_lock(rx8010->rtc);
err = regmap_read(rx8010->regs, RX8010_FLAG, &flagreg);
if (err) {
rtc_unlock(rx8010->rtc);
return IRQ_NONE;
}
if (flagreg & RX8010_FLAG_VLF)
dev_warn(&client->dev, "Frequency stop detected\n");
if (flagreg & RX8010_FLAG_TF) {
flagreg &= ~RX8010_FLAG_TF;
rtc_update_irq(rx8010->rtc, 1, RTC_PF | RTC_IRQF);
}
if (flagreg & RX8010_FLAG_AF) {
flagreg &= ~RX8010_FLAG_AF;
rtc_update_irq(rx8010->rtc, 1, RTC_AF | RTC_IRQF);
}
if (flagreg & RX8010_FLAG_UF) {
flagreg &= ~RX8010_FLAG_UF;
rtc_update_irq(rx8010->rtc, 1, RTC_UF | RTC_IRQF);
}
err = regmap_write(rx8010->regs, RX8010_FLAG, flagreg);
rtc_unlock(rx8010->rtc);
return err ? IRQ_NONE : IRQ_HANDLED;
}
static int rx8010_get_time(struct device *dev, struct rtc_time *dt)
{
struct rx8010_data *rx8010 = dev_get_drvdata(dev);
u8 date[RX8010_YEAR - RX8010_SEC + 1];
int flagreg, err;
err = regmap_read(rx8010->regs, RX8010_FLAG, &flagreg);
if (err)
return err;
if (flagreg & RX8010_FLAG_VLF) {
dev_warn(dev, "Frequency stop detected\n");
return -EINVAL;
}
err = regmap_bulk_read(rx8010->regs, RX8010_SEC, date, sizeof(date));
if (err)
return err;
dt->tm_sec = bcd2bin(date[RX8010_SEC - RX8010_SEC] & 0x7f);
dt->tm_min = bcd2bin(date[RX8010_MIN - RX8010_SEC] & 0x7f);
dt->tm_hour = bcd2bin(date[RX8010_HOUR - RX8010_SEC] & 0x3f);
dt->tm_mday = bcd2bin(date[RX8010_MDAY - RX8010_SEC] & 0x3f);
dt->tm_mon = bcd2bin(date[RX8010_MONTH - RX8010_SEC] & 0x1f) - 1;
dt->tm_year = bcd2bin(date[RX8010_YEAR - RX8010_SEC]) + 100;
	/* one bit per weekday, bit 0 = Sunday */
	dt->tm_wday = ffs(date[RX8010_WDAY - RX8010_SEC] & 0x7f) - 1;
return 0;
}
static int rx8010_set_time(struct device *dev, struct rtc_time *dt)
{
struct rx8010_data *rx8010 = dev_get_drvdata(dev);
u8 date[RX8010_YEAR - RX8010_SEC + 1];
int err;
/* set STOP bit before changing clock/calendar */
err = regmap_set_bits(rx8010->regs, RX8010_CTRL, RX8010_CTRL_STOP);
if (err)
return err;
date[RX8010_SEC - RX8010_SEC] = bin2bcd(dt->tm_sec);
date[RX8010_MIN - RX8010_SEC] = bin2bcd(dt->tm_min);
date[RX8010_HOUR - RX8010_SEC] = bin2bcd(dt->tm_hour);
date[RX8010_MDAY - RX8010_SEC] = bin2bcd(dt->tm_mday);
date[RX8010_MONTH - RX8010_SEC] = bin2bcd(dt->tm_mon + 1);
date[RX8010_YEAR - RX8010_SEC] = bin2bcd(dt->tm_year - 100);
	/* one bit per weekday, bit 0 = Sunday; this field is not BCD */
	date[RX8010_WDAY - RX8010_SEC] = 1 << dt->tm_wday;
err = regmap_bulk_write(rx8010->regs, RX8010_SEC, date, sizeof(date));
if (err)
return err;
/* clear STOP bit after changing clock/calendar */
err = regmap_clear_bits(rx8010->regs, RX8010_CTRL, RX8010_CTRL_STOP);
if (err)
return err;
err = regmap_clear_bits(rx8010->regs, RX8010_FLAG, RX8010_FLAG_VLF);
if (err)
return err;
return 0;
}
static int rx8010_init(struct device *dev)
{
struct rx8010_data *rx8010 = dev_get_drvdata(dev);
u8 ctrl[2];
int need_clear = 0, err;
/* Initialize reserved registers as specified in datasheet */
err = regmap_write(rx8010->regs, RX8010_RESV17, 0xD8);
if (err)
return err;
err = regmap_write(rx8010->regs, RX8010_RESV30, 0x00);
if (err)
return err;
err = regmap_write(rx8010->regs, RX8010_RESV31, 0x08);
if (err)
return err;
err = regmap_write(rx8010->regs, RX8010_IRQ, 0x00);
if (err)
return err;
err = regmap_bulk_read(rx8010->regs, RX8010_FLAG, ctrl, 2);
if (err)
return err;
if (ctrl[0] & RX8010_FLAG_VLF)
dev_warn(dev, "Frequency stop was detected\n");
if (ctrl[0] & RX8010_FLAG_AF) {
dev_warn(dev, "Alarm was detected\n");
need_clear = 1;
}
if (ctrl[0] & RX8010_FLAG_TF)
need_clear = 1;
if (ctrl[0] & RX8010_FLAG_UF)
need_clear = 1;
if (need_clear) {
ctrl[0] &= ~(RX8010_FLAG_AF | RX8010_FLAG_TF | RX8010_FLAG_UF);
err = regmap_write(rx8010->regs, RX8010_FLAG, ctrl[0]);
if (err)
return err;
}
rx8010->ctrlreg = (ctrl[1] & ~RX8010_CTRL_TEST);
return 0;
}
static int rx8010_read_alarm(struct device *dev, struct rtc_wkalrm *t)
{
struct rx8010_data *rx8010 = dev_get_drvdata(dev);
u8 alarmvals[3];
int flagreg, err;
err = regmap_bulk_read(rx8010->regs, RX8010_ALMIN, alarmvals, 3);
if (err)
return err;
err = regmap_read(rx8010->regs, RX8010_FLAG, &flagreg);
if (err)
return err;
t->time.tm_sec = 0;
t->time.tm_min = bcd2bin(alarmvals[0] & 0x7f);
t->time.tm_hour = bcd2bin(alarmvals[1] & 0x3f);
if (!(alarmvals[2] & RX8010_ALARM_AE))
t->time.tm_mday = bcd2bin(alarmvals[2] & 0x7f);
t->enabled = !!(rx8010->ctrlreg & RX8010_CTRL_AIE);
t->pending = (flagreg & RX8010_FLAG_AF) && t->enabled;
return 0;
}
static int rx8010_set_alarm(struct device *dev, struct rtc_wkalrm *t)
{
struct rx8010_data *rx8010 = dev_get_drvdata(dev);
u8 alarmvals[3];
int err;
if (rx8010->ctrlreg & (RX8010_CTRL_AIE | RX8010_CTRL_UIE)) {
rx8010->ctrlreg &= ~(RX8010_CTRL_AIE | RX8010_CTRL_UIE);
err = regmap_write(rx8010->regs, RX8010_CTRL, rx8010->ctrlreg);
if (err)
return err;
}
err = regmap_clear_bits(rx8010->regs, RX8010_FLAG, RX8010_FLAG_AF);
if (err)
return err;
alarmvals[0] = bin2bcd(t->time.tm_min);
alarmvals[1] = bin2bcd(t->time.tm_hour);
alarmvals[2] = bin2bcd(t->time.tm_mday);
err = regmap_bulk_write(rx8010->regs, RX8010_ALMIN, alarmvals, 2);
if (err)
return err;
err = regmap_clear_bits(rx8010->regs, RX8010_EXT, RX8010_EXT_WADA);
if (err)
return err;
if (alarmvals[2] == 0)
alarmvals[2] |= RX8010_ALARM_AE;
err = regmap_write(rx8010->regs, RX8010_ALWDAY, alarmvals[2]);
if (err)
return err;
if (t->enabled) {
if (rx8010->rtc->uie_rtctimer.enabled)
rx8010->ctrlreg |= RX8010_CTRL_UIE;
if (rx8010->rtc->aie_timer.enabled)
rx8010->ctrlreg |=
(RX8010_CTRL_AIE | RX8010_CTRL_UIE);
err = regmap_write(rx8010->regs, RX8010_CTRL, rx8010->ctrlreg);
if (err)
return err;
}
return 0;
}
static int rx8010_alarm_irq_enable(struct device *dev,
unsigned int enabled)
{
struct rx8010_data *rx8010 = dev_get_drvdata(dev);
int err;
u8 ctrl;
ctrl = rx8010->ctrlreg;
if (enabled) {
if (rx8010->rtc->uie_rtctimer.enabled)
ctrl |= RX8010_CTRL_UIE;
if (rx8010->rtc->aie_timer.enabled)
ctrl |= (RX8010_CTRL_AIE | RX8010_CTRL_UIE);
} else {
if (!rx8010->rtc->uie_rtctimer.enabled)
ctrl &= ~RX8010_CTRL_UIE;
if (!rx8010->rtc->aie_timer.enabled)
ctrl &= ~RX8010_CTRL_AIE;
}
err = regmap_clear_bits(rx8010->regs, RX8010_FLAG, RX8010_FLAG_AF);
if (err)
return err;
if (ctrl != rx8010->ctrlreg) {
rx8010->ctrlreg = ctrl;
err = regmap_write(rx8010->regs, RX8010_CTRL, rx8010->ctrlreg);
if (err)
return err;
}
return 0;
}
static int rx8010_ioctl(struct device *dev, unsigned int cmd, unsigned long arg)
{
struct rx8010_data *rx8010 = dev_get_drvdata(dev);
int tmp, flagreg, err;
switch (cmd) {
case RTC_VL_READ:
err = regmap_read(rx8010->regs, RX8010_FLAG, &flagreg);
if (err)
return err;
tmp = flagreg & RX8010_FLAG_VLF ? RTC_VL_DATA_INVALID : 0;
return put_user(tmp, (unsigned int __user *)arg);
default:
return -ENOIOCTLCMD;
}
}
static const struct rtc_class_ops rx8010_rtc_ops = {
.read_time = rx8010_get_time,
.set_time = rx8010_set_time,
.ioctl = rx8010_ioctl,
.read_alarm = rx8010_read_alarm,
.set_alarm = rx8010_set_alarm,
.alarm_irq_enable = rx8010_alarm_irq_enable,
};
static const struct regmap_config rx8010_regmap_config = {
.name = "rx8010-rtc",
.reg_bits = 8,
.val_bits = 8,
};
static int rx8010_probe(struct i2c_client *client)
{
struct device *dev = &client->dev;
struct rx8010_data *rx8010;
int err = 0;
rx8010 = devm_kzalloc(dev, sizeof(*rx8010), GFP_KERNEL);
if (!rx8010)
return -ENOMEM;
i2c_set_clientdata(client, rx8010);
rx8010->regs = devm_regmap_init_i2c(client, &rx8010_regmap_config);
if (IS_ERR(rx8010->regs))
return PTR_ERR(rx8010->regs);
err = rx8010_init(dev);
if (err)
return err;
rx8010->rtc = devm_rtc_allocate_device(dev);
if (IS_ERR(rx8010->rtc))
return PTR_ERR(rx8010->rtc);
if (client->irq > 0) {
unsigned long irqflags = IRQF_TRIGGER_LOW;
if (dev_fwnode(&client->dev))
irqflags = 0;
err = devm_request_threaded_irq(dev, client->irq, NULL,
rx8010_irq_1_handler,
irqflags | IRQF_ONESHOT,
"rx8010", client);
if (err) {
dev_err(dev, "unable to request IRQ\n");
return err;
}
} else {
clear_bit(RTC_FEATURE_ALARM, rx8010->rtc->features);
}
rx8010->rtc->ops = &rx8010_rtc_ops;
rx8010->rtc->max_user_freq = 1;
rx8010->rtc->range_min = RTC_TIMESTAMP_BEGIN_2000;
rx8010->rtc->range_max = RTC_TIMESTAMP_END_2099;
return devm_rtc_register_device(rx8010->rtc);
}
static struct i2c_driver rx8010_driver = {
.driver = {
.name = "rtc-rx8010",
.of_match_table = of_match_ptr(rx8010_of_match),
},
.probe = rx8010_probe,
.id_table = rx8010_id,
};
module_i2c_driver(rx8010_driver);
MODULE_AUTHOR("Akshay Bhat <[email protected]>");
MODULE_DESCRIPTION("Epson RX8010SJ RTC driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/rtc/rtc-rx8010.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Driver for TI BQ32000 RTC.
*
* Copyright (C) 2009 Semihalf.
* Copyright (C) 2014 Pavel Machek <[email protected]>
*
* You can get hardware description at
* https://www.ti.com/lit/ds/symlink/bq32000.pdf
*/
#include <linux/module.h>
#include <linux/i2c.h>
#include <linux/rtc.h>
#include <linux/init.h>
#include <linux/kstrtox.h>
#include <linux/errno.h>
#include <linux/bcd.h>
#define BQ32K_SECONDS 0x00 /* Seconds register address */
#define BQ32K_SECONDS_MASK 0x7F /* Mask over seconds value */
#define BQ32K_STOP 0x80 /* Oscillator Stop flag */
#define BQ32K_MINUTES 0x01 /* Minutes register address */
#define BQ32K_MINUTES_MASK 0x7F /* Mask over minutes value */
#define BQ32K_OF 0x80 /* Oscillator Failure flag */
#define BQ32K_HOURS_MASK 0x3F /* Mask over hours value */
#define BQ32K_CENT 0x40 /* Century flag */
#define BQ32K_CENT_EN 0x80 /* Century flag enable bit */
#define BQ32K_CALIBRATION 0x07 /* CAL_CFG1, calibration and control */
#define BQ32K_TCH2 0x08 /* Trickle charge enable */
#define BQ32K_CFG2 0x09 /* Trickle charger control */
#define BQ32K_TCFE BIT(6) /* Trickle charge FET bypass */
#define MAX_LEN 10 /* Maximum number of consecutive
* registers for this particular RTC.
*/
struct bq32k_regs {
uint8_t seconds;
uint8_t minutes;
uint8_t cent_hours;
uint8_t day;
uint8_t date;
uint8_t month;
uint8_t years;
};
static struct i2c_driver bq32k_driver;
static int bq32k_read(struct device *dev, void *data, uint8_t off, uint8_t len)
{
struct i2c_client *client = to_i2c_client(dev);
struct i2c_msg msgs[] = {
{
.addr = client->addr,
.flags = 0,
.len = 1,
.buf = &off,
}, {
.addr = client->addr,
.flags = I2C_M_RD,
.len = len,
.buf = data,
}
};
if (i2c_transfer(client->adapter, msgs, 2) == 2)
return 0;
return -EIO;
}
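/*
 * bq32k_read() issues one combined I2C transaction: a single-byte write
 * of the register offset followed by a repeated-start read of 'len' bytes.
 */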
static int bq32k_write(struct device *dev, void *data, uint8_t off, uint8_t len)
{
struct i2c_client *client = to_i2c_client(dev);
uint8_t buffer[MAX_LEN + 1];
buffer[0] = off;
memcpy(&buffer[1], data, len);
if (i2c_master_send(client, buffer, len + 1) == len + 1)
return 0;
return -EIO;
}
static int bq32k_rtc_read_time(struct device *dev, struct rtc_time *tm)
{
struct bq32k_regs regs;
int error;
	error = bq32k_read(dev, &regs, 0, sizeof(regs));
if (error)
return error;
/*
* In case of oscillator failure, the register contents should be
* considered invalid. The flag is cleared the next time the RTC is set.
*/
if (regs.minutes & BQ32K_OF)
return -EINVAL;
tm->tm_sec = bcd2bin(regs.seconds & BQ32K_SECONDS_MASK);
tm->tm_min = bcd2bin(regs.minutes & BQ32K_MINUTES_MASK);
tm->tm_hour = bcd2bin(regs.cent_hours & BQ32K_HOURS_MASK);
tm->tm_mday = bcd2bin(regs.date);
tm->tm_wday = bcd2bin(regs.day) - 1;
tm->tm_mon = bcd2bin(regs.month) - 1;
tm->tm_year = bcd2bin(regs.years) +
((regs.cent_hours & BQ32K_CENT) ? 100 : 0);
return 0;
}
static int bq32k_rtc_set_time(struct device *dev, struct rtc_time *tm)
{
struct bq32k_regs regs;
regs.seconds = bin2bcd(tm->tm_sec);
regs.minutes = bin2bcd(tm->tm_min);
regs.cent_hours = bin2bcd(tm->tm_hour) | BQ32K_CENT_EN;
regs.day = bin2bcd(tm->tm_wday + 1);
regs.date = bin2bcd(tm->tm_mday);
regs.month = bin2bcd(tm->tm_mon + 1);
if (tm->tm_year >= 100) {
regs.cent_hours |= BQ32K_CENT;
regs.years = bin2bcd(tm->tm_year - 100);
} else
regs.years = bin2bcd(tm->tm_year);
	return bq32k_write(dev, &regs, 0, sizeof(regs));
}
static const struct rtc_class_ops bq32k_rtc_ops = {
.read_time = bq32k_rtc_read_time,
.set_time = bq32k_rtc_set_time,
};
static int trickle_charger_of_init(struct device *dev, struct device_node *node)
{
unsigned char reg;
int error;
u32 ohms = 0;
if (of_property_read_u32(node, "trickle-resistor-ohms" , &ohms))
return 0;
switch (ohms) {
case 180+940:
/*
* TCHE[3:0] == 0x05, TCH2 == 1, TCFE == 0 (charging
* over diode and 940ohm resistor)
*/
if (of_property_read_bool(node, "trickle-diode-disable")) {
dev_err(dev, "diode and resistor mismatch\n");
return -EINVAL;
}
reg = 0x05;
break;
case 180+20000:
/* diode disabled */
if (!of_property_read_bool(node, "trickle-diode-disable")) {
dev_err(dev, "bq32k: diode and resistor mismatch\n");
return -EINVAL;
}
reg = 0x45;
break;
default:
dev_err(dev, "invalid resistor value (%d)\n", ohms);
return -EINVAL;
}
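	/*
	 * In both cases the low nibble written to CFG2 is TCHE (0x5 enables
	 * the charger); 0x45 additionally sets TCFE (BIT(6)) to bypass the
	 * diode.
	 */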
	error = bq32k_write(dev, &reg, BQ32K_CFG2, 1);
if (error)
return error;
reg = 0x20;
	error = bq32k_write(dev, &reg, BQ32K_TCH2, 1);
if (error)
return error;
dev_info(dev, "Enabled trickle RTC battery charge.\n");
return 0;
}
static ssize_t bq32k_sysfs_show_tricklecharge_bypass(struct device *dev,
struct device_attribute *attr,
char *buf)
{
int reg, error;
	error = bq32k_read(dev, &reg, BQ32K_CFG2, 1);
if (error)
return error;
return sprintf(buf, "%d\n", (reg & BQ32K_TCFE) ? 1 : 0);
}
static ssize_t bq32k_sysfs_store_tricklecharge_bypass(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
int reg, enable, error;
if (kstrtoint(buf, 0, &enable))
return -EINVAL;
	error = bq32k_read(dev, &reg, BQ32K_CFG2, 1);
if (error)
return error;
if (enable) {
reg |= BQ32K_TCFE;
		error = bq32k_write(dev, &reg, BQ32K_CFG2, 1);
if (error)
return error;
dev_info(dev, "Enabled trickle charge FET bypass.\n");
} else {
reg &= ~BQ32K_TCFE;
		error = bq32k_write(dev, &reg, BQ32K_CFG2, 1);
if (error)
return error;
dev_info(dev, "Disabled trickle charge FET bypass.\n");
}
return count;
}
static DEVICE_ATTR(trickle_charge_bypass, 0644,
bq32k_sysfs_show_tricklecharge_bypass,
bq32k_sysfs_store_tricklecharge_bypass);
static int bq32k_sysfs_register(struct device *dev)
{
return device_create_file(dev, &dev_attr_trickle_charge_bypass);
}
static void bq32k_sysfs_unregister(struct device *dev)
{
device_remove_file(dev, &dev_attr_trickle_charge_bypass);
}
static int bq32k_probe(struct i2c_client *client)
{
struct device *dev = &client->dev;
struct rtc_device *rtc;
uint8_t reg;
int error;
if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C))
return -ENODEV;
/* Check Oscillator Stop flag */
	error = bq32k_read(dev, &reg, BQ32K_SECONDS, 1);
if (!error && (reg & BQ32K_STOP)) {
dev_warn(dev, "Oscillator was halted. Restarting...\n");
reg &= ~BQ32K_STOP;
		error = bq32k_write(dev, &reg, BQ32K_SECONDS, 1);
}
if (error)
return error;
/* Check Oscillator Failure flag */
	error = bq32k_read(dev, &reg, BQ32K_MINUTES, 1);
if (error)
return error;
if (reg & BQ32K_OF)
dev_warn(dev, "Oscillator Failure. Check RTC battery.\n");
if (client->dev.of_node)
trickle_charger_of_init(dev, client->dev.of_node);
rtc = devm_rtc_device_register(&client->dev, bq32k_driver.driver.name,
&bq32k_rtc_ops, THIS_MODULE);
if (IS_ERR(rtc))
return PTR_ERR(rtc);
error = bq32k_sysfs_register(&client->dev);
if (error) {
dev_err(&client->dev,
"Unable to create sysfs entries for rtc bq32000\n");
return error;
}
i2c_set_clientdata(client, rtc);
return 0;
}
static void bq32k_remove(struct i2c_client *client)
{
bq32k_sysfs_unregister(&client->dev);
}
static const struct i2c_device_id bq32k_id[] = {
{ "bq32000", 0 },
{ }
};
MODULE_DEVICE_TABLE(i2c, bq32k_id);
static const __maybe_unused struct of_device_id bq32k_of_match[] = {
{ .compatible = "ti,bq32000" },
{ }
};
MODULE_DEVICE_TABLE(of, bq32k_of_match);
static struct i2c_driver bq32k_driver = {
.driver = {
.name = "bq32k",
.of_match_table = of_match_ptr(bq32k_of_match),
},
.probe = bq32k_probe,
.remove = bq32k_remove,
.id_table = bq32k_id,
};
module_i2c_driver(bq32k_driver);
MODULE_AUTHOR("Semihalf, Piotr Ziecik <[email protected]>");
MODULE_DESCRIPTION("TI BQ32000 I2C RTC driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/rtc/rtc-bq32k.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* drivers/rtc/rtc-pl031.c
*
* Real Time Clock interface for ARM AMBA PrimeCell 031 RTC
*
* Author: Deepak Saxena <[email protected]>
*
* Copyright 2006 (c) MontaVista Software, Inc.
*
* Author: Mian Yousaf Kaukab <[email protected]>
* Copyright 2010 (c) ST-Ericsson AB
*/
#include <linux/module.h>
#include <linux/rtc.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/amba/bus.h>
#include <linux/io.h>
#include <linux/bcd.h>
#include <linux/delay.h>
#include <linux/pm_wakeirq.h>
#include <linux/slab.h>
/*
* Register definitions
*/
#define RTC_DR 0x00 /* Data read register */
#define RTC_MR 0x04 /* Match register */
#define RTC_LR 0x08 /* Data load register */
#define RTC_CR 0x0c /* Control register */
#define RTC_IMSC 0x10 /* Interrupt mask and set register */
#define RTC_RIS 0x14 /* Raw interrupt status register */
#define RTC_MIS 0x18 /* Masked interrupt status register */
#define RTC_ICR 0x1c /* Interrupt clear register */
/* ST variants have additional timer functionality */
#define RTC_TDR 0x20 /* Timer data read register */
#define RTC_TLR 0x24 /* Timer data load register */
#define RTC_TCR 0x28 /* Timer control register */
#define RTC_YDR 0x30 /* Year data read register */
#define RTC_YMR 0x34 /* Year match register */
#define RTC_YLR 0x38 /* Year data load register */
#define RTC_CR_EN (1 << 0) /* counter enable bit */
#define RTC_CR_CWEN (1 << 26) /* Clockwatch enable bit */
#define RTC_TCR_EN (1 << 1) /* Periodic timer enable bit */
/* Common bit definitions for Interrupt status and control registers */
#define RTC_BIT_AI (1 << 0) /* Alarm interrupt bit */
#define RTC_BIT_PI (1 << 1) /* Periodic interrupt bit. ST variants only. */
/* Common bit definitions for ST v2 for reading/writing time */
#define RTC_SEC_SHIFT 0
#define RTC_SEC_MASK (0x3F << RTC_SEC_SHIFT) /* Second [0-59] */
#define RTC_MIN_SHIFT 6
#define RTC_MIN_MASK (0x3F << RTC_MIN_SHIFT) /* Minute [0-59] */
#define RTC_HOUR_SHIFT 12
#define RTC_HOUR_MASK (0x1F << RTC_HOUR_SHIFT) /* Hour [0-23] */
#define RTC_WDAY_SHIFT 17
#define RTC_WDAY_MASK (0x7 << RTC_WDAY_SHIFT) /* Day of Week [1-7] 1=Sunday */
#define RTC_MDAY_SHIFT 20
#define RTC_MDAY_MASK (0x1F << RTC_MDAY_SHIFT) /* Day of Month [1-31] */
#define RTC_MON_SHIFT 25
#define RTC_MON_MASK (0xF << RTC_MON_SHIFT) /* Month [1-12] 1=January */
#define RTC_TIMER_FREQ 32768
/**
* struct pl031_vendor_data - per-vendor variations
* @ops: the vendor-specific operations used on this silicon version
* @clockwatch: if this is an ST Microelectronics silicon version with a
* clockwatch function
* @st_weekday: if this is an ST Microelectronics silicon version that needs
* the weekday fix
* @irqflags: special IRQ flags per variant
* @range_min: earliest time representable by this variant
* @range_max: latest time representable by this variant
*/
struct pl031_vendor_data {
struct rtc_class_ops ops;
bool clockwatch;
bool st_weekday;
unsigned long irqflags;
time64_t range_min;
timeu64_t range_max;
};
struct pl031_local {
struct pl031_vendor_data *vendor;
struct rtc_device *rtc;
void __iomem *base;
};
static int pl031_alarm_irq_enable(struct device *dev,
unsigned int enabled)
{
struct pl031_local *ldata = dev_get_drvdata(dev);
unsigned long imsc;
/* Clear any pending alarm interrupts. */
writel(RTC_BIT_AI, ldata->base + RTC_ICR);
imsc = readl(ldata->base + RTC_IMSC);
if (enabled == 1)
writel(imsc | RTC_BIT_AI, ldata->base + RTC_IMSC);
else
writel(imsc & ~RTC_BIT_AI, ldata->base + RTC_IMSC);
return 0;
}
/*
* Convert Gregorian date to ST v2 RTC format.
*/
static int pl031_stv2_tm_to_time(struct device *dev,
struct rtc_time *tm, unsigned long *st_time,
unsigned long *bcd_year)
{
int year = tm->tm_year + 1900;
int wday = tm->tm_wday;
	/* wday masking does not work in hardware, so wday must be valid here */
if (wday < -1 || wday > 6) {
dev_err(dev, "invalid wday value %d\n", tm->tm_wday);
return -EINVAL;
} else if (wday == -1) {
/* wday is not provided, calculate it here */
struct rtc_time calc_tm;
rtc_time64_to_tm(rtc_tm_to_time64(tm), &calc_tm);
wday = calc_tm.tm_wday;
}
*bcd_year = (bin2bcd(year % 100) | bin2bcd(year / 100) << 8);
*st_time = ((tm->tm_mon + 1) << RTC_MON_SHIFT)
| (tm->tm_mday << RTC_MDAY_SHIFT)
| ((wday + 1) << RTC_WDAY_SHIFT)
| (tm->tm_hour << RTC_HOUR_SHIFT)
| (tm->tm_min << RTC_MIN_SHIFT)
| (tm->tm_sec << RTC_SEC_SHIFT);
return 0;
}
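/*
 * Worked example for the year packing above: 2024 is stored as
 * bin2bcd(24) | (bin2bcd(20) << 8) == 0x24 | (0x20 << 8) == 0x2024.
 */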
/*
* Convert ST v2 RTC format to Gregorian date.
*/
static int pl031_stv2_time_to_tm(unsigned long st_time, unsigned long bcd_year,
struct rtc_time *tm)
{
tm->tm_year = bcd2bin(bcd_year) + (bcd2bin(bcd_year >> 8) * 100);
tm->tm_mon = ((st_time & RTC_MON_MASK) >> RTC_MON_SHIFT) - 1;
tm->tm_mday = ((st_time & RTC_MDAY_MASK) >> RTC_MDAY_SHIFT);
tm->tm_wday = ((st_time & RTC_WDAY_MASK) >> RTC_WDAY_SHIFT) - 1;
tm->tm_hour = ((st_time & RTC_HOUR_MASK) >> RTC_HOUR_SHIFT);
tm->tm_min = ((st_time & RTC_MIN_MASK) >> RTC_MIN_SHIFT);
tm->tm_sec = ((st_time & RTC_SEC_MASK) >> RTC_SEC_SHIFT);
tm->tm_yday = rtc_year_days(tm->tm_mday, tm->tm_mon, tm->tm_year);
tm->tm_year -= 1900;
return 0;
}
static int pl031_stv2_read_time(struct device *dev, struct rtc_time *tm)
{
struct pl031_local *ldata = dev_get_drvdata(dev);
pl031_stv2_time_to_tm(readl(ldata->base + RTC_DR),
readl(ldata->base + RTC_YDR), tm);
return 0;
}
static int pl031_stv2_set_time(struct device *dev, struct rtc_time *tm)
{
unsigned long time;
unsigned long bcd_year;
struct pl031_local *ldata = dev_get_drvdata(dev);
int ret;
ret = pl031_stv2_tm_to_time(dev, tm, &time, &bcd_year);
if (ret == 0) {
writel(bcd_year, ldata->base + RTC_YLR);
writel(time, ldata->base + RTC_LR);
}
return ret;
}
static int pl031_stv2_read_alarm(struct device *dev, struct rtc_wkalrm *alarm)
{
struct pl031_local *ldata = dev_get_drvdata(dev);
int ret;
ret = pl031_stv2_time_to_tm(readl(ldata->base + RTC_MR),
readl(ldata->base + RTC_YMR), &alarm->time);
alarm->pending = readl(ldata->base + RTC_RIS) & RTC_BIT_AI;
alarm->enabled = readl(ldata->base + RTC_IMSC) & RTC_BIT_AI;
return ret;
}
static int pl031_stv2_set_alarm(struct device *dev, struct rtc_wkalrm *alarm)
{
struct pl031_local *ldata = dev_get_drvdata(dev);
unsigned long time;
unsigned long bcd_year;
int ret;
ret = pl031_stv2_tm_to_time(dev, &alarm->time,
&time, &bcd_year);
if (ret == 0) {
writel(bcd_year, ldata->base + RTC_YMR);
writel(time, ldata->base + RTC_MR);
pl031_alarm_irq_enable(dev, alarm->enabled);
}
return ret;
}
static irqreturn_t pl031_interrupt(int irq, void *dev_id)
{
struct pl031_local *ldata = dev_id;
unsigned long rtcmis;
unsigned long events = 0;
rtcmis = readl(ldata->base + RTC_MIS);
if (rtcmis & RTC_BIT_AI) {
writel(RTC_BIT_AI, ldata->base + RTC_ICR);
events |= (RTC_AF | RTC_IRQF);
rtc_update_irq(ldata->rtc, 1, events);
return IRQ_HANDLED;
}
return IRQ_NONE;
}
static int pl031_read_time(struct device *dev, struct rtc_time *tm)
{
struct pl031_local *ldata = dev_get_drvdata(dev);
rtc_time64_to_tm(readl(ldata->base + RTC_DR), tm);
return 0;
}
static int pl031_set_time(struct device *dev, struct rtc_time *tm)
{
struct pl031_local *ldata = dev_get_drvdata(dev);
writel(rtc_tm_to_time64(tm), ldata->base + RTC_LR);
return 0;
}
static int pl031_read_alarm(struct device *dev, struct rtc_wkalrm *alarm)
{
struct pl031_local *ldata = dev_get_drvdata(dev);
rtc_time64_to_tm(readl(ldata->base + RTC_MR), &alarm->time);
alarm->pending = readl(ldata->base + RTC_RIS) & RTC_BIT_AI;
alarm->enabled = readl(ldata->base + RTC_IMSC) & RTC_BIT_AI;
return 0;
}
static int pl031_set_alarm(struct device *dev, struct rtc_wkalrm *alarm)
{
struct pl031_local *ldata = dev_get_drvdata(dev);
writel(rtc_tm_to_time64(&alarm->time), ldata->base + RTC_MR);
pl031_alarm_irq_enable(dev, alarm->enabled);
return 0;
}
static void pl031_remove(struct amba_device *adev)
{
struct pl031_local *ldata = dev_get_drvdata(&adev->dev);
dev_pm_clear_wake_irq(&adev->dev);
device_init_wakeup(&adev->dev, false);
if (adev->irq[0])
free_irq(adev->irq[0], ldata);
amba_release_regions(adev);
}
static int pl031_probe(struct amba_device *adev, const struct amba_id *id)
{
int ret;
struct pl031_local *ldata;
struct pl031_vendor_data *vendor = id->data;
struct rtc_class_ops *ops;
unsigned long time, data;
ret = amba_request_regions(adev, NULL);
if (ret)
goto err_req;
ldata = devm_kzalloc(&adev->dev, sizeof(struct pl031_local),
GFP_KERNEL);
ops = devm_kmemdup(&adev->dev, &vendor->ops, sizeof(vendor->ops),
GFP_KERNEL);
if (!ldata || !ops) {
ret = -ENOMEM;
goto out;
}
ldata->vendor = vendor;
ldata->base = devm_ioremap(&adev->dev, adev->res.start,
resource_size(&adev->res));
if (!ldata->base) {
ret = -ENOMEM;
goto out;
}
amba_set_drvdata(adev, ldata);
dev_dbg(&adev->dev, "designer ID = 0x%02x\n", amba_manf(adev));
dev_dbg(&adev->dev, "revision = 0x%01x\n", amba_rev(adev));
data = readl(ldata->base + RTC_CR);
/* Enable the clockwatch on ST Variants */
if (vendor->clockwatch)
data |= RTC_CR_CWEN;
else
data |= RTC_CR_EN;
writel(data, ldata->base + RTC_CR);
	/*
	 * On ST PL031 variants, the RTC reset value does not provide the
	 * correct weekday for 2000-01-01: the registers come up as BCD year
	 * 0x2000, January 1st, weekday 1 (Sunday). Correct the erroneous
	 * Sunday to Saturday (0x7), which is what 2000-01-01 actually was.
	 */
if (vendor->st_weekday) {
if (readl(ldata->base + RTC_YDR) == 0x2000) {
time = readl(ldata->base + RTC_DR);
if ((time &
(RTC_MON_MASK | RTC_MDAY_MASK | RTC_WDAY_MASK))
== 0x02120000) {
time = time | (0x7 << RTC_WDAY_SHIFT);
writel(0x2000, ldata->base + RTC_YLR);
writel(time, ldata->base + RTC_LR);
}
}
}
device_init_wakeup(&adev->dev, true);
ldata->rtc = devm_rtc_allocate_device(&adev->dev);
if (IS_ERR(ldata->rtc)) {
ret = PTR_ERR(ldata->rtc);
goto out;
}
if (!adev->irq[0])
clear_bit(RTC_FEATURE_ALARM, ldata->rtc->features);
ldata->rtc->ops = ops;
ldata->rtc->range_min = vendor->range_min;
ldata->rtc->range_max = vendor->range_max;
ret = devm_rtc_register_device(ldata->rtc);
if (ret)
goto out;
if (adev->irq[0]) {
ret = request_irq(adev->irq[0], pl031_interrupt,
vendor->irqflags, "rtc-pl031", ldata);
if (ret)
goto out;
dev_pm_set_wake_irq(&adev->dev, adev->irq[0]);
}
return 0;
out:
amba_release_regions(adev);
err_req:
return ret;
}
/* Operations for the original ARM version */
static struct pl031_vendor_data arm_pl031 = {
.ops = {
.read_time = pl031_read_time,
.set_time = pl031_set_time,
.read_alarm = pl031_read_alarm,
.set_alarm = pl031_set_alarm,
.alarm_irq_enable = pl031_alarm_irq_enable,
},
.range_max = U32_MAX,
};
/* The First ST derivative */
static struct pl031_vendor_data stv1_pl031 = {
.ops = {
.read_time = pl031_read_time,
.set_time = pl031_set_time,
.read_alarm = pl031_read_alarm,
.set_alarm = pl031_set_alarm,
.alarm_irq_enable = pl031_alarm_irq_enable,
},
.clockwatch = true,
.st_weekday = true,
.range_max = U32_MAX,
};
/* And the second ST derivative */
static struct pl031_vendor_data stv2_pl031 = {
.ops = {
.read_time = pl031_stv2_read_time,
.set_time = pl031_stv2_set_time,
.read_alarm = pl031_stv2_read_alarm,
.set_alarm = pl031_stv2_set_alarm,
.alarm_irq_enable = pl031_alarm_irq_enable,
},
.clockwatch = true,
.st_weekday = true,
/*
* This variant shares the IRQ with another block and must not
* suspend that IRQ line.
* TODO check if it shares with IRQF_NO_SUSPEND user, else we can
* remove IRQF_COND_SUSPEND
*/
.irqflags = IRQF_SHARED | IRQF_COND_SUSPEND,
.range_min = RTC_TIMESTAMP_BEGIN_0000,
.range_max = RTC_TIMESTAMP_END_9999,
};
static const struct amba_id pl031_ids[] = {
{
.id = 0x00041031,
.mask = 0x000fffff,
.data = &arm_pl031,
},
/* ST Micro variants */
{
.id = 0x00180031,
.mask = 0x00ffffff,
.data = &stv1_pl031,
},
{
.id = 0x00280031,
.mask = 0x00ffffff,
.data = &stv2_pl031,
},
{0, 0},
};
MODULE_DEVICE_TABLE(amba, pl031_ids);
static struct amba_driver pl031_driver = {
.drv = {
.name = "rtc-pl031",
},
.id_table = pl031_ids,
.probe = pl031_probe,
.remove = pl031_remove,
};
module_amba_driver(pl031_driver);
MODULE_AUTHOR("Deepak Saxena <[email protected]>");
MODULE_DESCRIPTION("ARM AMBA PL031 RTC Driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/rtc/rtc-pl031.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* HID Sensor Time Driver
* Copyright (c) 2012, Alexander Holler.
*/
#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/module.h>
#include <linux/hid-sensor-hub.h>
#include <linux/iio/iio.h>
#include <linux/rtc.h>
enum hid_time_channel {
CHANNEL_SCAN_INDEX_YEAR,
CHANNEL_SCAN_INDEX_MONTH,
CHANNEL_SCAN_INDEX_DAY,
CHANNEL_SCAN_INDEX_HOUR,
CHANNEL_SCAN_INDEX_MINUTE,
CHANNEL_SCAN_INDEX_SECOND,
TIME_RTC_CHANNEL_MAX,
};
struct hid_time_state {
struct hid_sensor_hub_callbacks callbacks;
struct hid_sensor_common common_attributes;
struct hid_sensor_hub_attribute_info info[TIME_RTC_CHANNEL_MAX];
struct rtc_time last_time;
spinlock_t lock_last_time;
struct completion comp_last_time;
struct rtc_time time_buf;
struct rtc_device *rtc;
};
static const u32 hid_time_addresses[TIME_RTC_CHANNEL_MAX] = {
HID_USAGE_SENSOR_TIME_YEAR,
HID_USAGE_SENSOR_TIME_MONTH,
HID_USAGE_SENSOR_TIME_DAY,
HID_USAGE_SENSOR_TIME_HOUR,
HID_USAGE_SENSOR_TIME_MINUTE,
HID_USAGE_SENSOR_TIME_SECOND,
};
/* Channel names for verbose error messages */
static const char * const hid_time_channel_names[TIME_RTC_CHANNEL_MAX] = {
"year", "month", "day", "hour", "minute", "second",
};
/* Callback handler to send event after all samples are received and captured */
static int hid_time_proc_event(struct hid_sensor_hub_device *hsdev,
unsigned usage_id, void *priv)
{
unsigned long flags;
struct hid_time_state *time_state = platform_get_drvdata(priv);
spin_lock_irqsave(&time_state->lock_last_time, flags);
time_state->last_time = time_state->time_buf;
spin_unlock_irqrestore(&time_state->lock_last_time, flags);
complete(&time_state->comp_last_time);
return 0;
}
static u32 hid_time_value(size_t raw_len, char *raw_data)
{
switch (raw_len) {
case 1:
return *(u8 *)raw_data;
case 2:
return *(u16 *)raw_data;
case 4:
return *(u32 *)raw_data;
default:
return (u32)(~0U); /* 0xff... or -1 to denote an error */
}
}
static int hid_time_capture_sample(struct hid_sensor_hub_device *hsdev,
unsigned usage_id, size_t raw_len,
char *raw_data, void *priv)
{
struct hid_time_state *time_state = platform_get_drvdata(priv);
struct rtc_time *time_buf = &time_state->time_buf;
switch (usage_id) {
case HID_USAGE_SENSOR_TIME_YEAR:
/*
* The draft for HID-sensors (HUTRR39) currently doesn't define
* the range for the year attribute. Therefore we support
* 8-bit (0-99) and 16- or 32-bit (full) sizes for the year.
*/
if (raw_len == 1) {
time_buf->tm_year = *(u8 *)raw_data;
if (time_buf->tm_year < 70)
/* assume we are in 1970...2069 */
time_buf->tm_year += 100;
} else
time_buf->tm_year =
(int)hid_time_value(raw_len, raw_data)-1900;
break;
case HID_USAGE_SENSOR_TIME_MONTH:
/* sensors are sending the month as 1-12, we need 0-11 */
time_buf->tm_mon = (int)hid_time_value(raw_len, raw_data)-1;
break;
case HID_USAGE_SENSOR_TIME_DAY:
time_buf->tm_mday = (int)hid_time_value(raw_len, raw_data);
break;
case HID_USAGE_SENSOR_TIME_HOUR:
time_buf->tm_hour = (int)hid_time_value(raw_len, raw_data);
break;
case HID_USAGE_SENSOR_TIME_MINUTE:
time_buf->tm_min = (int)hid_time_value(raw_len, raw_data);
break;
case HID_USAGE_SENSOR_TIME_SECOND:
time_buf->tm_sec = (int)hid_time_value(raw_len, raw_data);
break;
default:
return -EINVAL;
}
return 0;
}
/* Small helper: map an attribute ID back to its channel name for error messages. */
static const char *hid_time_attrib_name(u32 attrib_id)
{
static const char unknown[] = "unknown";
unsigned i;
for (i = 0; i < TIME_RTC_CHANNEL_MAX; ++i) {
if (hid_time_addresses[i] == attrib_id)
return hid_time_channel_names[i];
}
return unknown; /* should never happen */
}
static int hid_time_parse_report(struct platform_device *pdev,
struct hid_sensor_hub_device *hsdev,
unsigned usage_id,
struct hid_time_state *time_state)
{
int report_id, i;
for (i = 0; i < TIME_RTC_CHANNEL_MAX; ++i)
if (sensor_hub_input_get_attribute_info(hsdev,
HID_INPUT_REPORT, usage_id,
hid_time_addresses[i],
&time_state->info[i]) < 0)
return -EINVAL;
/* Check the (needed) attributes for sanity */
report_id = time_state->info[0].report_id;
if (report_id < 0) {
dev_err(&pdev->dev, "bad report ID!\n");
return -EINVAL;
}
for (i = 0; i < TIME_RTC_CHANNEL_MAX; ++i) {
if (time_state->info[i].report_id != report_id) {
dev_err(&pdev->dev,
"not all needed attributes inside the same report!\n");
return -EINVAL;
}
if (time_state->info[i].size == 3 ||
time_state->info[i].size > 4) {
dev_err(&pdev->dev,
"attribute '%s' not 8, 16 or 32 bits wide!\n",
hid_time_attrib_name(
time_state->info[i].attrib_id));
return -EINVAL;
}
if (time_state->info[i].units !=
HID_USAGE_SENSOR_UNITS_NOT_SPECIFIED &&
/* allow attribute seconds with unit seconds */
!(time_state->info[i].attrib_id ==
HID_USAGE_SENSOR_TIME_SECOND &&
time_state->info[i].units ==
HID_USAGE_SENSOR_UNITS_SECOND)) {
dev_err(&pdev->dev,
"attribute '%s' hasn't a unit of type 'none'!\n",
hid_time_attrib_name(
time_state->info[i].attrib_id));
return -EINVAL;
}
if (time_state->info[i].unit_expo) {
dev_err(&pdev->dev,
"attribute '%s' hasn't a unit exponent of 1!\n",
hid_time_attrib_name(
time_state->info[i].attrib_id));
return -EINVAL;
}
}
return 0;
}
static int hid_rtc_read_time(struct device *dev, struct rtc_time *tm)
{
unsigned long flags;
struct hid_time_state *time_state = dev_get_drvdata(dev);
int ret;
reinit_completion(&time_state->comp_last_time);
/* get a report with all values through requesting one value */
sensor_hub_input_attr_get_raw_value(time_state->common_attributes.hsdev,
HID_USAGE_SENSOR_TIME, hid_time_addresses[0],
time_state->info[0].report_id, SENSOR_HUB_SYNC, false);
/* wait for all values (event) */
ret = wait_for_completion_killable_timeout(
&time_state->comp_last_time, HZ*6);
if (ret > 0) {
/* no error */
spin_lock_irqsave(&time_state->lock_last_time, flags);
*tm = time_state->last_time;
spin_unlock_irqrestore(&time_state->lock_last_time, flags);
return 0;
}
if (!ret)
		return -EIO; /* timed out */
return ret; /* killed (-ERESTARTSYS) */
}
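/*
 * Reading the time is thus one synchronous round trip: requesting a
 * single attribute triggers a full input report, the capture and event
 * callbacks fill and publish time_buf, and this thread wakes from the
 * completion (or gives up after six seconds).
 */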
static const struct rtc_class_ops hid_time_rtc_ops = {
.read_time = hid_rtc_read_time,
};
static int hid_time_probe(struct platform_device *pdev)
{
int ret = 0;
struct hid_sensor_hub_device *hsdev = dev_get_platdata(&pdev->dev);
struct hid_time_state *time_state = devm_kzalloc(&pdev->dev,
sizeof(struct hid_time_state), GFP_KERNEL);
if (time_state == NULL)
return -ENOMEM;
platform_set_drvdata(pdev, time_state);
spin_lock_init(&time_state->lock_last_time);
init_completion(&time_state->comp_last_time);
time_state->common_attributes.hsdev = hsdev;
time_state->common_attributes.pdev = pdev;
ret = hid_sensor_parse_common_attributes(hsdev,
HID_USAGE_SENSOR_TIME,
&time_state->common_attributes,
NULL,
0);
if (ret) {
dev_err(&pdev->dev, "failed to setup common attributes!\n");
return ret;
}
ret = hid_time_parse_report(pdev, hsdev, HID_USAGE_SENSOR_TIME,
time_state);
if (ret) {
dev_err(&pdev->dev, "failed to setup attributes!\n");
return ret;
}
time_state->callbacks.send_event = hid_time_proc_event;
time_state->callbacks.capture_sample = hid_time_capture_sample;
time_state->callbacks.pdev = pdev;
ret = sensor_hub_register_callback(hsdev, HID_USAGE_SENSOR_TIME,
&time_state->callbacks);
if (ret < 0) {
dev_err(&pdev->dev, "register callback failed!\n");
return ret;
}
ret = sensor_hub_device_open(hsdev);
if (ret) {
dev_err(&pdev->dev, "failed to open sensor hub device!\n");
goto err_open;
}
/*
* Enable HID input processing early in order to be able to read the
* clock already in devm_rtc_device_register().
*/
hid_device_io_start(hsdev->hdev);
time_state->rtc = devm_rtc_device_register(&pdev->dev,
"hid-sensor-time", &hid_time_rtc_ops,
THIS_MODULE);
if (IS_ERR(time_state->rtc)) {
hid_device_io_stop(hsdev->hdev);
ret = PTR_ERR(time_state->rtc);
time_state->rtc = NULL;
dev_err(&pdev->dev, "rtc device register failed!\n");
goto err_rtc;
}
return ret;
err_rtc:
sensor_hub_device_close(hsdev);
err_open:
sensor_hub_remove_callback(hsdev, HID_USAGE_SENSOR_TIME);
return ret;
}
static void hid_time_remove(struct platform_device *pdev)
{
struct hid_sensor_hub_device *hsdev = dev_get_platdata(&pdev->dev);
sensor_hub_device_close(hsdev);
sensor_hub_remove_callback(hsdev, HID_USAGE_SENSOR_TIME);
}
static const struct platform_device_id hid_time_ids[] = {
{
/* Format: HID-SENSOR-usage_id_in_hex_lowercase */
.name = "HID-SENSOR-2000a0",
},
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(platform, hid_time_ids);
static struct platform_driver hid_time_platform_driver = {
.id_table = hid_time_ids,
.driver = {
.name = KBUILD_MODNAME,
},
.probe = hid_time_probe,
.remove_new = hid_time_remove,
};
module_platform_driver(hid_time_platform_driver);
MODULE_DESCRIPTION("HID Sensor Time");
MODULE_AUTHOR("Alexander Holler <[email protected]>");
MODULE_LICENSE("GPL");
MODULE_IMPORT_NS(IIO_HID);
| linux-master | drivers/rtc/rtc-hid-sensor-time.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2012 Avionic Design GmbH
*/
#include <linux/bcd.h>
#include <linux/bitfield.h>
#include <linux/i2c.h>
#include <linux/module.h>
#include <linux/regmap.h>
#include <linux/rtc.h>
#include <linux/of.h>
#include <linux/pm_wakeirq.h>
#define PCF8523_REG_CONTROL1 0x00
#define PCF8523_CONTROL1_CAP_SEL BIT(7)
#define PCF8523_CONTROL1_STOP BIT(5)
#define PCF8523_CONTROL1_AIE BIT(1)
#define PCF8523_REG_CONTROL2 0x01
#define PCF8523_CONTROL2_AF BIT(3)
#define PCF8523_REG_CONTROL3 0x02
#define PCF8523_CONTROL3_PM GENMASK(7, 5)
#define PCF8523_PM_STANDBY 0x7
#define PCF8523_CONTROL3_BLF BIT(2) /* battery low bit, read-only */
#define PCF8523_CONTROL3_BSF BIT(3)
#define PCF8523_REG_SECONDS 0x03
#define PCF8523_SECONDS_OS BIT(7)
#define PCF8523_REG_MINUTES 0x04
#define PCF8523_REG_HOURS 0x05
#define PCF8523_REG_DAYS 0x06
#define PCF8523_REG_WEEKDAYS 0x07
#define PCF8523_REG_MONTHS 0x08
#define PCF8523_REG_YEARS 0x09
#define PCF8523_REG_MINUTE_ALARM 0x0a
#define PCF8523_REG_HOUR_ALARM 0x0b
#define PCF8523_REG_DAY_ALARM 0x0c
#define PCF8523_REG_WEEKDAY_ALARM 0x0d
#define ALARM_DIS BIT(7)
#define PCF8523_REG_OFFSET 0x0e
#define PCF8523_OFFSET_MODE BIT(7)
#define PCF8523_TMR_CLKOUT_CTRL 0x0f
struct pcf8523 {
struct rtc_device *rtc;
struct regmap *regmap;
};
static int pcf8523_load_capacitance(struct pcf8523 *pcf8523, struct device_node *node)
{
u32 load, value = 0;
load = 12500;
of_property_read_u32(node, "quartz-load-femtofarads", &load);
switch (load) {
default:
dev_warn(&pcf8523->rtc->dev, "Unknown quartz-load-femtofarads value: %d. Assuming 12500",
load);
fallthrough;
case 12500:
value = PCF8523_CONTROL1_CAP_SEL;
break;
case 7000:
break;
}
return regmap_update_bits(pcf8523->regmap, PCF8523_REG_CONTROL1,
PCF8523_CONTROL1_CAP_SEL, value);
}
static irqreturn_t pcf8523_irq(int irq, void *dev_id)
{
struct pcf8523 *pcf8523 = dev_id;
u32 value;
int err;
err = regmap_read(pcf8523->regmap, PCF8523_REG_CONTROL2, &value);
if (err < 0)
return IRQ_HANDLED;
if (value & PCF8523_CONTROL2_AF) {
value &= ~PCF8523_CONTROL2_AF;
regmap_write(pcf8523->regmap, PCF8523_REG_CONTROL2, value);
rtc_update_irq(pcf8523->rtc, 1, RTC_IRQF | RTC_AF);
return IRQ_HANDLED;
}
return IRQ_NONE;
}
static int pcf8523_rtc_read_time(struct device *dev, struct rtc_time *tm)
{
struct pcf8523 *pcf8523 = dev_get_drvdata(dev);
u8 regs[10];
int err;
err = regmap_bulk_read(pcf8523->regmap, PCF8523_REG_CONTROL1, regs,
sizeof(regs));
if (err < 0)
return err;
if ((regs[0] & PCF8523_CONTROL1_STOP) || (regs[3] & PCF8523_SECONDS_OS))
return -EINVAL;
tm->tm_sec = bcd2bin(regs[3] & 0x7f);
tm->tm_min = bcd2bin(regs[4] & 0x7f);
tm->tm_hour = bcd2bin(regs[5] & 0x3f);
tm->tm_mday = bcd2bin(regs[6] & 0x3f);
tm->tm_wday = regs[7] & 0x7;
tm->tm_mon = bcd2bin(regs[8] & 0x1f) - 1;
tm->tm_year = bcd2bin(regs[9]) + 100;
return 0;
}
static int pcf8523_rtc_set_time(struct device *dev, struct rtc_time *tm)
{
struct pcf8523 *pcf8523 = dev_get_drvdata(dev);
u8 regs[7];
int err;
err = regmap_update_bits(pcf8523->regmap, PCF8523_REG_CONTROL1,
PCF8523_CONTROL1_STOP, PCF8523_CONTROL1_STOP);
if (err < 0)
return err;
/* This will purposely overwrite PCF8523_SECONDS_OS */
regs[0] = bin2bcd(tm->tm_sec);
regs[1] = bin2bcd(tm->tm_min);
regs[2] = bin2bcd(tm->tm_hour);
regs[3] = bin2bcd(tm->tm_mday);
regs[4] = tm->tm_wday;
regs[5] = bin2bcd(tm->tm_mon + 1);
regs[6] = bin2bcd(tm->tm_year - 100);
err = regmap_bulk_write(pcf8523->regmap, PCF8523_REG_SECONDS, regs,
sizeof(regs));
if (err < 0) {
/*
* If the time cannot be set, restart the RTC anyway. Note
* that errors are ignored if the RTC cannot be started so
* that we have a chance to propagate the original error.
*/
regmap_update_bits(pcf8523->regmap, PCF8523_REG_CONTROL1,
PCF8523_CONTROL1_STOP, 0);
return err;
}
return regmap_update_bits(pcf8523->regmap, PCF8523_REG_CONTROL1,
PCF8523_CONTROL1_STOP, 0);
}
static int pcf8523_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *tm)
{
struct pcf8523 *pcf8523 = dev_get_drvdata(dev);
u8 regs[4];
u32 value;
int err;
err = regmap_bulk_read(pcf8523->regmap, PCF8523_REG_MINUTE_ALARM, regs,
sizeof(regs));
if (err < 0)
return err;
tm->time.tm_sec = 0;
tm->time.tm_min = bcd2bin(regs[0] & 0x7F);
tm->time.tm_hour = bcd2bin(regs[1] & 0x3F);
tm->time.tm_mday = bcd2bin(regs[2] & 0x3F);
tm->time.tm_wday = bcd2bin(regs[3] & 0x7);
err = regmap_read(pcf8523->regmap, PCF8523_REG_CONTROL1, &value);
if (err < 0)
return err;
tm->enabled = !!(value & PCF8523_CONTROL1_AIE);
err = regmap_read(pcf8523->regmap, PCF8523_REG_CONTROL2, &value);
if (err < 0)
return err;
tm->pending = !!(value & PCF8523_CONTROL2_AF);
return 0;
}
static int pcf8523_irq_enable(struct device *dev, unsigned int enabled)
{
struct pcf8523 *pcf8523 = dev_get_drvdata(dev);
return regmap_update_bits(pcf8523->regmap, PCF8523_REG_CONTROL1,
PCF8523_CONTROL1_AIE, enabled ?
PCF8523_CONTROL1_AIE : 0);
}
static int pcf8523_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *tm)
{
struct pcf8523 *pcf8523 = dev_get_drvdata(dev);
u8 regs[5];
int err;
err = pcf8523_irq_enable(dev, 0);
if (err)
return err;
err = regmap_write(pcf8523->regmap, PCF8523_REG_CONTROL2, 0);
if (err < 0)
return err;
regs[0] = bin2bcd(tm->time.tm_min);
regs[1] = bin2bcd(tm->time.tm_hour);
regs[2] = bin2bcd(tm->time.tm_mday);
regs[3] = ALARM_DIS;
err = regmap_bulk_write(pcf8523->regmap, PCF8523_REG_MINUTE_ALARM, regs,
sizeof(regs));
if (err < 0)
return err;
if (tm->enabled)
return pcf8523_irq_enable(dev, tm->enabled);
return 0;
}
static int pcf8523_param_get(struct device *dev, struct rtc_param *param)
{
struct pcf8523 *pcf8523 = dev_get_drvdata(dev);
int ret;
u32 value;
switch (param->param) {
case RTC_PARAM_BACKUP_SWITCH_MODE:
ret = regmap_read(pcf8523->regmap, PCF8523_REG_CONTROL3, &value);
if (ret < 0)
return ret;
value = FIELD_GET(PCF8523_CONTROL3_PM, value);
switch (value) {
case 0x0:
case 0x4:
param->uvalue = RTC_BSM_LEVEL;
break;
case 0x1:
case 0x5:
param->uvalue = RTC_BSM_DIRECT;
break;
case PCF8523_PM_STANDBY:
param->uvalue = RTC_BSM_STANDBY;
break;
default:
param->uvalue = RTC_BSM_DISABLED;
}
break;
default:
return -EINVAL;
}
return 0;
}
static int pcf8523_param_set(struct device *dev, struct rtc_param *param)
{
struct pcf8523 *pcf8523 = dev_get_drvdata(dev);
u8 mode;
switch (param->param) {
case RTC_PARAM_BACKUP_SWITCH_MODE:
switch (param->uvalue) {
case RTC_BSM_DISABLED:
mode = 0x2;
break;
case RTC_BSM_DIRECT:
mode = 0x1;
break;
case RTC_BSM_LEVEL:
mode = 0x0;
break;
case RTC_BSM_STANDBY:
mode = PCF8523_PM_STANDBY;
break;
default:
return -EINVAL;
}
return regmap_update_bits(pcf8523->regmap, PCF8523_REG_CONTROL3,
PCF8523_CONTROL3_PM,
FIELD_PREP(PCF8523_CONTROL3_PM, mode));
break;
default:
return -EINVAL;
}
return 0;
}
static int pcf8523_rtc_ioctl(struct device *dev, unsigned int cmd,
unsigned long arg)
{
struct pcf8523 *pcf8523 = dev_get_drvdata(dev);
unsigned int flags = 0;
u32 value;
int ret;
switch (cmd) {
case RTC_VL_READ:
ret = regmap_read(pcf8523->regmap, PCF8523_REG_CONTROL3, &value);
if (ret < 0)
return ret;
if (value & PCF8523_CONTROL3_BLF)
flags |= RTC_VL_BACKUP_LOW;
ret = regmap_read(pcf8523->regmap, PCF8523_REG_SECONDS, &value);
if (ret < 0)
return ret;
if (value & PCF8523_SECONDS_OS)
flags |= RTC_VL_DATA_INVALID;
return put_user(flags, (unsigned int __user *)arg);
default:
return -ENOIOCTLCMD;
}
}
static int pcf8523_rtc_read_offset(struct device *dev, long *offset)
{
struct pcf8523 *pcf8523 = dev_get_drvdata(dev);
int err;
u32 value;
s8 val;
err = regmap_read(pcf8523->regmap, PCF8523_REG_OFFSET, &value);
if (err < 0)
return err;
/* sign extend the 7-bit offset value */
val = value << 1;
*offset = (value & PCF8523_OFFSET_MODE ? 4069 : 4340) * (val >> 1);
return 0;
}
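/*
 * Worked example of the sign extension above: a raw OFFSET register value
 * of 0x7f (7-bit -1) gives val = (s8)(0x7f << 1) = (s8)0xfe = -2, and
 * val >> 1 = -1, so with the mode bit clear *offset becomes -4340 ppb.
 */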
static int pcf8523_rtc_set_offset(struct device *dev, long offset)
{
struct pcf8523 *pcf8523 = dev_get_drvdata(dev);
long reg_m0, reg_m1;
u32 value;
reg_m0 = clamp(DIV_ROUND_CLOSEST(offset, 4340), -64L, 63L);
reg_m1 = clamp(DIV_ROUND_CLOSEST(offset, 4069), -64L, 63L);
if (abs(reg_m0 * 4340 - offset) < abs(reg_m1 * 4069 - offset))
value = reg_m0 & 0x7f;
else
value = (reg_m1 & 0x7f) | PCF8523_OFFSET_MODE;
return regmap_write(pcf8523->regmap, PCF8523_REG_OFFSET, value);
}
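/*
 * Worked example for the mode selection above: requesting offset =
 * 100000 ppb yields reg_m0 = DIV_ROUND_CLOSEST(100000, 4340) = 23
 * (residual 180 ppb) and reg_m1 = DIV_ROUND_CLOSEST(100000, 4069) = 25
 * (residual 1725 ppb), so mode 0 wins and 0x17 is written with
 * PCF8523_OFFSET_MODE clear.
 */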
static const struct rtc_class_ops pcf8523_rtc_ops = {
.read_time = pcf8523_rtc_read_time,
.set_time = pcf8523_rtc_set_time,
.read_alarm = pcf8523_rtc_read_alarm,
.set_alarm = pcf8523_rtc_set_alarm,
.alarm_irq_enable = pcf8523_irq_enable,
.ioctl = pcf8523_rtc_ioctl,
.read_offset = pcf8523_rtc_read_offset,
.set_offset = pcf8523_rtc_set_offset,
.param_get = pcf8523_param_get,
.param_set = pcf8523_param_set,
};
static const struct regmap_config regmap_config = {
.reg_bits = 8,
.val_bits = 8,
.max_register = 0x13,
};
static int pcf8523_probe(struct i2c_client *client)
{
struct pcf8523 *pcf8523;
struct rtc_device *rtc;
bool wakeup_source = false;
u32 value;
int err;
if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C))
return -ENODEV;
pcf8523 = devm_kzalloc(&client->dev, sizeof(struct pcf8523), GFP_KERNEL);
if (!pcf8523)
return -ENOMEM;
	pcf8523->regmap = devm_regmap_init_i2c(client, &regmap_config);
if (IS_ERR(pcf8523->regmap))
return PTR_ERR(pcf8523->regmap);
i2c_set_clientdata(client, pcf8523);
rtc = devm_rtc_allocate_device(&client->dev);
if (IS_ERR(rtc))
return PTR_ERR(rtc);
pcf8523->rtc = rtc;
err = pcf8523_load_capacitance(pcf8523, client->dev.of_node);
if (err < 0)
dev_warn(&client->dev, "failed to set xtal load capacitance: %d",
err);
err = regmap_read(pcf8523->regmap, PCF8523_REG_SECONDS, &value);
if (err < 0)
return err;
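	/*
	 * An oscillator stop (OS flag set) means the time may be invalid.
	 * If it happened while battery switch-over was in standby mode, the
	 * code below falls back to the default switch-over mode, presumably
	 * so that the RTC can run from backup power again.
	 */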
if (value & PCF8523_SECONDS_OS) {
err = regmap_read(pcf8523->regmap, PCF8523_REG_CONTROL3, &value);
if (err < 0)
return err;
if (FIELD_GET(PCF8523_CONTROL3_PM, value) == PCF8523_PM_STANDBY) {
err = regmap_write(pcf8523->regmap, PCF8523_REG_CONTROL3,
value & ~PCF8523_CONTROL3_PM);
if (err < 0)
return err;
}
}
rtc->ops = &pcf8523_rtc_ops;
rtc->range_min = RTC_TIMESTAMP_BEGIN_2000;
rtc->range_max = RTC_TIMESTAMP_END_2099;
set_bit(RTC_FEATURE_ALARM_RES_MINUTE, rtc->features);
clear_bit(RTC_FEATURE_UPDATE_INTERRUPT, rtc->features);
if (client->irq > 0) {
unsigned long irqflags = IRQF_TRIGGER_LOW;
if (dev_fwnode(&client->dev))
irqflags = 0;
err = regmap_write(pcf8523->regmap, PCF8523_TMR_CLKOUT_CTRL, 0x38);
if (err < 0)
return err;
err = devm_request_threaded_irq(&client->dev, client->irq,
NULL, pcf8523_irq,
IRQF_SHARED | IRQF_ONESHOT | irqflags,
dev_name(&rtc->dev), pcf8523);
if (err)
return err;
dev_pm_set_wake_irq(&client->dev, client->irq);
}
wakeup_source = of_property_read_bool(client->dev.of_node, "wakeup-source");
if (client->irq > 0 || wakeup_source)
device_init_wakeup(&client->dev, true);
return devm_rtc_register_device(rtc);
}
static const struct i2c_device_id pcf8523_id[] = {
{ "pcf8523", 0 },
{ }
};
MODULE_DEVICE_TABLE(i2c, pcf8523_id);
static const struct of_device_id pcf8523_of_match[] = {
{ .compatible = "nxp,pcf8523" },
{ .compatible = "microcrystal,rv8523" },
{ }
};
MODULE_DEVICE_TABLE(of, pcf8523_of_match);
static struct i2c_driver pcf8523_driver = {
.driver = {
.name = "rtc-pcf8523",
.of_match_table = pcf8523_of_match,
},
.probe = pcf8523_probe,
.id_table = pcf8523_id,
};
module_i2c_driver(pcf8523_driver);
MODULE_AUTHOR("Thierry Reding <[email protected]>");
MODULE_DESCRIPTION("NXP PCF8523 RTC driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/rtc/rtc-pcf8523.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* The Netronix embedded controller is a microcontroller found in some
* e-book readers designed by the original design manufacturer Netronix, Inc.
* It contains RTC, battery monitoring, system power management, and PWM
* functionality.
*
* This driver implements access to the RTC time and date.
*
* Copyright 2020 Jonathan Neuschäfer <[email protected]>
*/
#include <linux/mfd/ntxec.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/rtc.h>
#include <linux/types.h>
struct ntxec_rtc {
struct device *dev;
struct ntxec *ec;
};
#define NTXEC_REG_WRITE_YEAR 0x10
#define NTXEC_REG_WRITE_MONTH 0x11
#define NTXEC_REG_WRITE_DAY 0x12
#define NTXEC_REG_WRITE_HOUR 0x13
#define NTXEC_REG_WRITE_MINUTE 0x14
#define NTXEC_REG_WRITE_SECOND 0x15
#define NTXEC_REG_READ_YEAR_MONTH 0x20
#define NTXEC_REG_READ_MDAY_HOUR 0x21
#define NTXEC_REG_READ_MINUTE_SECOND 0x23
static int ntxec_read_time(struct device *dev, struct rtc_time *tm)
{
struct ntxec_rtc *rtc = dev_get_drvdata(dev);
unsigned int value;
int res;
retry:
res = regmap_read(rtc->ec->regmap, NTXEC_REG_READ_MINUTE_SECOND, &value);
if (res < 0)
return res;
tm->tm_min = value >> 8;
tm->tm_sec = value & 0xff;
res = regmap_read(rtc->ec->regmap, NTXEC_REG_READ_MDAY_HOUR, &value);
if (res < 0)
return res;
tm->tm_mday = value >> 8;
tm->tm_hour = value & 0xff;
res = regmap_read(rtc->ec->regmap, NTXEC_REG_READ_YEAR_MONTH, &value);
if (res < 0)
return res;
tm->tm_year = (value >> 8) + 100;
tm->tm_mon = (value & 0xff) - 1;
/*
* Read the minutes/seconds field again. If it changed since the first
* read, we can't assume that the values read so far are consistent,
* and should start from the beginning.
*/
res = regmap_read(rtc->ec->regmap, NTXEC_REG_READ_MINUTE_SECOND, &value);
if (res < 0)
return res;
if (tm->tm_min != value >> 8 || tm->tm_sec != (value & 0xff))
goto retry;
return 0;
}
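/*
 * Example of the torn read the retry above guards against: reading
 * min/sec as 59:59 at 23:59:59, then having the clock tick to midnight
 * before the mday/hour and year/month reads, would otherwise combine the
 * old minute and second with the new day and hour. The second min/sec
 * read only matches the first when no tick happened in between.
 */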
static int ntxec_set_time(struct device *dev, struct rtc_time *tm)
{
struct ntxec_rtc *rtc = dev_get_drvdata(dev);
/*
* To avoid time overflows while we're writing the full date/time,
* set the seconds field to zero before doing anything else. For the
* next 59 seconds (plus however long it takes until the RTC's next
* update of the second field), the seconds field will not overflow
* into the other fields.
*/
struct reg_sequence regs[] = {
{ NTXEC_REG_WRITE_SECOND, ntxec_reg8(0) },
{ NTXEC_REG_WRITE_YEAR, ntxec_reg8(tm->tm_year - 100) },
{ NTXEC_REG_WRITE_MONTH, ntxec_reg8(tm->tm_mon + 1) },
{ NTXEC_REG_WRITE_DAY, ntxec_reg8(tm->tm_mday) },
{ NTXEC_REG_WRITE_HOUR, ntxec_reg8(tm->tm_hour) },
{ NTXEC_REG_WRITE_MINUTE, ntxec_reg8(tm->tm_min) },
{ NTXEC_REG_WRITE_SECOND, ntxec_reg8(tm->tm_sec) },
};
return regmap_multi_reg_write(rtc->ec->regmap, regs, ARRAY_SIZE(regs));
}
static const struct rtc_class_ops ntxec_rtc_ops = {
.read_time = ntxec_read_time,
.set_time = ntxec_set_time,
};
static int ntxec_rtc_probe(struct platform_device *pdev)
{
struct rtc_device *dev;
struct ntxec_rtc *rtc;
pdev->dev.of_node = pdev->dev.parent->of_node;
rtc = devm_kzalloc(&pdev->dev, sizeof(*rtc), GFP_KERNEL);
if (!rtc)
return -ENOMEM;
rtc->dev = &pdev->dev;
rtc->ec = dev_get_drvdata(pdev->dev.parent);
platform_set_drvdata(pdev, rtc);
dev = devm_rtc_allocate_device(&pdev->dev);
if (IS_ERR(dev))
return PTR_ERR(dev);
dev->ops = &ntxec_rtc_ops;
dev->range_min = RTC_TIMESTAMP_BEGIN_2000;
dev->range_max = 9025257599LL; /* 2255-12-31 23:59:59 */
return devm_rtc_register_device(dev);
}
static struct platform_driver ntxec_rtc_driver = {
.driver = {
.name = "ntxec-rtc",
},
.probe = ntxec_rtc_probe,
};
module_platform_driver(ntxec_rtc_driver);
MODULE_AUTHOR("Jonathan Neuschäfer <[email protected]>");
MODULE_DESCRIPTION("RTC driver for Netronix EC");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:ntxec-rtc");
| linux-master | drivers/rtc/rtc-ntxec.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (C) 2009-2010, Lars-Peter Clausen <[email protected]>
* Copyright (C) 2010, Paul Cercueil <[email protected]>
* JZ4740 SoC RTC driver
*/
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_wakeirq.h>
#include <linux/property.h>
#include <linux/reboot.h>
#include <linux/rtc.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#define JZ_REG_RTC_CTRL 0x00
#define JZ_REG_RTC_SEC 0x04
#define JZ_REG_RTC_SEC_ALARM 0x08
#define JZ_REG_RTC_REGULATOR 0x0C
#define JZ_REG_RTC_HIBERNATE 0x20
#define JZ_REG_RTC_WAKEUP_FILTER 0x24
#define JZ_REG_RTC_RESET_COUNTER 0x28
#define JZ_REG_RTC_SCRATCHPAD 0x34
#define JZ_REG_RTC_CKPCR 0x40
/* The following are present on the jz4780 */
#define JZ_REG_RTC_WENR 0x3C
#define JZ_RTC_WENR_WEN BIT(31)
#define JZ_RTC_CTRL_WRDY BIT(7)
#define JZ_RTC_CTRL_1HZ BIT(6)
#define JZ_RTC_CTRL_1HZ_IRQ BIT(5)
#define JZ_RTC_CTRL_AF BIT(4)
#define JZ_RTC_CTRL_AF_IRQ BIT(3)
#define JZ_RTC_CTRL_AE BIT(2)
#define JZ_RTC_CTRL_ENABLE BIT(0)
/* Magic value to enable writes on jz4780 */
#define JZ_RTC_WENR_MAGIC 0xA55A
#define JZ_RTC_WAKEUP_FILTER_MASK 0x0000FFE0
#define JZ_RTC_RESET_COUNTER_MASK 0x00000FE0
#define JZ_RTC_CKPCR_CK32PULL_DIS BIT(4)
#define JZ_RTC_CKPCR_CK32CTL_EN (BIT(2) | BIT(1))
enum jz4740_rtc_type {
ID_JZ4740,
ID_JZ4760,
ID_JZ4780,
};
struct jz4740_rtc {
void __iomem *base;
enum jz4740_rtc_type type;
struct rtc_device *rtc;
struct clk_hw clk32k;
spinlock_t lock;
};
static struct device *dev_for_power_off;
static inline uint32_t jz4740_rtc_reg_read(struct jz4740_rtc *rtc, size_t reg)
{
return readl(rtc->base + reg);
}
static int jz4740_rtc_wait_write_ready(struct jz4740_rtc *rtc)
{
uint32_t ctrl;
return readl_poll_timeout(rtc->base + JZ_REG_RTC_CTRL, ctrl,
ctrl & JZ_RTC_CTRL_WRDY, 0, 1000);
}
static inline int jz4780_rtc_enable_write(struct jz4740_rtc *rtc)
{
uint32_t ctrl;
int ret;
ret = jz4740_rtc_wait_write_ready(rtc);
if (ret != 0)
return ret;
writel(JZ_RTC_WENR_MAGIC, rtc->base + JZ_REG_RTC_WENR);
return readl_poll_timeout(rtc->base + JZ_REG_RTC_WENR, ctrl,
ctrl & JZ_RTC_WENR_WEN, 0, 1000);
}
static inline int jz4740_rtc_reg_write(struct jz4740_rtc *rtc, size_t reg,
uint32_t val)
{
int ret = 0;
if (rtc->type >= ID_JZ4760)
ret = jz4780_rtc_enable_write(rtc);
if (ret == 0)
ret = jz4740_rtc_wait_write_ready(rtc);
if (ret == 0)
writel(val, rtc->base + reg);
return ret;
}
static int jz4740_rtc_ctrl_set_bits(struct jz4740_rtc *rtc, uint32_t mask,
bool set)
{
int ret;
unsigned long flags;
uint32_t ctrl;
spin_lock_irqsave(&rtc->lock, flags);
ctrl = jz4740_rtc_reg_read(rtc, JZ_REG_RTC_CTRL);
/* Don't clear interrupt flags by accident */
ctrl |= JZ_RTC_CTRL_1HZ | JZ_RTC_CTRL_AF;
if (set)
ctrl |= mask;
else
ctrl &= ~mask;
ret = jz4740_rtc_reg_write(rtc, JZ_REG_RTC_CTRL, ctrl);
spin_unlock_irqrestore(&rtc->lock, flags);
return ret;
}
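/*
 * Note on the flag handling above: the 1HZ and AF status bits are cleared
 * by writing 0 to them, so the unconditional OR keeps pending events alive
 * across unrelated control writes. To actually acknowledge a flag, pass it
 * in @mask with @set == false, as the interrupt handler below does.
 */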
static int jz4740_rtc_read_time(struct device *dev, struct rtc_time *time)
{
struct jz4740_rtc *rtc = dev_get_drvdata(dev);
uint32_t secs, secs2;
int timeout = 5;
if (jz4740_rtc_reg_read(rtc, JZ_REG_RTC_SCRATCHPAD) != 0x12345678)
return -EINVAL;
	/* If the seconds register is read while it is being updated, it can
	 * contain a bogus value. This can be avoided by making sure that two
	 * consecutive reads return the same value.
	 */
secs = jz4740_rtc_reg_read(rtc, JZ_REG_RTC_SEC);
secs2 = jz4740_rtc_reg_read(rtc, JZ_REG_RTC_SEC);
while (secs != secs2 && --timeout) {
secs = secs2;
secs2 = jz4740_rtc_reg_read(rtc, JZ_REG_RTC_SEC);
}
if (timeout == 0)
return -EIO;
rtc_time64_to_tm(secs, time);
return 0;
}
static int jz4740_rtc_set_time(struct device *dev, struct rtc_time *time)
{
struct jz4740_rtc *rtc = dev_get_drvdata(dev);
int ret;
ret = jz4740_rtc_reg_write(rtc, JZ_REG_RTC_SEC, rtc_tm_to_time64(time));
if (ret)
return ret;
return jz4740_rtc_reg_write(rtc, JZ_REG_RTC_SCRATCHPAD, 0x12345678);
}
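/*
 * The scratchpad write above pairs with the check in jz4740_rtc_read_time():
 * after backup power is lost the scratchpad no longer reads 0x12345678, so
 * reads fail with -EINVAL until the time is set again (e.g. by running
 * "hwclock --systohc" once the system clock is correct).
 */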
static int jz4740_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
{
struct jz4740_rtc *rtc = dev_get_drvdata(dev);
uint32_t secs;
uint32_t ctrl;
secs = jz4740_rtc_reg_read(rtc, JZ_REG_RTC_SEC_ALARM);
ctrl = jz4740_rtc_reg_read(rtc, JZ_REG_RTC_CTRL);
alrm->enabled = !!(ctrl & JZ_RTC_CTRL_AE);
alrm->pending = !!(ctrl & JZ_RTC_CTRL_AF);
rtc_time64_to_tm(secs, &alrm->time);
return 0;
}
static int jz4740_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
{
int ret;
struct jz4740_rtc *rtc = dev_get_drvdata(dev);
uint32_t secs = lower_32_bits(rtc_tm_to_time64(&alrm->time));
ret = jz4740_rtc_reg_write(rtc, JZ_REG_RTC_SEC_ALARM, secs);
if (!ret)
ret = jz4740_rtc_ctrl_set_bits(rtc,
JZ_RTC_CTRL_AE | JZ_RTC_CTRL_AF_IRQ, alrm->enabled);
return ret;
}
static int jz4740_rtc_alarm_irq_enable(struct device *dev, unsigned int enable)
{
struct jz4740_rtc *rtc = dev_get_drvdata(dev);
return jz4740_rtc_ctrl_set_bits(rtc, JZ_RTC_CTRL_AF_IRQ, enable);
}
static const struct rtc_class_ops jz4740_rtc_ops = {
.read_time = jz4740_rtc_read_time,
.set_time = jz4740_rtc_set_time,
.read_alarm = jz4740_rtc_read_alarm,
.set_alarm = jz4740_rtc_set_alarm,
.alarm_irq_enable = jz4740_rtc_alarm_irq_enable,
};
static irqreturn_t jz4740_rtc_irq(int irq, void *data)
{
struct jz4740_rtc *rtc = data;
uint32_t ctrl;
unsigned long events = 0;
ctrl = jz4740_rtc_reg_read(rtc, JZ_REG_RTC_CTRL);
if (ctrl & JZ_RTC_CTRL_1HZ)
events |= (RTC_UF | RTC_IRQF);
if (ctrl & JZ_RTC_CTRL_AF)
events |= (RTC_AF | RTC_IRQF);
rtc_update_irq(rtc->rtc, 1, events);
jz4740_rtc_ctrl_set_bits(rtc, JZ_RTC_CTRL_1HZ | JZ_RTC_CTRL_AF, false);
return IRQ_HANDLED;
}
static void jz4740_rtc_poweroff(struct device *dev)
{
struct jz4740_rtc *rtc = dev_get_drvdata(dev);
jz4740_rtc_reg_write(rtc, JZ_REG_RTC_HIBERNATE, 1);
}
static void jz4740_rtc_power_off(void)
{
jz4740_rtc_poweroff(dev_for_power_off);
kernel_halt();
}
static const struct of_device_id jz4740_rtc_of_match[] = {
{ .compatible = "ingenic,jz4740-rtc", .data = (void *)ID_JZ4740 },
{ .compatible = "ingenic,jz4760-rtc", .data = (void *)ID_JZ4760 },
{ .compatible = "ingenic,jz4770-rtc", .data = (void *)ID_JZ4780 },
{ .compatible = "ingenic,jz4780-rtc", .data = (void *)ID_JZ4780 },
{},
};
MODULE_DEVICE_TABLE(of, jz4740_rtc_of_match);
static void jz4740_rtc_set_wakeup_params(struct jz4740_rtc *rtc,
struct device_node *np,
unsigned long rate)
{
unsigned long wakeup_ticks, reset_ticks;
unsigned int min_wakeup_pin_assert_time = 60; /* Default: 60ms */
unsigned int reset_pin_assert_time = 100; /* Default: 100ms */
of_property_read_u32(np, "ingenic,reset-pin-assert-time-ms",
&reset_pin_assert_time);
of_property_read_u32(np, "ingenic,min-wakeup-pin-assert-time-ms",
&min_wakeup_pin_assert_time);
	/*
	 * Set the minimum wakeup pin assertion time (default: 60 ms).
	 * Range is 0 to 2 sec if the RTC is clocked at 32 kHz.
	 */
wakeup_ticks = (min_wakeup_pin_assert_time * rate) / 1000;
if (wakeup_ticks < JZ_RTC_WAKEUP_FILTER_MASK)
wakeup_ticks &= JZ_RTC_WAKEUP_FILTER_MASK;
else
wakeup_ticks = JZ_RTC_WAKEUP_FILTER_MASK;
jz4740_rtc_reg_write(rtc, JZ_REG_RTC_WAKEUP_FILTER, wakeup_ticks);
	/*
	 * Set the reset pin low-level assertion time after wakeup
	 * (default: 100 ms). Range is 0 to 125 ms if the RTC is clocked
	 * at 32 kHz.
	 */
reset_ticks = (reset_pin_assert_time * rate) / 1000;
if (reset_ticks < JZ_RTC_RESET_COUNTER_MASK)
reset_ticks &= JZ_RTC_RESET_COUNTER_MASK;
else
reset_ticks = JZ_RTC_RESET_COUNTER_MASK;
jz4740_rtc_reg_write(rtc, JZ_REG_RTC_RESET_COUNTER, reset_ticks);
}
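/*
 * Worked example for the tick conversion above, assuming the usual
 * 32768 Hz clock and the default 60 ms wakeup time: (60 * 32768) / 1000 =
 * 1966 = 0x7ae ticks, masked with JZ_RTC_WAKEUP_FILTER_MASK down to
 * 0x7a0 = 1952 ticks, i.e. roughly 59.6 ms.
 */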
static int jz4740_rtc_clk32k_enable(struct clk_hw *hw)
{
struct jz4740_rtc *rtc = container_of(hw, struct jz4740_rtc, clk32k);
return jz4740_rtc_reg_write(rtc, JZ_REG_RTC_CKPCR,
JZ_RTC_CKPCR_CK32PULL_DIS |
JZ_RTC_CKPCR_CK32CTL_EN);
}
static void jz4740_rtc_clk32k_disable(struct clk_hw *hw)
{
struct jz4740_rtc *rtc = container_of(hw, struct jz4740_rtc, clk32k);
jz4740_rtc_reg_write(rtc, JZ_REG_RTC_CKPCR, 0);
}
static int jz4740_rtc_clk32k_is_enabled(struct clk_hw *hw)
{
struct jz4740_rtc *rtc = container_of(hw, struct jz4740_rtc, clk32k);
u32 ckpcr;
ckpcr = jz4740_rtc_reg_read(rtc, JZ_REG_RTC_CKPCR);
return !!(ckpcr & JZ_RTC_CKPCR_CK32CTL_EN);
}
static const struct clk_ops jz4740_rtc_clk32k_ops = {
.enable = jz4740_rtc_clk32k_enable,
.disable = jz4740_rtc_clk32k_disable,
.is_enabled = jz4740_rtc_clk32k_is_enabled,
};
static int jz4740_rtc_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
struct jz4740_rtc *rtc;
unsigned long rate;
struct clk *clk;
int ret, irq;
rtc = devm_kzalloc(dev, sizeof(*rtc), GFP_KERNEL);
if (!rtc)
return -ENOMEM;
rtc->type = (uintptr_t)device_get_match_data(dev);
irq = platform_get_irq(pdev, 0);
if (irq < 0)
return irq;
rtc->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(rtc->base))
return PTR_ERR(rtc->base);
clk = devm_clk_get_enabled(dev, "rtc");
if (IS_ERR(clk))
return dev_err_probe(dev, PTR_ERR(clk), "Failed to get RTC clock\n");
spin_lock_init(&rtc->lock);
platform_set_drvdata(pdev, rtc);
device_init_wakeup(dev, 1);
ret = dev_pm_set_wake_irq(dev, irq);
if (ret)
return dev_err_probe(dev, ret, "Failed to set wake irq\n");
rtc->rtc = devm_rtc_allocate_device(dev);
if (IS_ERR(rtc->rtc))
return dev_err_probe(dev, PTR_ERR(rtc->rtc),
"Failed to allocate rtc device\n");
rtc->rtc->ops = &jz4740_rtc_ops;
rtc->rtc->range_max = U32_MAX;
rate = clk_get_rate(clk);
jz4740_rtc_set_wakeup_params(rtc, np, rate);
/* Each 1 Hz pulse should happen after (rate) ticks */
jz4740_rtc_reg_write(rtc, JZ_REG_RTC_REGULATOR, rate - 1);
ret = devm_rtc_register_device(rtc->rtc);
if (ret)
return ret;
ret = devm_request_irq(dev, irq, jz4740_rtc_irq, 0,
pdev->name, rtc);
if (ret)
return dev_err_probe(dev, ret, "Failed to request rtc irq\n");
if (of_device_is_system_power_controller(np)) {
dev_for_power_off = dev;
if (!pm_power_off)
pm_power_off = jz4740_rtc_power_off;
else
dev_warn(dev, "Poweroff handler already present!\n");
}
if (device_property_present(dev, "#clock-cells")) {
rtc->clk32k.init = CLK_HW_INIT_HW("clk32k", __clk_get_hw(clk),
&jz4740_rtc_clk32k_ops, 0);
ret = devm_clk_hw_register(dev, &rtc->clk32k);
if (ret)
return dev_err_probe(dev, ret,
"Unable to register clk32k clock\n");
ret = devm_of_clk_add_hw_provider(dev, of_clk_hw_simple_get,
&rtc->clk32k);
if (ret)
return dev_err_probe(dev, ret,
"Unable to register clk32k clock provider\n");
}
return 0;
}
static struct platform_driver jz4740_rtc_driver = {
.probe = jz4740_rtc_probe,
.driver = {
.name = "jz4740-rtc",
.of_match_table = jz4740_rtc_of_match,
},
};
module_platform_driver(jz4740_rtc_driver);
MODULE_AUTHOR("Lars-Peter Clausen <[email protected]>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("RTC driver for the JZ4740 SoC\n");
MODULE_ALIAS("platform:jz4740-rtc");
| linux-master | drivers/rtc/rtc-jz4740.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Real Time Clock interface for StrongARM SA1x00 and XScale PXA2xx
*
* Copyright (c) 2000 Nils Faerber
*
* Based on rtc.c by Paul Gortmaker
*
* Original Driver by Nils Faerber <[email protected]>
*
* Modifications from:
* CIH <[email protected]>
* Nicolas Pitre <[email protected]>
* Andrew Christian <[email protected]>
*
* Converted to the RTC subsystem and Driver Model
* by Richard Purdie <[email protected]>
*/
#include <linux/platform_device.h>
#include <linux/module.h>
#include <linux/clk.h>
#include <linux/rtc.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/of.h>
#include <linux/pm.h>
#include <linux/bitops.h>
#include <linux/io.h>
#define RTSR_HZE BIT(3) /* HZ interrupt enable */
#define RTSR_ALE BIT(2) /* RTC alarm interrupt enable */
#define RTSR_HZ BIT(1) /* HZ rising-edge detected */
#define RTSR_AL BIT(0) /* RTC alarm detected */
#include "rtc-sa1100.h"
#define RTC_DEF_DIVIDER (32768 - 1)
#define RTC_DEF_TRIM 0
#define RTC_FREQ 1024
static irqreturn_t sa1100_rtc_interrupt(int irq, void *dev_id)
{
struct sa1100_rtc *info = dev_get_drvdata(dev_id);
struct rtc_device *rtc = info->rtc;
unsigned int rtsr;
unsigned long events = 0;
spin_lock(&info->lock);
rtsr = readl_relaxed(info->rtsr);
/* clear interrupt sources */
writel_relaxed(0, info->rtsr);
	/* Fix for a nasty initialization problem in the SA11xx RTSR register.
	 * See also the comments in sa1100_rtc_init(). */
if (rtsr & (RTSR_ALE | RTSR_HZE)) {
		/* This is the original behaviour, predating the if test
		 * above: acknowledge only the status bits whose interrupts
		 * are enabled. RTSR_ALE/RTSR_HZE sit two bits above
		 * RTSR_AL/RTSR_HZ, hence the shift by 2. */
writel_relaxed((RTSR_AL | RTSR_HZ) & (rtsr >> 2), info->rtsr);
} else {
		/* For some reason it is possible to enter this routine with
		 * neither interrupt enabled; this has been seen on several
		 * units (a bug in the SA11xx chip?).
		 *
		 * If the stale status bits were left unacknowledged, the
		 * interrupt would fire again immediately, and the resulting
		 * interrupt storm would appear to lock up the processor on
		 * its first call to open(). */
writel_relaxed(RTSR_AL | RTSR_HZ, info->rtsr);
}
/* clear alarm interrupt if it has occurred */
if (rtsr & RTSR_AL)
rtsr &= ~RTSR_ALE;
writel_relaxed(rtsr & (RTSR_ALE | RTSR_HZE), info->rtsr);
/* update irq data & counter */
if (rtsr & RTSR_AL)
events |= RTC_AF | RTC_IRQF;
if (rtsr & RTSR_HZ)
events |= RTC_UF | RTC_IRQF;
rtc_update_irq(rtc, 1, events);
spin_unlock(&info->lock);
return IRQ_HANDLED;
}
static int sa1100_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
{
u32 rtsr;
struct sa1100_rtc *info = dev_get_drvdata(dev);
spin_lock_irq(&info->lock);
rtsr = readl_relaxed(info->rtsr);
if (enabled)
rtsr |= RTSR_ALE;
else
rtsr &= ~RTSR_ALE;
writel_relaxed(rtsr, info->rtsr);
spin_unlock_irq(&info->lock);
return 0;
}
static int sa1100_rtc_read_time(struct device *dev, struct rtc_time *tm)
{
struct sa1100_rtc *info = dev_get_drvdata(dev);
rtc_time64_to_tm(readl_relaxed(info->rcnr), tm);
return 0;
}
static int sa1100_rtc_set_time(struct device *dev, struct rtc_time *tm)
{
struct sa1100_rtc *info = dev_get_drvdata(dev);
writel_relaxed(rtc_tm_to_time64(tm), info->rcnr);
return 0;
}
static int sa1100_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
{
u32 rtsr;
struct sa1100_rtc *info = dev_get_drvdata(dev);
rtsr = readl_relaxed(info->rtsr);
alrm->enabled = (rtsr & RTSR_ALE) ? 1 : 0;
alrm->pending = (rtsr & RTSR_AL) ? 1 : 0;
return 0;
}
static int sa1100_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
{
struct sa1100_rtc *info = dev_get_drvdata(dev);
spin_lock_irq(&info->lock);
writel_relaxed(readl_relaxed(info->rtsr) &
(RTSR_HZE | RTSR_ALE | RTSR_AL), info->rtsr);
writel_relaxed(rtc_tm_to_time64(&alrm->time), info->rtar);
if (alrm->enabled)
writel_relaxed(readl_relaxed(info->rtsr) | RTSR_ALE, info->rtsr);
else
writel_relaxed(readl_relaxed(info->rtsr) & ~RTSR_ALE, info->rtsr);
spin_unlock_irq(&info->lock);
return 0;
}
static int sa1100_rtc_proc(struct device *dev, struct seq_file *seq)
{
struct sa1100_rtc *info = dev_get_drvdata(dev);
seq_printf(seq, "trim/divider\t\t: 0x%08x\n", readl_relaxed(info->rttr));
seq_printf(seq, "RTSR\t\t\t: 0x%08x\n", readl_relaxed(info->rtsr));
return 0;
}
static const struct rtc_class_ops sa1100_rtc_ops = {
.read_time = sa1100_rtc_read_time,
.set_time = sa1100_rtc_set_time,
.read_alarm = sa1100_rtc_read_alarm,
.set_alarm = sa1100_rtc_set_alarm,
.proc = sa1100_rtc_proc,
.alarm_irq_enable = sa1100_rtc_alarm_irq_enable,
};
int sa1100_rtc_init(struct platform_device *pdev, struct sa1100_rtc *info)
{
int ret;
spin_lock_init(&info->lock);
info->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(info->clk)) {
dev_err(&pdev->dev, "failed to find rtc clock source\n");
return PTR_ERR(info->clk);
}
ret = clk_prepare_enable(info->clk);
if (ret)
return ret;
/*
	 * According to the manual we should be able to leave RTTR at zero,
	 * in which case a default divisor for a 32.768 kHz clock is used.
	 * Apparently this doesn't work, at least for my SA1110 rev 5.
	 * If the clock divider is uninitialized, reset it to the default
	 * value to get the 1 Hz clock.
*/
if (readl_relaxed(info->rttr) == 0) {
writel_relaxed(RTC_DEF_DIVIDER + (RTC_DEF_TRIM << 16), info->rttr);
dev_warn(&pdev->dev, "warning: "
"initializing default clock divider/trim value\n");
/* The current RTC value probably doesn't make sense either */
writel_relaxed(0, info->rcnr);
}
info->rtc->ops = &sa1100_rtc_ops;
info->rtc->max_user_freq = RTC_FREQ;
info->rtc->range_max = U32_MAX;
ret = devm_rtc_register_device(info->rtc);
if (ret) {
clk_disable_unprepare(info->clk);
return ret;
}
	/* Fix for a nasty initialization problem in the SA11xx RTSR register.
	 * See also the comments in sa1100_rtc_interrupt().
	 *
	 * Sometimes bit 1 of the RTSR (RTSR_HZ) comes up as 1 after power-up,
	 * signalling a pending interrupt even though interrupts were never
	 * enabled. In this case the bit must be cleared before enabling
	 * interrupts, to keep the nonexistent event from raising a spurious
	 * interrupt.
	 *
	 * In principle the same problem applies to bit 0, although it has
	 * never been observed to happen.
	 *
	 * This issue is addressed both here and in sa1100_rtc_interrupt().
	 * If it were not addressed here, then every time the processor wakes
	 * up with the bit set there would be one spurious interrupt.
	 *
	 * The issue is also dealt with in sa1100_rtc_interrupt() to be on
	 * the safe side, since the condition that leads to this strange
	 * initialization is unknown and could in principle also occur
	 * during normal operation.
	 *
	 * Note that bits 1 and 0 are cleared by writing ONES to the
	 * corresponding bits in RTSR. */
writel_relaxed(RTSR_AL | RTSR_HZ, info->rtsr);
return 0;
}
EXPORT_SYMBOL_GPL(sa1100_rtc_init);
static int sa1100_rtc_probe(struct platform_device *pdev)
{
struct sa1100_rtc *info;
void __iomem *base;
int irq_1hz, irq_alarm;
int ret;
irq_1hz = platform_get_irq_byname(pdev, "rtc 1Hz");
irq_alarm = platform_get_irq_byname(pdev, "rtc alarm");
if (irq_1hz < 0 || irq_alarm < 0)
return -ENODEV;
info = devm_kzalloc(&pdev->dev, sizeof(struct sa1100_rtc), GFP_KERNEL);
if (!info)
return -ENOMEM;
info->irq_1hz = irq_1hz;
info->irq_alarm = irq_alarm;
info->rtc = devm_rtc_allocate_device(&pdev->dev);
if (IS_ERR(info->rtc))
return PTR_ERR(info->rtc);
ret = devm_request_irq(&pdev->dev, irq_1hz, sa1100_rtc_interrupt, 0,
"rtc 1Hz", &pdev->dev);
if (ret) {
dev_err(&pdev->dev, "IRQ %d already in use.\n", irq_1hz);
return ret;
}
ret = devm_request_irq(&pdev->dev, irq_alarm, sa1100_rtc_interrupt, 0,
"rtc Alrm", &pdev->dev);
if (ret) {
dev_err(&pdev->dev, "IRQ %d already in use.\n", irq_alarm);
return ret;
}
base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base))
return PTR_ERR(base);
if (IS_ENABLED(CONFIG_ARCH_SA1100) ||
of_device_is_compatible(pdev->dev.of_node, "mrvl,sa1100-rtc")) {
info->rcnr = base + 0x04;
info->rtsr = base + 0x10;
info->rtar = base + 0x00;
info->rttr = base + 0x08;
} else {
info->rcnr = base + 0x0;
info->rtsr = base + 0x8;
info->rtar = base + 0x4;
info->rttr = base + 0xc;
}
platform_set_drvdata(pdev, info);
device_init_wakeup(&pdev->dev, 1);
return sa1100_rtc_init(pdev, info);
}
static void sa1100_rtc_remove(struct platform_device *pdev)
{
struct sa1100_rtc *info = platform_get_drvdata(pdev);
if (info) {
spin_lock_irq(&info->lock);
writel_relaxed(0, info->rtsr);
spin_unlock_irq(&info->lock);
clk_disable_unprepare(info->clk);
}
}
#ifdef CONFIG_PM_SLEEP
static int sa1100_rtc_suspend(struct device *dev)
{
struct sa1100_rtc *info = dev_get_drvdata(dev);
if (device_may_wakeup(dev))
enable_irq_wake(info->irq_alarm);
return 0;
}
static int sa1100_rtc_resume(struct device *dev)
{
struct sa1100_rtc *info = dev_get_drvdata(dev);
if (device_may_wakeup(dev))
disable_irq_wake(info->irq_alarm);
return 0;
}
#endif
static SIMPLE_DEV_PM_OPS(sa1100_rtc_pm_ops, sa1100_rtc_suspend,
sa1100_rtc_resume);
#ifdef CONFIG_OF
static const struct of_device_id sa1100_rtc_dt_ids[] = {
{ .compatible = "mrvl,sa1100-rtc", },
{ .compatible = "mrvl,mmp-rtc", },
{}
};
MODULE_DEVICE_TABLE(of, sa1100_rtc_dt_ids);
#endif
static struct platform_driver sa1100_rtc_driver = {
.probe = sa1100_rtc_probe,
.remove_new = sa1100_rtc_remove,
.driver = {
.name = "sa1100-rtc",
.pm = &sa1100_rtc_pm_ops,
.of_match_table = of_match_ptr(sa1100_rtc_dt_ids),
},
};
module_platform_driver(sa1100_rtc_driver);
MODULE_AUTHOR("Richard Purdie <[email protected]>");
MODULE_DESCRIPTION("SA11x0/PXA2xx Realtime Clock Driver (RTC)");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:sa1100-rtc");
| linux-master | drivers/rtc/rtc-sa1100.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* ST M48T86 / Dallas DS12887 RTC driver
* Copyright (c) 2006 Tower Technologies
*
* Author: Alessandro Zummo <[email protected]>
*
 * This driver only supports the clock running in BCD and 24H mode.
 * If it is ever adapted to binary or 12H mode, care must be taken
 * not to introduce bugs.
*/
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/rtc.h>
#include <linux/platform_device.h>
#include <linux/bcd.h>
#include <linux/io.h>
#define M48T86_SEC 0x00
#define M48T86_SECALRM 0x01
#define M48T86_MIN 0x02
#define M48T86_MINALRM 0x03
#define M48T86_HOUR 0x04
#define M48T86_HOURALRM 0x05
#define M48T86_DOW 0x06 /* 1 = sunday */
#define M48T86_DOM 0x07
#define M48T86_MONTH 0x08 /* 1 - 12 */
#define M48T86_YEAR 0x09 /* 0 - 99 */
#define M48T86_A 0x0a
#define M48T86_B 0x0b
#define M48T86_B_SET BIT(7)
#define M48T86_B_DM BIT(2)
#define M48T86_B_H24 BIT(1)
#define M48T86_C 0x0c
#define M48T86_D 0x0d
#define M48T86_D_VRT BIT(7)
#define M48T86_NVRAM(x) (0x0e + (x))
#define M48T86_NVRAM_LEN 114
struct m48t86_rtc_info {
void __iomem *index_reg;
void __iomem *data_reg;
struct rtc_device *rtc;
};
static unsigned char m48t86_readb(struct device *dev, unsigned long addr)
{
struct m48t86_rtc_info *info = dev_get_drvdata(dev);
unsigned char value;
writeb(addr, info->index_reg);
value = readb(info->data_reg);
return value;
}
static void m48t86_writeb(struct device *dev,
unsigned char value, unsigned long addr)
{
struct m48t86_rtc_info *info = dev_get_drvdata(dev);
writeb(addr, info->index_reg);
writeb(value, info->data_reg);
}
static int m48t86_rtc_read_time(struct device *dev, struct rtc_time *tm)
{
unsigned char reg;
reg = m48t86_readb(dev, M48T86_B);
if (reg & M48T86_B_DM) {
/* data (binary) mode */
tm->tm_sec = m48t86_readb(dev, M48T86_SEC);
tm->tm_min = m48t86_readb(dev, M48T86_MIN);
tm->tm_hour = m48t86_readb(dev, M48T86_HOUR) & 0x3f;
tm->tm_mday = m48t86_readb(dev, M48T86_DOM);
/* tm_mon is 0-11 */
tm->tm_mon = m48t86_readb(dev, M48T86_MONTH) - 1;
tm->tm_year = m48t86_readb(dev, M48T86_YEAR) + 100;
tm->tm_wday = m48t86_readb(dev, M48T86_DOW);
} else {
/* bcd mode */
tm->tm_sec = bcd2bin(m48t86_readb(dev, M48T86_SEC));
tm->tm_min = bcd2bin(m48t86_readb(dev, M48T86_MIN));
tm->tm_hour = bcd2bin(m48t86_readb(dev, M48T86_HOUR) &
0x3f);
tm->tm_mday = bcd2bin(m48t86_readb(dev, M48T86_DOM));
/* tm_mon is 0-11 */
tm->tm_mon = bcd2bin(m48t86_readb(dev, M48T86_MONTH)) - 1;
tm->tm_year = bcd2bin(m48t86_readb(dev, M48T86_YEAR)) + 100;
tm->tm_wday = bcd2bin(m48t86_readb(dev, M48T86_DOW));
}
/* correct the hour if the clock is in 12h mode */
if (!(reg & M48T86_B_H24))
if (m48t86_readb(dev, M48T86_HOUR) & 0x80)
tm->tm_hour += 12;
return 0;
}
static int m48t86_rtc_set_time(struct device *dev, struct rtc_time *tm)
{
unsigned char reg;
reg = m48t86_readb(dev, M48T86_B);
/* update flag and 24h mode */
reg |= M48T86_B_SET | M48T86_B_H24;
m48t86_writeb(dev, reg, M48T86_B);
if (reg & M48T86_B_DM) {
/* data (binary) mode */
m48t86_writeb(dev, tm->tm_sec, M48T86_SEC);
m48t86_writeb(dev, tm->tm_min, M48T86_MIN);
m48t86_writeb(dev, tm->tm_hour, M48T86_HOUR);
m48t86_writeb(dev, tm->tm_mday, M48T86_DOM);
m48t86_writeb(dev, tm->tm_mon + 1, M48T86_MONTH);
m48t86_writeb(dev, tm->tm_year % 100, M48T86_YEAR);
m48t86_writeb(dev, tm->tm_wday, M48T86_DOW);
} else {
/* bcd mode */
m48t86_writeb(dev, bin2bcd(tm->tm_sec), M48T86_SEC);
m48t86_writeb(dev, bin2bcd(tm->tm_min), M48T86_MIN);
m48t86_writeb(dev, bin2bcd(tm->tm_hour), M48T86_HOUR);
m48t86_writeb(dev, bin2bcd(tm->tm_mday), M48T86_DOM);
m48t86_writeb(dev, bin2bcd(tm->tm_mon + 1), M48T86_MONTH);
m48t86_writeb(dev, bin2bcd(tm->tm_year % 100), M48T86_YEAR);
m48t86_writeb(dev, bin2bcd(tm->tm_wday), M48T86_DOW);
}
/* update ended */
reg &= ~M48T86_B_SET;
m48t86_writeb(dev, reg, M48T86_B);
return 0;
}
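/*
 * Setting M48T86_B_SET above freezes the clock's user registers while the
 * seven time registers are written, so a rollover cannot corrupt a
 * half-written date; clearing it afterwards resumes normal updates.
 */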
static int m48t86_rtc_proc(struct device *dev, struct seq_file *seq)
{
unsigned char reg;
reg = m48t86_readb(dev, M48T86_B);
seq_printf(seq, "mode\t\t: %s\n",
(reg & M48T86_B_DM) ? "binary" : "bcd");
reg = m48t86_readb(dev, M48T86_D);
seq_printf(seq, "battery\t\t: %s\n",
(reg & M48T86_D_VRT) ? "ok" : "exhausted");
return 0;
}
static const struct rtc_class_ops m48t86_rtc_ops = {
.read_time = m48t86_rtc_read_time,
.set_time = m48t86_rtc_set_time,
.proc = m48t86_rtc_proc,
};
static int m48t86_nvram_read(void *priv, unsigned int off, void *buf,
size_t count)
{
struct device *dev = priv;
unsigned int i;
for (i = 0; i < count; i++)
((u8 *)buf)[i] = m48t86_readb(dev, M48T86_NVRAM(off + i));
return 0;
}
static int m48t86_nvram_write(void *priv, unsigned int off, void *buf,
size_t count)
{
struct device *dev = priv;
unsigned int i;
for (i = 0; i < count; i++)
m48t86_writeb(dev, ((u8 *)buf)[i], M48T86_NVRAM(off + i));
return 0;
}
/*
* The RTC is an optional feature at purchase time on some Technologic Systems
* boards. Verify that it actually exists by checking if the last two bytes
* of the NVRAM can be changed.
*
* This is based on the method used in their rtc7800.c example.
*/
static bool m48t86_verify_chip(struct platform_device *pdev)
{
unsigned int offset0 = M48T86_NVRAM(M48T86_NVRAM_LEN - 2);
unsigned int offset1 = M48T86_NVRAM(M48T86_NVRAM_LEN - 1);
unsigned char tmp0, tmp1;
tmp0 = m48t86_readb(&pdev->dev, offset0);
tmp1 = m48t86_readb(&pdev->dev, offset1);
m48t86_writeb(&pdev->dev, 0x00, offset0);
m48t86_writeb(&pdev->dev, 0x55, offset1);
if (m48t86_readb(&pdev->dev, offset1) == 0x55) {
m48t86_writeb(&pdev->dev, 0xaa, offset1);
if (m48t86_readb(&pdev->dev, offset1) == 0xaa &&
m48t86_readb(&pdev->dev, offset0) == 0x00) {
m48t86_writeb(&pdev->dev, tmp0, offset0);
m48t86_writeb(&pdev->dev, tmp1, offset1);
return true;
}
}
return false;
}
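/*
 * The 0x55/0xaa values used above are complementary alternating bit
 * patterns, a common trick to catch floating or stuck data lines that a
 * single test value could miss; the 0x00 written to the neighbouring byte
 * also guards against the two offsets aliasing to the same cell.
 */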
static int m48t86_rtc_probe(struct platform_device *pdev)
{
struct m48t86_rtc_info *info;
unsigned char reg;
int err;
struct nvmem_config m48t86_nvmem_cfg = {
.name = "m48t86_nvram",
.word_size = 1,
.stride = 1,
.size = M48T86_NVRAM_LEN,
.reg_read = m48t86_nvram_read,
.reg_write = m48t86_nvram_write,
.priv = &pdev->dev,
};
info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
if (!info)
return -ENOMEM;
info->index_reg = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(info->index_reg))
return PTR_ERR(info->index_reg);
info->data_reg = devm_platform_ioremap_resource(pdev, 1);
if (IS_ERR(info->data_reg))
return PTR_ERR(info->data_reg);
dev_set_drvdata(&pdev->dev, info);
if (!m48t86_verify_chip(pdev)) {
dev_info(&pdev->dev, "RTC not present\n");
return -ENODEV;
}
info->rtc = devm_rtc_allocate_device(&pdev->dev);
if (IS_ERR(info->rtc))
return PTR_ERR(info->rtc);
info->rtc->ops = &m48t86_rtc_ops;
err = devm_rtc_register_device(info->rtc);
if (err)
return err;
devm_rtc_nvmem_register(info->rtc, &m48t86_nvmem_cfg);
/* read battery status */
reg = m48t86_readb(&pdev->dev, M48T86_D);
dev_info(&pdev->dev, "battery %s\n",
(reg & M48T86_D_VRT) ? "ok" : "exhausted");
return 0;
}
static const struct of_device_id m48t86_rtc_of_ids[] = {
{ .compatible = "st,m48t86" },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, m48t86_rtc_of_ids);
static struct platform_driver m48t86_rtc_platform_driver = {
.driver = {
.name = "rtc-m48t86",
.of_match_table = m48t86_rtc_of_ids,
},
.probe = m48t86_rtc_probe,
};
module_platform_driver(m48t86_rtc_platform_driver);
MODULE_AUTHOR("Alessandro Zummo <[email protected]>");
MODULE_DESCRIPTION("M48T86 RTC driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:rtc-m48t86");
| linux-master | drivers/rtc/rtc-m48t86.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* IBM OPAL RTC driver
* Copyright (C) 2014 IBM
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#define DRVNAME "rtc-opal"
#include <linux/module.h>
#include <linux/err.h>
#include <linux/rtc.h>
#include <linux/delay.h>
#include <linux/bcd.h>
#include <linux/platform_device.h>
#include <linux/of.h>
#include <asm/opal.h>
#include <asm/firmware.h>
static void opal_to_tm(u32 y_m_d, u64 h_m_s_ms, struct rtc_time *tm)
{
tm->tm_year = ((bcd2bin(y_m_d >> 24) * 100) +
bcd2bin((y_m_d >> 16) & 0xff)) - 1900;
tm->tm_mon = bcd2bin((y_m_d >> 8) & 0xff) - 1;
tm->tm_mday = bcd2bin(y_m_d & 0xff);
tm->tm_hour = bcd2bin((h_m_s_ms >> 56) & 0xff);
tm->tm_min = bcd2bin((h_m_s_ms >> 48) & 0xff);
tm->tm_sec = bcd2bin((h_m_s_ms >> 40) & 0xff);
tm->tm_wday = -1;
}
static void tm_to_opal(struct rtc_time *tm, u32 *y_m_d, u64 *h_m_s_ms)
{
*y_m_d |= ((u32)bin2bcd((tm->tm_year + 1900) / 100)) << 24;
*y_m_d |= ((u32)bin2bcd((tm->tm_year + 1900) % 100)) << 16;
*y_m_d |= ((u32)bin2bcd((tm->tm_mon + 1))) << 8;
*y_m_d |= ((u32)bin2bcd(tm->tm_mday));
*h_m_s_ms |= ((u64)bin2bcd(tm->tm_hour)) << 56;
*h_m_s_ms |= ((u64)bin2bcd(tm->tm_min)) << 48;
*h_m_s_ms |= ((u64)bin2bcd(tm->tm_sec)) << 40;
}
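/*
 * Packing example for the helpers above: 2015-04-09 23:05:30 becomes
 * y_m_d = 0x20150409 and h_m_s_ms = 0x2305300000000000, i.e. plain BCD
 * digits read back left to right.
 */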
static int opal_get_rtc_time(struct device *dev, struct rtc_time *tm)
{
s64 rc = OPAL_BUSY;
int retries = 10;
u32 y_m_d;
u64 h_m_s_ms;
__be32 __y_m_d;
__be64 __h_m_s_ms;
while (rc == OPAL_BUSY || rc == OPAL_BUSY_EVENT) {
rc = opal_rtc_read(&__y_m_d, &__h_m_s_ms);
if (rc == OPAL_BUSY_EVENT) {
msleep(OPAL_BUSY_DELAY_MS);
opal_poll_events(NULL);
} else if (rc == OPAL_BUSY) {
msleep(OPAL_BUSY_DELAY_MS);
} else if (rc == OPAL_HARDWARE || rc == OPAL_INTERNAL_ERROR) {
if (retries--) {
msleep(10); /* Wait 10ms before retry */
rc = OPAL_BUSY; /* go around again */
}
}
}
if (rc != OPAL_SUCCESS)
return -EIO;
y_m_d = be32_to_cpu(__y_m_d);
h_m_s_ms = be64_to_cpu(__h_m_s_ms);
opal_to_tm(y_m_d, h_m_s_ms, tm);
return 0;
}
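/*
 * With 10 retries spaced 10 ms apart, the OPAL_HARDWARE /
 * OPAL_INTERNAL_ERROR path above gives the service processor roughly
 * 100 ms to recover before the read fails with -EIO; OPAL_BUSY returns
 * are retried indefinitely with OPAL_BUSY_DELAY_MS waits in between.
 */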
static int opal_set_rtc_time(struct device *dev, struct rtc_time *tm)
{
s64 rc = OPAL_BUSY;
int retries = 10;
u32 y_m_d = 0;
u64 h_m_s_ms = 0;
tm_to_opal(tm, &y_m_d, &h_m_s_ms);
while (rc == OPAL_BUSY || rc == OPAL_BUSY_EVENT) {
rc = opal_rtc_write(y_m_d, h_m_s_ms);
if (rc == OPAL_BUSY_EVENT) {
msleep(OPAL_BUSY_DELAY_MS);
opal_poll_events(NULL);
} else if (rc == OPAL_BUSY) {
msleep(OPAL_BUSY_DELAY_MS);
} else if (rc == OPAL_HARDWARE || rc == OPAL_INTERNAL_ERROR) {
if (retries--) {
msleep(10); /* Wait 10ms before retry */
rc = OPAL_BUSY; /* go around again */
}
}
}
return rc == OPAL_SUCCESS ? 0 : -EIO;
}
/*
* TPO Timed Power-On
*
 * The TPO get/set OPAL calls only care about the hour and minute; to stay
 * consistent with the rtc utility time-conversion functions, we store the
 * value in a u64 and shift it by 32 bits before use.
*/
static int opal_get_tpo_time(struct device *dev, struct rtc_wkalrm *alarm)
{
__be32 __y_m_d, __h_m;
struct opal_msg msg;
int rc, token;
u64 h_m_s_ms;
u32 y_m_d;
token = opal_async_get_token_interruptible();
if (token < 0) {
if (token != -ERESTARTSYS)
pr_err("Failed to get the async token\n");
return token;
}
rc = opal_tpo_read(token, &__y_m_d, &__h_m);
if (rc != OPAL_ASYNC_COMPLETION) {
rc = -EIO;
goto exit;
}
rc = opal_async_wait_response(token, &msg);
if (rc) {
rc = -EIO;
goto exit;
}
rc = opal_get_async_rc(msg);
if (rc != OPAL_SUCCESS) {
rc = -EIO;
goto exit;
}
y_m_d = be32_to_cpu(__y_m_d);
h_m_s_ms = ((u64)be32_to_cpu(__h_m) << 32);
/* check if no alarm is set */
if (y_m_d == 0 && h_m_s_ms == 0) {
pr_debug("No alarm is set\n");
rc = -ENOENT;
goto exit;
} else {
pr_debug("Alarm set to %x %llx\n", y_m_d, h_m_s_ms);
}
opal_to_tm(y_m_d, h_m_s_ms, &alarm->time);
exit:
opal_async_release_token(token);
return rc;
}
/* Set Timed Power-On */
static int opal_set_tpo_time(struct device *dev, struct rtc_wkalrm *alarm)
{
u64 h_m_s_ms = 0;
struct opal_msg msg;
u32 y_m_d = 0;
int token, rc;
/* if alarm is enabled */
if (alarm->enabled) {
tm_to_opal(&alarm->time, &y_m_d, &h_m_s_ms);
pr_debug("Alarm set to %x %llx\n", y_m_d, h_m_s_ms);
} else {
pr_debug("Alarm getting disabled\n");
}
token = opal_async_get_token_interruptible();
if (token < 0) {
if (token != -ERESTARTSYS)
pr_err("Failed to get the async token\n");
return token;
}
/* TPO, we care about hour and minute */
rc = opal_tpo_write(token, y_m_d,
(u32)((h_m_s_ms >> 32) & 0xffff0000));
if (rc != OPAL_ASYNC_COMPLETION) {
rc = -EIO;
goto exit;
}
rc = opal_async_wait_response(token, &msg);
if (rc) {
rc = -EIO;
goto exit;
}
rc = opal_get_async_rc(msg);
if (rc != OPAL_SUCCESS)
rc = -EIO;
exit:
opal_async_release_token(token);
return rc;
}
static int opal_tpo_alarm_irq_enable(struct device *dev, unsigned int enabled)
{
struct rtc_wkalrm alarm = { .enabled = 0 };
/*
* TPO is automatically enabled when opal_set_tpo_time() is called with
* non-zero rtc-time. We only handle disable case which needs to be
* explicitly told to opal.
*/
return enabled ? 0 : opal_set_tpo_time(dev, &alarm);
}
static const struct rtc_class_ops opal_rtc_ops = {
.read_time = opal_get_rtc_time,
.set_time = opal_set_rtc_time,
.read_alarm = opal_get_tpo_time,
.set_alarm = opal_set_tpo_time,
.alarm_irq_enable = opal_tpo_alarm_irq_enable,
};
static int opal_rtc_probe(struct platform_device *pdev)
{
struct rtc_device *rtc;
rtc = devm_rtc_allocate_device(&pdev->dev);
if (IS_ERR(rtc))
return PTR_ERR(rtc);
if (pdev->dev.of_node &&
(of_property_read_bool(pdev->dev.of_node, "wakeup-source") ||
of_property_read_bool(pdev->dev.of_node, "has-tpo")/* legacy */))
device_set_wakeup_capable(&pdev->dev, true);
else
clear_bit(RTC_FEATURE_ALARM, rtc->features);
rtc->ops = &opal_rtc_ops;
rtc->range_min = RTC_TIMESTAMP_BEGIN_0000;
rtc->range_max = RTC_TIMESTAMP_END_9999;
clear_bit(RTC_FEATURE_UPDATE_INTERRUPT, rtc->features);
return devm_rtc_register_device(rtc);
}
static const struct of_device_id opal_rtc_match[] = {
{
.compatible = "ibm,opal-rtc",
},
{ }
};
MODULE_DEVICE_TABLE(of, opal_rtc_match);
static const struct platform_device_id opal_rtc_driver_ids[] = {
{
.name = "opal-rtc",
},
{ }
};
MODULE_DEVICE_TABLE(platform, opal_rtc_driver_ids);
static struct platform_driver opal_rtc_driver = {
.probe = opal_rtc_probe,
.id_table = opal_rtc_driver_ids,
.driver = {
.name = DRVNAME,
.of_match_table = opal_rtc_match,
},
};
static int __init opal_rtc_init(void)
{
if (!firmware_has_feature(FW_FEATURE_OPAL))
return -ENODEV;
return platform_driver_register(&opal_rtc_driver);
}
static void __exit opal_rtc_exit(void)
{
platform_driver_unregister(&opal_rtc_driver);
}
MODULE_AUTHOR("Neelesh Gupta <[email protected]>");
MODULE_DESCRIPTION("IBM OPAL RTC driver");
MODULE_LICENSE("GPL");
module_init(opal_rtc_init);
module_exit(opal_rtc_exit);
| linux-master | drivers/rtc/rtc-opal.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Real Time Clock driver for Wolfson Microelectronics WM8350
*
* Copyright (C) 2007, 2008 Wolfson Microelectronics PLC.
*
* Author: Liam Girdwood
* [email protected]
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/time.h>
#include <linux/rtc.h>
#include <linux/bcd.h>
#include <linux/interrupt.h>
#include <linux/ioctl.h>
#include <linux/completion.h>
#include <linux/mfd/wm8350/rtc.h>
#include <linux/mfd/wm8350/core.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
#define WM8350_SET_ALM_RETRIES 5
#define WM8350_SET_TIME_RETRIES 5
#define WM8350_GET_TIME_RETRIES 5
/*
* Read current time and date in RTC
*/
static int wm8350_rtc_readtime(struct device *dev, struct rtc_time *tm)
{
struct wm8350 *wm8350 = dev_get_drvdata(dev);
u16 time1[4], time2[4];
int retries = WM8350_GET_TIME_RETRIES, ret;
/*
* Read the time twice and compare.
* If time1 == time2, then time is valid else retry.
*/
do {
ret = wm8350_block_read(wm8350, WM8350_RTC_SECONDS_MINUTES,
4, time1);
if (ret < 0)
return ret;
ret = wm8350_block_read(wm8350, WM8350_RTC_SECONDS_MINUTES,
4, time2);
if (ret < 0)
return ret;
if (memcmp(time1, time2, sizeof(time1)) == 0) {
tm->tm_sec = time1[0] & WM8350_RTC_SECS_MASK;
tm->tm_min = (time1[0] & WM8350_RTC_MINS_MASK)
>> WM8350_RTC_MINS_SHIFT;
tm->tm_hour = time1[1] & WM8350_RTC_HRS_MASK;
tm->tm_wday = ((time1[1] >> WM8350_RTC_DAY_SHIFT)
& 0x7) - 1;
tm->tm_mon = ((time1[2] & WM8350_RTC_MTH_MASK)
>> WM8350_RTC_MTH_SHIFT) - 1;
tm->tm_mday = (time1[2] & WM8350_RTC_DATE_MASK);
tm->tm_year = ((time1[3] & WM8350_RTC_YHUNDREDS_MASK)
>> WM8350_RTC_YHUNDREDS_SHIFT) * 100;
tm->tm_year += time1[3] & WM8350_RTC_YUNITS_MASK;
tm->tm_yday = rtc_year_days(tm->tm_mday, tm->tm_mon,
tm->tm_year);
tm->tm_year -= 1900;
dev_dbg(dev, "Read (%d left): %04x %04x %04x %04x\n",
retries,
time1[0], time1[1], time1[2], time1[3]);
return 0;
}
} while (retries--);
dev_err(dev, "timed out reading RTC time\n");
return -EIO;
}
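/*
 * Year decoding example for the code above: YHUNDREDS = 20 and
 * YUNITS = 15 yield 20 * 100 + 15 = 2015, stored as tm_year = 115 after
 * the 1900 subtraction.
 */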
/*
* Set current time and date in RTC
*/
static int wm8350_rtc_settime(struct device *dev, struct rtc_time *tm)
{
struct wm8350 *wm8350 = dev_get_drvdata(dev);
u16 time[4];
u16 rtc_ctrl;
int ret, retries = WM8350_SET_TIME_RETRIES;
time[0] = tm->tm_sec;
time[0] |= tm->tm_min << WM8350_RTC_MINS_SHIFT;
time[1] = tm->tm_hour;
time[1] |= (tm->tm_wday + 1) << WM8350_RTC_DAY_SHIFT;
time[2] = tm->tm_mday;
time[2] |= (tm->tm_mon + 1) << WM8350_RTC_MTH_SHIFT;
time[3] = ((tm->tm_year + 1900) / 100) << WM8350_RTC_YHUNDREDS_SHIFT;
time[3] |= (tm->tm_year + 1900) % 100;
dev_dbg(dev, "Setting: %04x %04x %04x %04x\n",
time[0], time[1], time[2], time[3]);
/* Set RTC_SET to stop the clock */
ret = wm8350_set_bits(wm8350, WM8350_RTC_TIME_CONTROL, WM8350_RTC_SET);
if (ret < 0)
return ret;
/* Wait until confirmation of stopping */
do {
rtc_ctrl = wm8350_reg_read(wm8350, WM8350_RTC_TIME_CONTROL);
schedule_timeout_uninterruptible(msecs_to_jiffies(1));
} while (--retries && !(rtc_ctrl & WM8350_RTC_STS));
if (!retries) {
dev_err(dev, "timed out on set confirmation\n");
return -EIO;
}
/* Write time to RTC */
ret = wm8350_block_write(wm8350, WM8350_RTC_SECONDS_MINUTES, 4, time);
if (ret < 0)
return ret;
/* Clear RTC_SET to start the clock */
ret = wm8350_clear_bits(wm8350, WM8350_RTC_TIME_CONTROL,
WM8350_RTC_SET);
return ret;
}
/*
* Read alarm time and date in RTC
*/
static int wm8350_rtc_readalarm(struct device *dev, struct rtc_wkalrm *alrm)
{
struct wm8350 *wm8350 = dev_get_drvdata(dev);
struct rtc_time *tm = &alrm->time;
u16 time[4];
int ret;
ret = wm8350_block_read(wm8350, WM8350_ALARM_SECONDS_MINUTES, 4, time);
if (ret < 0)
return ret;
tm->tm_sec = time[0] & WM8350_RTC_ALMSECS_MASK;
if (tm->tm_sec == WM8350_RTC_ALMSECS_MASK)
tm->tm_sec = -1;
tm->tm_min = time[0] & WM8350_RTC_ALMMINS_MASK;
if (tm->tm_min == WM8350_RTC_ALMMINS_MASK)
tm->tm_min = -1;
else
tm->tm_min >>= WM8350_RTC_ALMMINS_SHIFT;
tm->tm_hour = time[1] & WM8350_RTC_ALMHRS_MASK;
if (tm->tm_hour == WM8350_RTC_ALMHRS_MASK)
tm->tm_hour = -1;
tm->tm_wday = ((time[1] >> WM8350_RTC_ALMDAY_SHIFT) & 0x7) - 1;
if (tm->tm_wday > 7)
tm->tm_wday = -1;
tm->tm_mon = time[2] & WM8350_RTC_ALMMTH_MASK;
if (tm->tm_mon == WM8350_RTC_ALMMTH_MASK)
tm->tm_mon = -1;
else
tm->tm_mon = (tm->tm_mon >> WM8350_RTC_ALMMTH_SHIFT) - 1;
tm->tm_mday = (time[2] & WM8350_RTC_ALMDATE_MASK);
if (tm->tm_mday == WM8350_RTC_ALMDATE_MASK)
tm->tm_mday = -1;
tm->tm_year = -1;
alrm->enabled = !(time[3] & WM8350_RTC_ALMSTS);
return 0;
}
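/*
 * A field that reads back as its all-ones mask (e.g. tm_mday ==
 * WM8350_RTC_ALMDATE_MASK before the check) is a "don't care" wildcard
 * that matches every value, which is why it is reported as -1 here and
 * written back as the mask in wm8350_rtc_setalarm().
 */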
static int wm8350_rtc_stop_alarm(struct wm8350 *wm8350)
{
int retries = WM8350_SET_ALM_RETRIES;
u16 rtc_ctrl;
int ret;
/* Set RTC_SET to stop the clock */
ret = wm8350_set_bits(wm8350, WM8350_RTC_TIME_CONTROL,
WM8350_RTC_ALMSET);
if (ret < 0)
return ret;
/* Wait until confirmation of stopping */
do {
rtc_ctrl = wm8350_reg_read(wm8350, WM8350_RTC_TIME_CONTROL);
schedule_timeout_uninterruptible(msecs_to_jiffies(1));
} while (retries-- && !(rtc_ctrl & WM8350_RTC_ALMSTS));
if (!(rtc_ctrl & WM8350_RTC_ALMSTS))
return -ETIMEDOUT;
return 0;
}
static int wm8350_rtc_start_alarm(struct wm8350 *wm8350)
{
int ret;
int retries = WM8350_SET_ALM_RETRIES;
u16 rtc_ctrl;
ret = wm8350_clear_bits(wm8350, WM8350_RTC_TIME_CONTROL,
WM8350_RTC_ALMSET);
if (ret < 0)
return ret;
/* Wait until confirmation */
do {
rtc_ctrl = wm8350_reg_read(wm8350, WM8350_RTC_TIME_CONTROL);
schedule_timeout_uninterruptible(msecs_to_jiffies(1));
} while (retries-- && rtc_ctrl & WM8350_RTC_ALMSTS);
if (rtc_ctrl & WM8350_RTC_ALMSTS)
return -ETIMEDOUT;
return 0;
}
static int wm8350_rtc_alarm_irq_enable(struct device *dev,
unsigned int enabled)
{
struct wm8350 *wm8350 = dev_get_drvdata(dev);
if (enabled)
return wm8350_rtc_start_alarm(wm8350);
else
return wm8350_rtc_stop_alarm(wm8350);
}
static int wm8350_rtc_setalarm(struct device *dev, struct rtc_wkalrm *alrm)
{
struct wm8350 *wm8350 = dev_get_drvdata(dev);
struct rtc_time *tm = &alrm->time;
u16 time[3];
int ret;
memset(time, 0, sizeof(time));
if (tm->tm_sec != -1)
time[0] |= tm->tm_sec;
else
time[0] |= WM8350_RTC_ALMSECS_MASK;
if (tm->tm_min != -1)
time[0] |= tm->tm_min << WM8350_RTC_ALMMINS_SHIFT;
else
time[0] |= WM8350_RTC_ALMMINS_MASK;
if (tm->tm_hour != -1)
time[1] |= tm->tm_hour;
else
time[1] |= WM8350_RTC_ALMHRS_MASK;
if (tm->tm_wday != -1)
time[1] |= (tm->tm_wday + 1) << WM8350_RTC_ALMDAY_SHIFT;
else
time[1] |= WM8350_RTC_ALMDAY_MASK;
if (tm->tm_mday != -1)
time[2] |= tm->tm_mday;
else
time[2] |= WM8350_RTC_ALMDATE_MASK;
if (tm->tm_mon != -1)
time[2] |= (tm->tm_mon + 1) << WM8350_RTC_ALMMTH_SHIFT;
else
time[2] |= WM8350_RTC_ALMMTH_MASK;
ret = wm8350_rtc_stop_alarm(wm8350);
if (ret < 0)
return ret;
/* Write time to RTC */
ret = wm8350_block_write(wm8350, WM8350_ALARM_SECONDS_MINUTES,
3, time);
if (ret < 0)
return ret;
if (alrm->enabled)
ret = wm8350_rtc_start_alarm(wm8350);
return ret;
}
static irqreturn_t wm8350_rtc_alarm_handler(int irq, void *data)
{
struct wm8350 *wm8350 = data;
struct rtc_device *rtc = wm8350->rtc.rtc;
int ret;
rtc_update_irq(rtc, 1, RTC_IRQF | RTC_AF);
/* Make it one shot */
ret = wm8350_set_bits(wm8350, WM8350_RTC_TIME_CONTROL,
WM8350_RTC_ALMSET);
if (ret != 0) {
dev_err(&(wm8350->rtc.pdev->dev),
"Failed to disable alarm: %d\n", ret);
}
return IRQ_HANDLED;
}
static irqreturn_t wm8350_rtc_update_handler(int irq, void *data)
{
struct wm8350 *wm8350 = data;
struct rtc_device *rtc = wm8350->rtc.rtc;
rtc_update_irq(rtc, 1, RTC_IRQF | RTC_UF);
return IRQ_HANDLED;
}
static const struct rtc_class_ops wm8350_rtc_ops = {
.read_time = wm8350_rtc_readtime,
.set_time = wm8350_rtc_settime,
.read_alarm = wm8350_rtc_readalarm,
.set_alarm = wm8350_rtc_setalarm,
.alarm_irq_enable = wm8350_rtc_alarm_irq_enable,
};
#ifdef CONFIG_PM_SLEEP
static int wm8350_rtc_suspend(struct device *dev)
{
struct wm8350 *wm8350 = dev_get_drvdata(dev);
int ret = 0;
u16 reg;
reg = wm8350_reg_read(wm8350, WM8350_RTC_TIME_CONTROL);
if (device_may_wakeup(&wm8350->rtc.pdev->dev) &&
reg & WM8350_RTC_ALMSTS) {
ret = wm8350_rtc_stop_alarm(wm8350);
if (ret != 0)
dev_err(dev, "Failed to stop RTC alarm: %d\n", ret);
}
return ret;
}
static int wm8350_rtc_resume(struct device *dev)
{
struct wm8350 *wm8350 = dev_get_drvdata(dev);
int ret;
if (wm8350->rtc.alarm_enabled) {
ret = wm8350_rtc_start_alarm(wm8350);
if (ret != 0)
dev_err(dev, "Failed to restart RTC alarm: %d\n", ret);
}
return 0;
}
#endif
static int wm8350_rtc_probe(struct platform_device *pdev)
{
struct wm8350 *wm8350 = platform_get_drvdata(pdev);
struct wm8350_rtc *wm_rtc = &wm8350->rtc;
int ret = 0;
u16 timectl, power5;
timectl = wm8350_reg_read(wm8350, WM8350_RTC_TIME_CONTROL);
if (timectl & WM8350_RTC_BCD) {
dev_err(&pdev->dev, "RTC BCD mode not supported\n");
return -EINVAL;
}
if (timectl & WM8350_RTC_12HR) {
dev_err(&pdev->dev, "RTC 12 hour mode not supported\n");
return -EINVAL;
}
/* enable the RTC if it's not already enabled */
power5 = wm8350_reg_read(wm8350, WM8350_POWER_MGMT_5);
if (!(power5 & WM8350_RTC_TICK_ENA)) {
wm8350_reg_unlock(wm8350);
ret = wm8350_set_bits(wm8350, WM8350_POWER_MGMT_5,
WM8350_RTC_TICK_ENA);
if (ret < 0) {
dev_err(&pdev->dev, "failed to enable RTC: %d\n", ret);
return ret;
}
wm8350_reg_lock(wm8350);
}
if (timectl & WM8350_RTC_STS) {
int retries;
ret = wm8350_clear_bits(wm8350, WM8350_RTC_TIME_CONTROL,
WM8350_RTC_SET);
if (ret < 0) {
dev_err(&pdev->dev, "failed to start: %d\n", ret);
return ret;
}
retries = WM8350_SET_TIME_RETRIES;
do {
timectl = wm8350_reg_read(wm8350,
WM8350_RTC_TIME_CONTROL);
} while (timectl & WM8350_RTC_STS && --retries);
if (retries == 0) {
dev_err(&pdev->dev, "failed to start: timeout\n");
return -ENODEV;
}
}
device_init_wakeup(&pdev->dev, 1);
wm_rtc->rtc = devm_rtc_device_register(&pdev->dev, "wm8350",
&wm8350_rtc_ops, THIS_MODULE);
if (IS_ERR(wm_rtc->rtc))
return PTR_ERR(wm_rtc->rtc);
ret = wm8350_register_irq(wm8350, WM8350_IRQ_RTC_SEC,
wm8350_rtc_update_handler, 0,
"RTC Seconds", wm8350);
if (ret)
return ret;
wm8350_mask_irq(wm8350, WM8350_IRQ_RTC_SEC);
ret = wm8350_register_irq(wm8350, WM8350_IRQ_RTC_ALM,
wm8350_rtc_alarm_handler, 0,
"RTC Alarm", wm8350);
if (ret) {
wm8350_free_irq(wm8350, WM8350_IRQ_RTC_SEC, wm8350);
return ret;
}
return 0;
}
static void wm8350_rtc_remove(struct platform_device *pdev)
{
struct wm8350 *wm8350 = platform_get_drvdata(pdev);
wm8350_free_irq(wm8350, WM8350_IRQ_RTC_SEC, wm8350);
wm8350_free_irq(wm8350, WM8350_IRQ_RTC_ALM, wm8350);
}
static SIMPLE_DEV_PM_OPS(wm8350_rtc_pm_ops, wm8350_rtc_suspend,
wm8350_rtc_resume);
static struct platform_driver wm8350_rtc_driver = {
.probe = wm8350_rtc_probe,
.remove_new = wm8350_rtc_remove,
.driver = {
.name = "wm8350-rtc",
.pm = &wm8350_rtc_pm_ops,
},
};
module_platform_driver(wm8350_rtc_driver);
MODULE_AUTHOR("Mark Brown <[email protected]>");
MODULE_DESCRIPTION("RTC driver for the WM8350");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:wm8350-rtc");
| linux-master | drivers/rtc/rtc-wm8350.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* rtc-rc5t583.c -- RICOH RC5T583 Real Time Clock
*
* Copyright (c) 2012, NVIDIA CORPORATION. All rights reserved.
* Author: Venu Byravarasu <[email protected]>
*/
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/rtc.h>
#include <linux/bcd.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/mfd/rc5t583.h>
struct rc5t583_rtc {
struct rtc_device *rtc;
/* To store the list of enabled interrupts, during system suspend */
u32 irqen;
};
/* Total number of RTC registers needed to set time */
#define NUM_TIME_REGS (RC5T583_RTC_YEAR - RC5T583_RTC_SEC + 1)
/* Total number of RTC registers needed to set Y-Alarm */
#define NUM_YAL_REGS (RC5T583_RTC_AY_YEAR - RC5T583_RTC_AY_MIN + 1)
/* Set Y-Alarm interrupt */
#define SET_YAL BIT(5)
/* Get Y-Alarm interrupt status */
#define GET_YAL_STATUS BIT(3)
static int rc5t583_rtc_alarm_irq_enable(struct device *dev, unsigned enabled)
{
struct rc5t583 *rc5t583 = dev_get_drvdata(dev->parent);
u8 val;
/* Set Y-Alarm, based on 'enabled' */
val = enabled ? SET_YAL : 0;
return regmap_update_bits(rc5t583->regmap, RC5T583_RTC_CTL1, SET_YAL,
val);
}
/*
* Gets current rc5t583 RTC time and date parameters.
*
* The RTC's time/alarm representation is not what gmtime(3) requires
* Linux to use:
*
* - Months are 1..12 vs Linux 0-11
* - Years are 0..99 vs Linux 1900..N (we assume 21st century)
*/
static int rc5t583_rtc_read_time(struct device *dev, struct rtc_time *tm)
{
struct rc5t583 *rc5t583 = dev_get_drvdata(dev->parent);
u8 rtc_data[NUM_TIME_REGS];
int ret;
ret = regmap_bulk_read(rc5t583->regmap, RC5T583_RTC_SEC, rtc_data,
NUM_TIME_REGS);
if (ret < 0) {
dev_err(dev, "RTC read time failed with err:%d\n", ret);
return ret;
}
tm->tm_sec = bcd2bin(rtc_data[0]);
tm->tm_min = bcd2bin(rtc_data[1]);
tm->tm_hour = bcd2bin(rtc_data[2]);
tm->tm_wday = bcd2bin(rtc_data[3]);
tm->tm_mday = bcd2bin(rtc_data[4]);
tm->tm_mon = bcd2bin(rtc_data[5]) - 1;
tm->tm_year = bcd2bin(rtc_data[6]) + 100;
return ret;
}
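/*
 * BCD decoding example for the read above: rtc_data[6] == 0x23 gives
 * bcd2bin(0x23) = 23, so tm_year = 123, i.e. the year 2023 under the
 * 21st-century assumption noted above.
 */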
static int rc5t583_rtc_set_time(struct device *dev, struct rtc_time *tm)
{
struct rc5t583 *rc5t583 = dev_get_drvdata(dev->parent);
unsigned char rtc_data[NUM_TIME_REGS];
int ret;
rtc_data[0] = bin2bcd(tm->tm_sec);
rtc_data[1] = bin2bcd(tm->tm_min);
rtc_data[2] = bin2bcd(tm->tm_hour);
rtc_data[3] = bin2bcd(tm->tm_wday);
rtc_data[4] = bin2bcd(tm->tm_mday);
rtc_data[5] = bin2bcd(tm->tm_mon + 1);
rtc_data[6] = bin2bcd(tm->tm_year - 100);
ret = regmap_bulk_write(rc5t583->regmap, RC5T583_RTC_SEC, rtc_data,
NUM_TIME_REGS);
if (ret < 0) {
dev_err(dev, "RTC set time failed with error %d\n", ret);
return ret;
}
return ret;
}
static int rc5t583_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alm)
{
struct rc5t583 *rc5t583 = dev_get_drvdata(dev->parent);
unsigned char alarm_data[NUM_YAL_REGS];
u32 interrupt_enable;
int ret;
ret = regmap_bulk_read(rc5t583->regmap, RC5T583_RTC_AY_MIN, alarm_data,
NUM_YAL_REGS);
if (ret < 0) {
dev_err(dev, "rtc_read_alarm error %d\n", ret);
return ret;
}
alm->time.tm_sec = 0;
alm->time.tm_min = bcd2bin(alarm_data[0]);
alm->time.tm_hour = bcd2bin(alarm_data[1]);
alm->time.tm_mday = bcd2bin(alarm_data[2]);
alm->time.tm_mon = bcd2bin(alarm_data[3]) - 1;
alm->time.tm_year = bcd2bin(alarm_data[4]) + 100;
ret = regmap_read(rc5t583->regmap, RC5T583_RTC_CTL1, &interrupt_enable);
if (ret < 0)
return ret;
/* check if YALE is set */
if (interrupt_enable & SET_YAL)
alm->enabled = 1;
return ret;
}
static int rc5t583_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alm)
{
struct rc5t583 *rc5t583 = dev_get_drvdata(dev->parent);
unsigned char alarm_data[NUM_YAL_REGS];
int ret;
ret = rc5t583_rtc_alarm_irq_enable(dev, 0);
if (ret)
return ret;
alarm_data[0] = bin2bcd(alm->time.tm_min);
alarm_data[1] = bin2bcd(alm->time.tm_hour);
alarm_data[2] = bin2bcd(alm->time.tm_mday);
alarm_data[3] = bin2bcd(alm->time.tm_mon + 1);
alarm_data[4] = bin2bcd(alm->time.tm_year - 100);
ret = regmap_bulk_write(rc5t583->regmap, RC5T583_RTC_AY_MIN, alarm_data,
NUM_YAL_REGS);
if (ret) {
dev_err(dev, "rtc_set_alarm error %d\n", ret);
return ret;
}
if (alm->enabled)
ret = rc5t583_rtc_alarm_irq_enable(dev, 1);
return ret;
}
static irqreturn_t rc5t583_rtc_interrupt(int irq, void *rtc)
{
struct device *dev = rtc;
struct rc5t583 *rc5t583 = dev_get_drvdata(dev->parent);
struct rc5t583_rtc *rc5t583_rtc = dev_get_drvdata(dev);
unsigned long events = 0;
int ret;
u32 rtc_reg;
ret = regmap_read(rc5t583->regmap, RC5T583_RTC_CTL2, &rtc_reg);
if (ret < 0)
return IRQ_NONE;
if (rtc_reg & GET_YAL_STATUS) {
events = RTC_IRQF | RTC_AF;
/* clear pending Y-alarm interrupt bit */
rtc_reg &= ~GET_YAL_STATUS;
}
ret = regmap_write(rc5t583->regmap, RC5T583_RTC_CTL2, rtc_reg);
if (ret)
return IRQ_NONE;
/* Notify RTC core on event */
rtc_update_irq(rc5t583_rtc->rtc, 1, events);
return IRQ_HANDLED;
}
static const struct rtc_class_ops rc5t583_rtc_ops = {
.read_time = rc5t583_rtc_read_time,
.set_time = rc5t583_rtc_set_time,
.read_alarm = rc5t583_rtc_read_alarm,
.set_alarm = rc5t583_rtc_set_alarm,
.alarm_irq_enable = rc5t583_rtc_alarm_irq_enable,
};
static int rc5t583_rtc_probe(struct platform_device *pdev)
{
struct rc5t583 *rc5t583 = dev_get_drvdata(pdev->dev.parent);
struct rc5t583_rtc *ricoh_rtc;
struct rc5t583_platform_data *pmic_plat_data;
int ret;
int irq;
ricoh_rtc = devm_kzalloc(&pdev->dev, sizeof(struct rc5t583_rtc),
GFP_KERNEL);
if (!ricoh_rtc)
return -ENOMEM;
platform_set_drvdata(pdev, ricoh_rtc);
/* Clear pending interrupts */
ret = regmap_write(rc5t583->regmap, RC5T583_RTC_CTL2, 0);
if (ret < 0)
return ret;
/* clear RTC Adjust register */
ret = regmap_write(rc5t583->regmap, RC5T583_RTC_ADJ, 0);
if (ret < 0) {
dev_err(&pdev->dev, "unable to program rtc_adjust reg\n");
return -EBUSY;
}
pmic_plat_data = dev_get_platdata(rc5t583->dev);
irq = pmic_plat_data->irq_base;
if (irq <= 0) {
dev_warn(&pdev->dev, "Wake up is not possible as irq = %d\n",
irq);
		return -ENXIO;
}
irq += RC5T583_IRQ_YALE;
ret = devm_request_threaded_irq(&pdev->dev, irq, NULL,
rc5t583_rtc_interrupt, IRQF_TRIGGER_LOW,
"rtc-rc5t583", &pdev->dev);
if (ret < 0) {
dev_err(&pdev->dev, "IRQ is not free.\n");
return ret;
}
device_init_wakeup(&pdev->dev, 1);
ricoh_rtc->rtc = devm_rtc_device_register(&pdev->dev, pdev->name,
&rc5t583_rtc_ops, THIS_MODULE);
if (IS_ERR(ricoh_rtc->rtc)) {
ret = PTR_ERR(ricoh_rtc->rtc);
dev_err(&pdev->dev, "RTC device register: err %d\n", ret);
return ret;
}
return 0;
}
/*
 * Disable the rc5t583 RTC alarm interrupt on driver removal.
 */
static void rc5t583_rtc_remove(struct platform_device *pdev)
{
struct rc5t583_rtc *rc5t583_rtc = platform_get_drvdata(pdev);
rc5t583_rtc_alarm_irq_enable(&rc5t583_rtc->rtc->dev, 0);
}
#ifdef CONFIG_PM_SLEEP
static int rc5t583_rtc_suspend(struct device *dev)
{
struct rc5t583 *rc5t583 = dev_get_drvdata(dev->parent);
struct rc5t583_rtc *rc5t583_rtc = dev_get_drvdata(dev);
int ret;
/* Store current list of enabled interrupts*/
ret = regmap_read(rc5t583->regmap, RC5T583_RTC_CTL1,
&rc5t583_rtc->irqen);
return ret;
}
static int rc5t583_rtc_resume(struct device *dev)
{
struct rc5t583 *rc5t583 = dev_get_drvdata(dev->parent);
struct rc5t583_rtc *rc5t583_rtc = dev_get_drvdata(dev);
/* Restore list of enabled interrupts before suspend */
return regmap_write(rc5t583->regmap, RC5T583_RTC_CTL1,
rc5t583_rtc->irqen);
}
#endif
static SIMPLE_DEV_PM_OPS(rc5t583_rtc_pm_ops, rc5t583_rtc_suspend,
rc5t583_rtc_resume);
static struct platform_driver rc5t583_rtc_driver = {
.probe = rc5t583_rtc_probe,
.remove_new = rc5t583_rtc_remove,
.driver = {
.name = "rtc-rc5t583",
.pm = &rc5t583_rtc_pm_ops,
},
};
module_platform_driver(rc5t583_rtc_driver);
MODULE_ALIAS("platform:rtc-rc5t583");
MODULE_AUTHOR("Venu Byravarasu <[email protected]>");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/rtc/rtc-rc5t583.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* RTC driver for Maxim MAX8925
*
* Copyright (C) 2009-2010 Marvell International Ltd.
* Haojian Zhuang <[email protected]>
*/
#include <linux/module.h>
#include <linux/i2c.h>
#include <linux/slab.h>
#include <linux/rtc.h>
#include <linux/platform_device.h>
#include <linux/mfd/max8925.h>
enum {
RTC_SEC = 0,
RTC_MIN,
RTC_HOUR,
RTC_WEEKDAY,
RTC_DATE,
RTC_MONTH,
RTC_YEAR1,
RTC_YEAR2,
};
#define MAX8925_RTC_SEC 0x00
#define MAX8925_RTC_MIN 0x01
#define MAX8925_RTC_HOUR 0x02
#define MAX8925_RTC_WEEKDAY 0x03
#define MAX8925_RTC_DATE 0x04
#define MAX8925_RTC_MONTH 0x05
#define MAX8925_RTC_YEAR1 0x06
#define MAX8925_RTC_YEAR2 0x07
#define MAX8925_ALARM0_SEC 0x08
#define MAX8925_ALARM0_MIN 0x09
#define MAX8925_ALARM0_HOUR 0x0a
#define MAX8925_ALARM0_WEEKDAY 0x0b
#define MAX8925_ALARM0_DATE 0x0c
#define MAX8925_ALARM0_MON 0x0d
#define MAX8925_ALARM0_YEAR1 0x0e
#define MAX8925_ALARM0_YEAR2 0x0f
#define MAX8925_ALARM1_SEC 0x10
#define MAX8925_ALARM1_MIN 0x11
#define MAX8925_ALARM1_HOUR 0x12
#define MAX8925_ALARM1_WEEKDAY 0x13
#define MAX8925_ALARM1_DATE 0x14
#define MAX8925_ALARM1_MON 0x15
#define MAX8925_ALARM1_YEAR1 0x16
#define MAX8925_ALARM1_YEAR2 0x17
#define MAX8925_RTC_CNTL 0x1b
#define MAX8925_RTC_STATUS 0x20
#define TIME_NUM 8
#define ALARM_1SEC (1 << 7)
#define HOUR_12 (1 << 7)
#define HOUR_AM_PM (1 << 5)
#define ALARM0_IRQ (1 << 3)
#define ALARM1_IRQ (1 << 2)
#define ALARM0_STATUS (1 << 2)
#define ALARM1_STATUS (1 << 1)
struct max8925_rtc_info {
struct rtc_device *rtc_dev;
struct max8925_chip *chip;
struct i2c_client *rtc;
struct device *dev;
int irq;
};
static irqreturn_t rtc_update_handler(int irq, void *data)
{
struct max8925_rtc_info *info = (struct max8925_rtc_info *)data;
/* disable ALARM0 except for 1SEC alarm */
max8925_set_bits(info->rtc, MAX8925_ALARM0_CNTL, 0x7f, 0);
rtc_update_irq(info->rtc_dev, 1, RTC_IRQF | RTC_AF);
return IRQ_HANDLED;
}
static int tm_calc(struct rtc_time *tm, unsigned char *buf, int len)
{
if (len < TIME_NUM)
return -EINVAL;
tm->tm_year = (buf[RTC_YEAR2] >> 4) * 1000
+ (buf[RTC_YEAR2] & 0xf) * 100
+ (buf[RTC_YEAR1] >> 4) * 10
+ (buf[RTC_YEAR1] & 0xf);
tm->tm_year -= 1900;
tm->tm_mon = ((buf[RTC_MONTH] >> 4) & 0x01) * 10
+ (buf[RTC_MONTH] & 0x0f);
tm->tm_mday = ((buf[RTC_DATE] >> 4) & 0x03) * 10
+ (buf[RTC_DATE] & 0x0f);
tm->tm_wday = buf[RTC_WEEKDAY] & 0x07;
if (buf[RTC_HOUR] & HOUR_12) {
tm->tm_hour = ((buf[RTC_HOUR] >> 4) & 0x1) * 10
+ (buf[RTC_HOUR] & 0x0f);
if (buf[RTC_HOUR] & HOUR_AM_PM)
tm->tm_hour += 12;
} else
tm->tm_hour = ((buf[RTC_HOUR] >> 4) & 0x03) * 10
+ (buf[RTC_HOUR] & 0x0f);
tm->tm_min = ((buf[RTC_MIN] >> 4) & 0x7) * 10
+ (buf[RTC_MIN] & 0x0f);
tm->tm_sec = ((buf[RTC_SEC] >> 4) & 0x7) * 10
+ (buf[RTC_SEC] & 0x0f);
return 0;
}
static int data_calc(unsigned char *buf, struct rtc_time *tm, int len)
{
unsigned char high, low;
if (len < TIME_NUM)
return -EINVAL;
high = (tm->tm_year + 1900) / 1000;
low = (tm->tm_year + 1900) / 100;
low = low - high * 10;
buf[RTC_YEAR2] = (high << 4) + low;
high = (tm->tm_year + 1900) / 10;
low = tm->tm_year + 1900;
low = low - high * 10;
high = high - (high / 10) * 10;
buf[RTC_YEAR1] = (high << 4) + low;
high = tm->tm_mon / 10;
low = tm->tm_mon;
low = low - high * 10;
buf[RTC_MONTH] = (high << 4) + low;
high = tm->tm_mday / 10;
low = tm->tm_mday;
low = low - high * 10;
buf[RTC_DATE] = (high << 4) + low;
buf[RTC_WEEKDAY] = tm->tm_wday;
high = tm->tm_hour / 10;
low = tm->tm_hour;
low = low - high * 10;
buf[RTC_HOUR] = (high << 4) + low;
high = tm->tm_min / 10;
low = tm->tm_min;
low = low - high * 10;
buf[RTC_MIN] = (high << 4) + low;
high = tm->tm_sec / 10;
low = tm->tm_sec;
low = low - high * 10;
buf[RTC_SEC] = (high << 4) + low;
return 0;
}
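/*
 * Illustrative only: the digit splitting in data_calc() is equivalent
 * to bin2bcd() for the two-digit fields, e.g. tm_min = 59 gives
 * high = 5, low = 9, so buf[RTC_MIN] = (5 << 4) + 9 = 0x59.
 */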
static int max8925_rtc_read_time(struct device *dev, struct rtc_time *tm)
{
struct max8925_rtc_info *info = dev_get_drvdata(dev);
unsigned char buf[TIME_NUM];
int ret;
ret = max8925_bulk_read(info->rtc, MAX8925_RTC_SEC, TIME_NUM, buf);
if (ret < 0)
goto out;
ret = tm_calc(tm, buf, TIME_NUM);
out:
return ret;
}
static int max8925_rtc_set_time(struct device *dev, struct rtc_time *tm)
{
struct max8925_rtc_info *info = dev_get_drvdata(dev);
unsigned char buf[TIME_NUM];
int ret;
ret = data_calc(buf, tm, TIME_NUM);
if (ret < 0)
goto out;
ret = max8925_bulk_write(info->rtc, MAX8925_RTC_SEC, TIME_NUM, buf);
out:
return ret;
}
static int max8925_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
{
struct max8925_rtc_info *info = dev_get_drvdata(dev);
unsigned char buf[TIME_NUM];
int ret;
ret = max8925_bulk_read(info->rtc, MAX8925_ALARM0_SEC, TIME_NUM, buf);
if (ret < 0)
goto out;
ret = tm_calc(&alrm->time, buf, TIME_NUM);
if (ret < 0)
goto out;
ret = max8925_reg_read(info->rtc, MAX8925_RTC_IRQ_MASK);
if (ret < 0)
goto out;
if (ret & ALARM0_IRQ) {
alrm->enabled = 0;
} else {
ret = max8925_reg_read(info->rtc, MAX8925_ALARM0_CNTL);
if (ret < 0)
goto out;
if (!ret)
alrm->enabled = 0;
else
alrm->enabled = 1;
}
ret = max8925_reg_read(info->rtc, MAX8925_RTC_STATUS);
if (ret < 0)
goto out;
if (ret & ALARM0_STATUS)
alrm->pending = 1;
else
alrm->pending = 0;
return 0;
out:
return ret;
}
static int max8925_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
{
struct max8925_rtc_info *info = dev_get_drvdata(dev);
unsigned char buf[TIME_NUM];
int ret;
ret = data_calc(buf, &alrm->time, TIME_NUM);
if (ret < 0)
goto out;
ret = max8925_bulk_write(info->rtc, MAX8925_ALARM0_SEC, TIME_NUM, buf);
if (ret < 0)
goto out;
if (alrm->enabled)
/* only enable alarm on year/month/day/hour/min/sec */
ret = max8925_reg_write(info->rtc, MAX8925_ALARM0_CNTL, 0x77);
else
ret = max8925_reg_write(info->rtc, MAX8925_ALARM0_CNTL, 0x0);
out:
return ret;
}
static const struct rtc_class_ops max8925_rtc_ops = {
.read_time = max8925_rtc_read_time,
.set_time = max8925_rtc_set_time,
.read_alarm = max8925_rtc_read_alarm,
.set_alarm = max8925_rtc_set_alarm,
};
static int max8925_rtc_probe(struct platform_device *pdev)
{
struct max8925_chip *chip = dev_get_drvdata(pdev->dev.parent);
struct max8925_rtc_info *info;
int ret;
info = devm_kzalloc(&pdev->dev, sizeof(struct max8925_rtc_info),
GFP_KERNEL);
if (!info)
return -ENOMEM;
info->chip = chip;
info->rtc = chip->rtc;
info->dev = &pdev->dev;
info->irq = platform_get_irq(pdev, 0);
ret = devm_request_threaded_irq(&pdev->dev, info->irq, NULL,
rtc_update_handler, IRQF_ONESHOT,
"rtc-alarm0", info);
if (ret < 0) {
dev_err(chip->dev, "Failed to request IRQ: #%d: %d\n",
info->irq, ret);
return ret;
}
	platform_set_drvdata(pdev, info);
device_init_wakeup(&pdev->dev, 1);
info->rtc_dev = devm_rtc_device_register(&pdev->dev, "max8925-rtc",
&max8925_rtc_ops, THIS_MODULE);
	if (IS_ERR(info->rtc_dev)) {
		ret = PTR_ERR(info->rtc_dev);
dev_err(&pdev->dev, "Failed to register RTC device: %d\n", ret);
return ret;
}
return 0;
}
#ifdef CONFIG_PM_SLEEP
static int max8925_rtc_suspend(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct max8925_chip *chip = dev_get_drvdata(pdev->dev.parent);
if (device_may_wakeup(dev))
chip->wakeup_flag |= 1 << MAX8925_IRQ_RTC_ALARM0;
return 0;
}
static int max8925_rtc_resume(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct max8925_chip *chip = dev_get_drvdata(pdev->dev.parent);
if (device_may_wakeup(dev))
chip->wakeup_flag &= ~(1 << MAX8925_IRQ_RTC_ALARM0);
return 0;
}
#endif
static SIMPLE_DEV_PM_OPS(max8925_rtc_pm_ops, max8925_rtc_suspend, max8925_rtc_resume);
static struct platform_driver max8925_rtc_driver = {
.driver = {
.name = "max8925-rtc",
.pm = &max8925_rtc_pm_ops,
},
.probe = max8925_rtc_probe,
};
module_platform_driver(max8925_rtc_driver);
MODULE_DESCRIPTION("Maxim MAX8925 RTC driver");
MODULE_AUTHOR("Haojian Zhuang <[email protected]>");
MODULE_LICENSE("GPL");
| linux-master | drivers/rtc/rtc-max8925.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* drivers/rtc/rtc-spear.c
*
* Copyright (C) 2010 ST Microelectronics
* Rajeev Kumar<[email protected]>
*/
#include <linux/bcd.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/rtc.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
/* RTC registers */
#define TIME_REG 0x00
#define DATE_REG 0x04
#define ALARM_TIME_REG 0x08
#define ALARM_DATE_REG 0x0C
#define CTRL_REG 0x10
#define STATUS_REG 0x14
/* TIME_REG & ALARM_TIME_REG */
#define SECONDS_UNITS (0xf<<0) /* seconds units position */
#define SECONDS_TENS (0x7<<4) /* seconds tens position */
#define MINUTES_UNITS (0xf<<8) /* minutes units position */
#define MINUTES_TENS (0x7<<12) /* minutes tens position */
#define HOURS_UNITS (0xf<<16) /* hours units position */
#define HOURS_TENS (0x3<<20) /* hours tens position */
/* DATE_REG & ALARM_DATE_REG */
#define DAYS_UNITS (0xf<<0) /* days units position */
#define DAYS_TENS (0x3<<4) /* days tens position */
#define MONTHS_UNITS (0xf<<8) /* months units position */
#define MONTHS_TENS (0x1<<12) /* months tens position */
#define YEARS_UNITS (0xf<<16) /* years units position */
#define YEARS_TENS (0xf<<20) /* years tens position */
#define YEARS_HUNDREDS (0xf<<24) /* years hundreds position */
#define YEARS_MILLENIUMS (0xf<<28) /* years millenniums position */
/* MASK SHIFT TIME_REG & ALARM_TIME_REG*/
#define SECOND_SHIFT 0x00 /* seconds units */
#define MINUTE_SHIFT 0x08 /* minutes units position */
#define HOUR_SHIFT 0x10 /* hours units position */
#define MDAY_SHIFT 0x00 /* Month day shift */
#define MONTH_SHIFT 0x08 /* Month shift */
#define YEAR_SHIFT 0x10 /* Year shift */
#define SECOND_MASK 0x7F
#define MIN_MASK 0x7F
#define HOUR_MASK 0x3F
#define DAY_MASK 0x3F
#define MONTH_MASK 0x7F
#define YEAR_MASK 0xFFFF
/* date reg equal to time reg, for debug only */
#define TIME_BYP (1<<9)
#define INT_ENABLE (1<<31) /* interrupt enable */
/* STATUS_REG */
#define CLK_UNCONNECTED (1<<0)
#define PEND_WR_TIME (1<<2)
#define PEND_WR_DATE (1<<3)
#define LOST_WR_TIME (1<<4)
#define LOST_WR_DATE (1<<5)
#define RTC_INT_MASK (1<<31)
#define STATUS_BUSY (PEND_WR_TIME | PEND_WR_DATE)
#define STATUS_FAIL (LOST_WR_TIME | LOST_WR_DATE)
struct spear_rtc_config {
struct rtc_device *rtc;
struct clk *clk;
spinlock_t lock;
void __iomem *ioaddr;
unsigned int irq_wake;
};
static inline void spear_rtc_clear_interrupt(struct spear_rtc_config *config)
{
unsigned int val;
unsigned long flags;
spin_lock_irqsave(&config->lock, flags);
val = readl(config->ioaddr + STATUS_REG);
val |= RTC_INT_MASK;
writel(val, config->ioaddr + STATUS_REG);
spin_unlock_irqrestore(&config->lock, flags);
}
static inline void spear_rtc_enable_interrupt(struct spear_rtc_config *config)
{
unsigned int val;
val = readl(config->ioaddr + CTRL_REG);
if (!(val & INT_ENABLE)) {
spear_rtc_clear_interrupt(config);
val |= INT_ENABLE;
writel(val, config->ioaddr + CTRL_REG);
}
}
static inline void spear_rtc_disable_interrupt(struct spear_rtc_config *config)
{
unsigned int val;
val = readl(config->ioaddr + CTRL_REG);
if (val & INT_ENABLE) {
val &= ~INT_ENABLE;
writel(val, config->ioaddr + CTRL_REG);
}
}
static inline int is_write_complete(struct spear_rtc_config *config)
{
int ret = 0;
unsigned long flags;
spin_lock_irqsave(&config->lock, flags);
if ((readl(config->ioaddr + STATUS_REG)) & STATUS_FAIL)
ret = -EIO;
spin_unlock_irqrestore(&config->lock, flags);
return ret;
}
static void rtc_wait_not_busy(struct spear_rtc_config *config)
{
int status, count = 0;
unsigned long flags;
	/* Assume BUSY may stay active for up to 80 msec */
for (count = 0; count < 80; count++) {
spin_lock_irqsave(&config->lock, flags);
status = readl(config->ioaddr + STATUS_REG);
spin_unlock_irqrestore(&config->lock, flags);
if ((status & STATUS_BUSY) == 0)
break;
		/* re-check the busy status after each msec */
msleep(1);
}
}
static irqreturn_t spear_rtc_irq(int irq, void *dev_id)
{
struct spear_rtc_config *config = dev_id;
unsigned long events = 0;
unsigned int irq_data;
spin_lock(&config->lock);
irq_data = readl(config->ioaddr + STATUS_REG);
spin_unlock(&config->lock);
if ((irq_data & RTC_INT_MASK)) {
spear_rtc_clear_interrupt(config);
events = RTC_IRQF | RTC_AF;
rtc_update_irq(config->rtc, 1, events);
return IRQ_HANDLED;
} else
return IRQ_NONE;
}
static void tm2bcd(struct rtc_time *tm)
{
tm->tm_sec = bin2bcd(tm->tm_sec);
tm->tm_min = bin2bcd(tm->tm_min);
tm->tm_hour = bin2bcd(tm->tm_hour);
tm->tm_mday = bin2bcd(tm->tm_mday);
tm->tm_mon = bin2bcd(tm->tm_mon + 1);
tm->tm_year = bin2bcd(tm->tm_year);
}
static void bcd2tm(struct rtc_time *tm)
{
tm->tm_sec = bcd2bin(tm->tm_sec);
tm->tm_min = bcd2bin(tm->tm_min);
tm->tm_hour = bcd2bin(tm->tm_hour);
tm->tm_mday = bcd2bin(tm->tm_mday);
tm->tm_mon = bcd2bin(tm->tm_mon) - 1;
/* epoch == 1900 */
tm->tm_year = bcd2bin(tm->tm_year);
}
/*
 * spear_rtc_read_time - read the time
 * @dev: rtc device in use
 * @tm: holds date and time
 *
 * This function reads the time and date. It returns 0 on success,
 * otherwise a negative error code.
*/
static int spear_rtc_read_time(struct device *dev, struct rtc_time *tm)
{
struct spear_rtc_config *config = dev_get_drvdata(dev);
unsigned int time, date;
/* we don't report wday/yday/isdst ... */
rtc_wait_not_busy(config);
do {
time = readl(config->ioaddr + TIME_REG);
date = readl(config->ioaddr + DATE_REG);
} while (time == readl(config->ioaddr + TIME_REG));
tm->tm_sec = (time >> SECOND_SHIFT) & SECOND_MASK;
tm->tm_min = (time >> MINUTE_SHIFT) & MIN_MASK;
tm->tm_hour = (time >> HOUR_SHIFT) & HOUR_MASK;
tm->tm_mday = (date >> MDAY_SHIFT) & DAY_MASK;
tm->tm_mon = (date >> MONTH_SHIFT) & MONTH_MASK;
tm->tm_year = (date >> YEAR_SHIFT) & YEAR_MASK;
bcd2tm(tm);
return 0;
}
/*
* spear_rtc_set_time - set the time
* @dev: rtc device in use
* @tm: holds date and time
*
 * This function sets the time and date. It returns 0 on success,
 * otherwise a negative error code.
*/
static int spear_rtc_set_time(struct device *dev, struct rtc_time *tm)
{
struct spear_rtc_config *config = dev_get_drvdata(dev);
unsigned int time, date;
tm2bcd(tm);
rtc_wait_not_busy(config);
time = (tm->tm_sec << SECOND_SHIFT) | (tm->tm_min << MINUTE_SHIFT) |
(tm->tm_hour << HOUR_SHIFT);
date = (tm->tm_mday << MDAY_SHIFT) | (tm->tm_mon << MONTH_SHIFT) |
(tm->tm_year << YEAR_SHIFT);
writel(time, config->ioaddr + TIME_REG);
writel(date, config->ioaddr + DATE_REG);
return is_write_complete(config);
}
/*
* spear_rtc_read_alarm - read the alarm time
* @dev: rtc device in use
* @alm: holds alarm date and time
*
 * This function reads the alarm time and date. It returns 0 on
 * success, otherwise a negative error code.
*/
static int spear_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alm)
{
struct spear_rtc_config *config = dev_get_drvdata(dev);
unsigned int time, date;
rtc_wait_not_busy(config);
time = readl(config->ioaddr + ALARM_TIME_REG);
date = readl(config->ioaddr + ALARM_DATE_REG);
alm->time.tm_sec = (time >> SECOND_SHIFT) & SECOND_MASK;
alm->time.tm_min = (time >> MINUTE_SHIFT) & MIN_MASK;
alm->time.tm_hour = (time >> HOUR_SHIFT) & HOUR_MASK;
alm->time.tm_mday = (date >> MDAY_SHIFT) & DAY_MASK;
alm->time.tm_mon = (date >> MONTH_SHIFT) & MONTH_MASK;
alm->time.tm_year = (date >> YEAR_SHIFT) & YEAR_MASK;
bcd2tm(&alm->time);
	/* INT_ENABLE is bit 31; normalize so the value survives the u8 'enabled' */
	alm->enabled = !!(readl(config->ioaddr + CTRL_REG) & INT_ENABLE);
return 0;
}
/*
* spear_rtc_set_alarm - set the alarm time
* @dev: rtc device in use
* @alm: holds alarm date and time
*
 * This function sets the alarm time and date. It returns 0 on
 * success, otherwise a negative error code.
*/
static int spear_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alm)
{
struct spear_rtc_config *config = dev_get_drvdata(dev);
unsigned int time, date;
int err;
tm2bcd(&alm->time);
rtc_wait_not_busy(config);
time = (alm->time.tm_sec << SECOND_SHIFT) | (alm->time.tm_min <<
MINUTE_SHIFT) | (alm->time.tm_hour << HOUR_SHIFT);
date = (alm->time.tm_mday << MDAY_SHIFT) | (alm->time.tm_mon <<
MONTH_SHIFT) | (alm->time.tm_year << YEAR_SHIFT);
writel(time, config->ioaddr + ALARM_TIME_REG);
writel(date, config->ioaddr + ALARM_DATE_REG);
err = is_write_complete(config);
if (err < 0)
return err;
if (alm->enabled)
spear_rtc_enable_interrupt(config);
else
spear_rtc_disable_interrupt(config);
return 0;
}
static int spear_alarm_irq_enable(struct device *dev, unsigned int enabled)
{
struct spear_rtc_config *config = dev_get_drvdata(dev);
int ret = 0;
spear_rtc_clear_interrupt(config);
switch (enabled) {
case 0:
/* alarm off */
spear_rtc_disable_interrupt(config);
break;
case 1:
/* alarm on */
spear_rtc_enable_interrupt(config);
break;
default:
ret = -EINVAL;
break;
}
return ret;
}
static const struct rtc_class_ops spear_rtc_ops = {
.read_time = spear_rtc_read_time,
.set_time = spear_rtc_set_time,
.read_alarm = spear_rtc_read_alarm,
.set_alarm = spear_rtc_set_alarm,
.alarm_irq_enable = spear_alarm_irq_enable,
};
static int spear_rtc_probe(struct platform_device *pdev)
{
struct spear_rtc_config *config;
int status = 0;
int irq;
config = devm_kzalloc(&pdev->dev, sizeof(*config), GFP_KERNEL);
if (!config)
return -ENOMEM;
config->rtc = devm_rtc_allocate_device(&pdev->dev);
if (IS_ERR(config->rtc))
return PTR_ERR(config->rtc);
/* alarm irqs */
irq = platform_get_irq(pdev, 0);
if (irq < 0)
return irq;
status = devm_request_irq(&pdev->dev, irq, spear_rtc_irq, 0, pdev->name,
config);
if (status) {
dev_err(&pdev->dev, "Alarm interrupt IRQ%d already claimed\n",
irq);
return status;
}
config->ioaddr = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(config->ioaddr))
return PTR_ERR(config->ioaddr);
config->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(config->clk))
return PTR_ERR(config->clk);
status = clk_prepare_enable(config->clk);
if (status < 0)
return status;
spin_lock_init(&config->lock);
platform_set_drvdata(pdev, config);
config->rtc->ops = &spear_rtc_ops;
config->rtc->range_min = RTC_TIMESTAMP_BEGIN_0000;
config->rtc->range_max = RTC_TIMESTAMP_END_9999;
status = devm_rtc_register_device(config->rtc);
if (status)
goto err_disable_clock;
if (!device_can_wakeup(&pdev->dev))
device_init_wakeup(&pdev->dev, 1);
return 0;
err_disable_clock:
clk_disable_unprepare(config->clk);
return status;
}
static void spear_rtc_remove(struct platform_device *pdev)
{
struct spear_rtc_config *config = platform_get_drvdata(pdev);
spear_rtc_disable_interrupt(config);
clk_disable_unprepare(config->clk);
device_init_wakeup(&pdev->dev, 0);
}
#ifdef CONFIG_PM_SLEEP
static int spear_rtc_suspend(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct spear_rtc_config *config = platform_get_drvdata(pdev);
int irq;
irq = platform_get_irq(pdev, 0);
if (device_may_wakeup(&pdev->dev)) {
if (!enable_irq_wake(irq))
config->irq_wake = 1;
} else {
spear_rtc_disable_interrupt(config);
clk_disable(config->clk);
}
return 0;
}
static int spear_rtc_resume(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct spear_rtc_config *config = platform_get_drvdata(pdev);
int irq;
irq = platform_get_irq(pdev, 0);
if (device_may_wakeup(&pdev->dev)) {
if (config->irq_wake) {
disable_irq_wake(irq);
config->irq_wake = 0;
}
} else {
clk_enable(config->clk);
spear_rtc_enable_interrupt(config);
}
return 0;
}
#endif
static SIMPLE_DEV_PM_OPS(spear_rtc_pm_ops, spear_rtc_suspend, spear_rtc_resume);
static void spear_rtc_shutdown(struct platform_device *pdev)
{
struct spear_rtc_config *config = platform_get_drvdata(pdev);
spear_rtc_disable_interrupt(config);
clk_disable(config->clk);
}
#ifdef CONFIG_OF
static const struct of_device_id spear_rtc_id_table[] = {
{ .compatible = "st,spear600-rtc" },
{}
};
MODULE_DEVICE_TABLE(of, spear_rtc_id_table);
#endif
static struct platform_driver spear_rtc_driver = {
.probe = spear_rtc_probe,
.remove_new = spear_rtc_remove,
.shutdown = spear_rtc_shutdown,
.driver = {
.name = "rtc-spear",
.pm = &spear_rtc_pm_ops,
.of_match_table = of_match_ptr(spear_rtc_id_table),
},
};
module_platform_driver(spear_rtc_driver);
MODULE_ALIAS("platform:rtc-spear");
MODULE_AUTHOR("Rajeev Kumar <[email protected]>");
MODULE_DESCRIPTION("ST SPEAr Realtime Clock Driver (RTC)");
MODULE_LICENSE("GPL");
| linux-master | drivers/rtc/rtc-spear.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Real-time clock driver for MPC5121
*
* Copyright 2007, Domen Puncer <[email protected]>
* Copyright 2008, Freescale Semiconductor, Inc. All rights reserved.
* Copyright 2011, Dmitry Eremin-Solenikov
*/
#include <linux/init.h>
#include <linux/module.h>
#include <linux/rtc.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/slab.h>
struct mpc5121_rtc_regs {
u8 set_time; /* RTC + 0x00 */
u8 hour_set; /* RTC + 0x01 */
u8 minute_set; /* RTC + 0x02 */
u8 second_set; /* RTC + 0x03 */
u8 set_date; /* RTC + 0x04 */
u8 month_set; /* RTC + 0x05 */
u8 weekday_set; /* RTC + 0x06 */
u8 date_set; /* RTC + 0x07 */
u8 write_sw; /* RTC + 0x08 */
u8 sw_set; /* RTC + 0x09 */
u16 year_set; /* RTC + 0x0a */
u8 alm_enable; /* RTC + 0x0c */
u8 alm_hour_set; /* RTC + 0x0d */
u8 alm_min_set; /* RTC + 0x0e */
u8 int_enable; /* RTC + 0x0f */
u8 reserved1;
u8 hour; /* RTC + 0x11 */
u8 minute; /* RTC + 0x12 */
u8 second; /* RTC + 0x13 */
u8 month; /* RTC + 0x14 */
u8 wday_mday; /* RTC + 0x15 */
u16 year; /* RTC + 0x16 */
u8 int_alm; /* RTC + 0x18 */
u8 int_sw; /* RTC + 0x19 */
u8 alm_status; /* RTC + 0x1a */
u8 sw_minute; /* RTC + 0x1b */
u8 bus_error_1; /* RTC + 0x1c */
u8 int_day; /* RTC + 0x1d */
u8 int_min; /* RTC + 0x1e */
u8 int_sec; /* RTC + 0x1f */
/*
* target_time:
	 * intended for hibernation, but hibernation does not work on
	 * silicon rev 1.5, so it is used as non-volatile storage for the
	 * offset between the actual_time register and Linux time
*/
u32 target_time; /* RTC + 0x20 */
/*
* actual_time:
	 * read-only time since VBAT_RTC was last connected
*/
u32 actual_time; /* RTC + 0x24 */
u32 keep_alive; /* RTC + 0x28 */
};
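/*
 * Worked example of the offset scheme above (illustrative values): if
 * actual_time reads 1000 seconds since VBAT_RTC was connected and the
 * wall clock is 1700000000, set_time stores 1700000000 - 1000 in
 * target_time, and read_time then reports 1000 + (1700000000 - 1000).
 */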
struct mpc5121_rtc_data {
unsigned irq;
unsigned irq_periodic;
struct mpc5121_rtc_regs __iomem *regs;
struct rtc_device *rtc;
struct rtc_wkalrm wkalarm;
};
/*
* Update second/minute/hour registers.
*
 * This is done only so that alarms will work.
*/
static void mpc5121_rtc_update_smh(struct mpc5121_rtc_regs __iomem *regs,
struct rtc_time *tm)
{
out_8(®s->second_set, tm->tm_sec);
out_8(®s->minute_set, tm->tm_min);
out_8(®s->hour_set, tm->tm_hour);
/* set time sequence */
out_8(®s->set_time, 0x1);
out_8(®s->set_time, 0x3);
out_8(®s->set_time, 0x1);
out_8(®s->set_time, 0x0);
}
static int mpc5121_rtc_read_time(struct device *dev, struct rtc_time *tm)
{
struct mpc5121_rtc_data *rtc = dev_get_drvdata(dev);
struct mpc5121_rtc_regs __iomem *regs = rtc->regs;
unsigned long now;
/*
* linux time is actual_time plus the offset saved in target_time
*/
now = in_be32(®s->actual_time) + in_be32(®s->target_time);
rtc_time64_to_tm(now, tm);
/*
* update second minute hour registers
* so alarms will work
*/
mpc5121_rtc_update_smh(regs, tm);
return 0;
}
static int mpc5121_rtc_set_time(struct device *dev, struct rtc_time *tm)
{
struct mpc5121_rtc_data *rtc = dev_get_drvdata(dev);
struct mpc5121_rtc_regs __iomem *regs = rtc->regs;
unsigned long now;
/*
* The actual_time register is read only so we write the offset
* between it and linux time to the target_time register.
*/
now = rtc_tm_to_time64(tm);
out_be32(®s->target_time, now - in_be32(®s->actual_time));
/*
* update second minute hour registers
* so alarms will work
*/
mpc5121_rtc_update_smh(regs, tm);
return 0;
}
static int mpc5200_rtc_read_time(struct device *dev, struct rtc_time *tm)
{
struct mpc5121_rtc_data *rtc = dev_get_drvdata(dev);
struct mpc5121_rtc_regs __iomem *regs = rtc->regs;
int tmp;
tm->tm_sec = in_8(®s->second);
tm->tm_min = in_8(®s->minute);
/* 12 hour format? */
if (in_8(®s->hour) & 0x20)
tm->tm_hour = (in_8(®s->hour) >> 1) +
(in_8(®s->hour) & 1 ? 12 : 0);
else
tm->tm_hour = in_8(®s->hour);
tmp = in_8(®s->wday_mday);
tm->tm_mday = tmp & 0x1f;
tm->tm_mon = in_8(®s->month) - 1;
tm->tm_year = in_be16(®s->year) - 1900;
tm->tm_wday = (tmp >> 5) % 7;
tm->tm_yday = rtc_year_days(tm->tm_mday, tm->tm_mon, tm->tm_year);
tm->tm_isdst = 0;
return 0;
}
static int mpc5200_rtc_set_time(struct device *dev, struct rtc_time *tm)
{
struct mpc5121_rtc_data *rtc = dev_get_drvdata(dev);
struct mpc5121_rtc_regs __iomem *regs = rtc->regs;
mpc5121_rtc_update_smh(regs, tm);
/* date */
out_8(®s->month_set, tm->tm_mon + 1);
out_8(®s->weekday_set, tm->tm_wday ? tm->tm_wday : 7);
out_8(®s->date_set, tm->tm_mday);
out_be16(®s->year_set, tm->tm_year + 1900);
/* set date sequence */
out_8(®s->set_date, 0x1);
out_8(®s->set_date, 0x3);
out_8(®s->set_date, 0x1);
out_8(®s->set_date, 0x0);
return 0;
}
static int mpc5121_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alarm)
{
struct mpc5121_rtc_data *rtc = dev_get_drvdata(dev);
struct mpc5121_rtc_regs __iomem *regs = rtc->regs;
*alarm = rtc->wkalarm;
alarm->pending = in_8(®s->alm_status);
return 0;
}
static int mpc5121_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alarm)
{
struct mpc5121_rtc_data *rtc = dev_get_drvdata(dev);
struct mpc5121_rtc_regs __iomem *regs = rtc->regs;
alarm->time.tm_mday = -1;
alarm->time.tm_mon = -1;
alarm->time.tm_year = -1;
out_8(®s->alm_min_set, alarm->time.tm_min);
out_8(®s->alm_hour_set, alarm->time.tm_hour);
out_8(®s->alm_enable, alarm->enabled);
rtc->wkalarm = *alarm;
return 0;
}
static irqreturn_t mpc5121_rtc_handler(int irq, void *dev)
{
	struct mpc5121_rtc_data *rtc = dev_get_drvdata(dev);
struct mpc5121_rtc_regs __iomem *regs = rtc->regs;
if (in_8(®s->int_alm)) {
/* acknowledge and clear status */
out_8(®s->int_alm, 1);
out_8(®s->alm_status, 1);
rtc_update_irq(rtc->rtc, 1, RTC_IRQF | RTC_AF);
return IRQ_HANDLED;
}
return IRQ_NONE;
}
static irqreturn_t mpc5121_rtc_handler_upd(int irq, void *dev)
{
	struct mpc5121_rtc_data *rtc = dev_get_drvdata(dev);
struct mpc5121_rtc_regs __iomem *regs = rtc->regs;
if (in_8(®s->int_sec) && (in_8(®s->int_enable) & 0x1)) {
/* acknowledge */
out_8(®s->int_sec, 1);
rtc_update_irq(rtc->rtc, 1, RTC_IRQF | RTC_UF);
return IRQ_HANDLED;
}
return IRQ_NONE;
}
static int mpc5121_rtc_alarm_irq_enable(struct device *dev,
unsigned int enabled)
{
struct mpc5121_rtc_data *rtc = dev_get_drvdata(dev);
struct mpc5121_rtc_regs __iomem *regs = rtc->regs;
int val;
if (enabled)
val = 1;
else
val = 0;
out_8(®s->alm_enable, val);
rtc->wkalarm.enabled = val;
return 0;
}
static const struct rtc_class_ops mpc5121_rtc_ops = {
.read_time = mpc5121_rtc_read_time,
.set_time = mpc5121_rtc_set_time,
.read_alarm = mpc5121_rtc_read_alarm,
.set_alarm = mpc5121_rtc_set_alarm,
.alarm_irq_enable = mpc5121_rtc_alarm_irq_enable,
};
static const struct rtc_class_ops mpc5200_rtc_ops = {
.read_time = mpc5200_rtc_read_time,
.set_time = mpc5200_rtc_set_time,
.read_alarm = mpc5121_rtc_read_alarm,
.set_alarm = mpc5121_rtc_set_alarm,
.alarm_irq_enable = mpc5121_rtc_alarm_irq_enable,
};
static int mpc5121_rtc_probe(struct platform_device *op)
{
struct mpc5121_rtc_data *rtc;
int err = 0;
rtc = devm_kzalloc(&op->dev, sizeof(*rtc), GFP_KERNEL);
if (!rtc)
return -ENOMEM;
rtc->regs = devm_platform_ioremap_resource(op, 0);
if (IS_ERR(rtc->regs)) {
dev_err(&op->dev, "%s: couldn't map io space\n", __func__);
return PTR_ERR(rtc->regs);
}
device_init_wakeup(&op->dev, 1);
platform_set_drvdata(op, rtc);
rtc->irq = irq_of_parse_and_map(op->dev.of_node, 1);
err = devm_request_irq(&op->dev, rtc->irq, mpc5121_rtc_handler, 0,
"mpc5121-rtc", &op->dev);
if (err) {
dev_err(&op->dev, "%s: could not request irq: %i\n",
__func__, rtc->irq);
goto out_dispose;
}
rtc->irq_periodic = irq_of_parse_and_map(op->dev.of_node, 0);
err = devm_request_irq(&op->dev, rtc->irq_periodic,
mpc5121_rtc_handler_upd, 0, "mpc5121-rtc_upd",
&op->dev);
if (err) {
dev_err(&op->dev, "%s: could not request irq: %i\n",
__func__, rtc->irq_periodic);
goto out_dispose2;
}
rtc->rtc = devm_rtc_allocate_device(&op->dev);
if (IS_ERR(rtc->rtc)) {
err = PTR_ERR(rtc->rtc);
goto out_dispose2;
}
rtc->rtc->ops = &mpc5200_rtc_ops;
set_bit(RTC_FEATURE_ALARM_RES_MINUTE, rtc->rtc->features);
clear_bit(RTC_FEATURE_UPDATE_INTERRUPT, rtc->rtc->features);
rtc->rtc->range_min = RTC_TIMESTAMP_BEGIN_0000;
rtc->rtc->range_max = 65733206399ULL; /* 4052-12-31 23:59:59 */
if (of_device_is_compatible(op->dev.of_node, "fsl,mpc5121-rtc")) {
u32 ka;
ka = in_be32(&rtc->regs->keep_alive);
if (ka & 0x02) {
dev_warn(&op->dev,
"mpc5121-rtc: Battery or oscillator failure!\n");
out_be32(&rtc->regs->keep_alive, ka);
}
rtc->rtc->ops = &mpc5121_rtc_ops;
/*
* This is a limitation of the driver that abuses the target
* time register, the actual maximum year for the mpc5121 is
* also 4052.
*/
rtc->rtc->range_min = 0;
rtc->rtc->range_max = U32_MAX;
}
err = devm_rtc_register_device(rtc->rtc);
if (err)
goto out_dispose2;
return 0;
out_dispose2:
irq_dispose_mapping(rtc->irq_periodic);
out_dispose:
irq_dispose_mapping(rtc->irq);
return err;
}
static void mpc5121_rtc_remove(struct platform_device *op)
{
struct mpc5121_rtc_data *rtc = platform_get_drvdata(op);
struct mpc5121_rtc_regs __iomem *regs = rtc->regs;
/* disable interrupt, so there are no nasty surprises */
out_8(®s->alm_enable, 0);
out_8(®s->int_enable, in_8(®s->int_enable) & ~0x1);
irq_dispose_mapping(rtc->irq);
irq_dispose_mapping(rtc->irq_periodic);
}
#ifdef CONFIG_OF
static const struct of_device_id mpc5121_rtc_match[] = {
{ .compatible = "fsl,mpc5121-rtc", },
{ .compatible = "fsl,mpc5200-rtc", },
{},
};
MODULE_DEVICE_TABLE(of, mpc5121_rtc_match);
#endif
static struct platform_driver mpc5121_rtc_driver = {
.driver = {
.name = "mpc5121-rtc",
.of_match_table = of_match_ptr(mpc5121_rtc_match),
},
.probe = mpc5121_rtc_probe,
.remove_new = mpc5121_rtc_remove,
};
module_platform_driver(mpc5121_rtc_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("John Rigby <[email protected]>");
| linux-master | drivers/rtc/rtc-mpc5121.c |
// SPDX-License-Identifier: GPL-2.0
/*
* The RTC driver for Sunplus SP7021
*
 * Copyright (C) 2019 Sunplus Technology Inc., All rights reserved.
*/
#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/ktime.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/reset.h>
#include <linux/rtc.h>
#define RTC_REG_NAME "rtc"
#define RTC_CTRL 0x40
#define TIMER_FREEZE_MASK_BIT BIT(5 + 16)
#define TIMER_FREEZE BIT(5)
#define DIS_SYS_RST_RTC_MASK_BIT BIT(4 + 16)
#define DIS_SYS_RST_RTC BIT(4)
#define RTC32K_MODE_RESET_MASK_BIT BIT(3 + 16)
#define RTC32K_MODE_RESET BIT(3)
#define ALARM_EN_OVERDUE_MASK_BIT BIT(2 + 16)
#define ALARM_EN_OVERDUE BIT(2)
#define ALARM_EN_PMC_MASK_BIT BIT(1 + 16)
#define ALARM_EN_PMC BIT(1)
#define ALARM_EN_MASK_BIT BIT(0 + 16)
#define ALARM_EN BIT(0)
#define RTC_TIMER_OUT 0x44
#define RTC_DIVIDER 0x48
#define RTC_TIMER_SET 0x4c
#define RTC_ALARM_SET 0x50
#define RTC_USER_DATA 0x54
#define RTC_RESET_RECORD 0x58
#define RTC_BATT_CHARGE_CTRL 0x5c
#define BAT_CHARGE_RSEL_MASK_BIT GENMASK(3 + 16, 2 + 16)
#define BAT_CHARGE_RSEL_MASK GENMASK(3, 2)
#define BAT_CHARGE_RSEL_2K_OHM FIELD_PREP(BAT_CHARGE_RSEL_MASK, 0)
#define BAT_CHARGE_RSEL_250_OHM FIELD_PREP(BAT_CHARGE_RSEL_MASK, 1)
#define BAT_CHARGE_RSEL_50_OHM FIELD_PREP(BAT_CHARGE_RSEL_MASK, 2)
#define BAT_CHARGE_RSEL_0_OHM FIELD_PREP(BAT_CHARGE_RSEL_MASK, 3)
#define BAT_CHARGE_DSEL_MASK_BIT BIT(1 + 16)
#define BAT_CHARGE_DSEL_MASK GENMASK(1, 1)
#define BAT_CHARGE_DSEL_ON FIELD_PREP(BAT_CHARGE_DSEL_MASK, 0)
#define BAT_CHARGE_DSEL_OFF FIELD_PREP(BAT_CHARGE_DSEL_MASK, 1)
#define BAT_CHARGE_EN_MASK_BIT BIT(0 + 16)
#define BAT_CHARGE_EN BIT(0)
#define RTC_TRIM_CTRL 0x60
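/*
 * Note (inferred from the register writes below, not from a datasheet):
 * each control bit appears to be paired with a write-enable mask bit 16
 * positions higher, so writing MASK_BIT | value updates only that bit
 * and leaves the rest of the register untouched; see
 * sp_rtc_alarm_irq_enable() and sp_rtc_set_trickle_charger().
 */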
struct sunplus_rtc {
struct rtc_device *rtc;
struct resource *res;
struct clk *rtcclk;
struct reset_control *rstc;
void __iomem *reg_base;
int irq;
};
static void sp_get_seconds(struct device *dev, unsigned long *secs)
{
struct sunplus_rtc *sp_rtc = dev_get_drvdata(dev);
*secs = (unsigned long)readl(sp_rtc->reg_base + RTC_TIMER_OUT);
}
static void sp_set_seconds(struct device *dev, unsigned long secs)
{
struct sunplus_rtc *sp_rtc = dev_get_drvdata(dev);
writel((u32)secs, sp_rtc->reg_base + RTC_TIMER_SET);
}
static int sp_rtc_read_time(struct device *dev, struct rtc_time *tm)
{
unsigned long secs;
sp_get_seconds(dev, &secs);
rtc_time64_to_tm(secs, tm);
return 0;
}
static int sp_rtc_set_time(struct device *dev, struct rtc_time *tm)
{
unsigned long secs;
secs = rtc_tm_to_time64(tm);
dev_dbg(dev, "%s, secs = %lu\n", __func__, secs);
sp_set_seconds(dev, secs);
return 0;
}
static int sp_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
{
struct sunplus_rtc *sp_rtc = dev_get_drvdata(dev);
unsigned long alarm_time;
alarm_time = rtc_tm_to_time64(&alrm->time);
dev_dbg(dev, "%s, alarm_time: %u\n", __func__, (u32)(alarm_time));
writel((u32)alarm_time, sp_rtc->reg_base + RTC_ALARM_SET);
return 0;
}
static int sp_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
{
struct sunplus_rtc *sp_rtc = dev_get_drvdata(dev);
unsigned int alarm_time;
alarm_time = readl(sp_rtc->reg_base + RTC_ALARM_SET);
dev_dbg(dev, "%s, alarm_time: %u\n", __func__, alarm_time);
if (alarm_time == 0)
alrm->enabled = 0;
else
alrm->enabled = 1;
rtc_time64_to_tm((unsigned long)(alarm_time), &alrm->time);
return 0;
}
static int sp_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
{
struct sunplus_rtc *sp_rtc = dev_get_drvdata(dev);
if (enabled)
writel((TIMER_FREEZE_MASK_BIT | DIS_SYS_RST_RTC_MASK_BIT |
RTC32K_MODE_RESET_MASK_BIT | ALARM_EN_OVERDUE_MASK_BIT |
ALARM_EN_PMC_MASK_BIT | ALARM_EN_MASK_BIT) |
(DIS_SYS_RST_RTC | ALARM_EN_OVERDUE | ALARM_EN_PMC | ALARM_EN),
sp_rtc->reg_base + RTC_CTRL);
else
writel((ALARM_EN_OVERDUE_MASK_BIT | ALARM_EN_PMC_MASK_BIT | ALARM_EN_MASK_BIT) |
0x0, sp_rtc->reg_base + RTC_CTRL);
return 0;
}
static const struct rtc_class_ops sp_rtc_ops = {
.read_time = sp_rtc_read_time,
.set_time = sp_rtc_set_time,
.set_alarm = sp_rtc_set_alarm,
.read_alarm = sp_rtc_read_alarm,
.alarm_irq_enable = sp_rtc_alarm_irq_enable,
};
static irqreturn_t sp_rtc_irq_handler(int irq, void *dev_id)
{
struct platform_device *plat_dev = dev_id;
struct sunplus_rtc *sp_rtc = dev_get_drvdata(&plat_dev->dev);
rtc_update_irq(sp_rtc->rtc, 1, RTC_IRQF | RTC_AF);
dev_dbg(&plat_dev->dev, "[RTC] ALARM INT\n");
return IRQ_HANDLED;
}
/*
* -------------------------------------------------------------------------------------
* bat_charge_rsel bat_charge_dsel bat_charge_en Remarks
* x x 0 Disable
* 0 0 1 0.86mA (2K Ohm with diode)
* 1 0 1 1.81mA (250 Ohm with diode)
* 2 0 1 2.07mA (50 Ohm with diode)
* 3 0 1 16.0mA (0 Ohm with diode)
* 0 1 1 1.36mA (2K Ohm without diode)
* 1 1 1 3.99mA (250 Ohm without diode)
* 2 1 1 4.41mA (50 Ohm without diode)
* 3 1 1 16.0mA (0 Ohm without diode)
* -------------------------------------------------------------------------------------
*/
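/*
 * A minimal device-tree sketch matching the properties parsed below
 * (property values are assumptions for illustration only):
 *
 *	rtc {
 *		compatible = "sunplus,sp7021-rtc";
 *		trickle-resistor-ohms = <250>;
 *		aux-voltage-chargeable = <1>;
 *	};
 */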
static void sp_rtc_set_trickle_charger(struct device *dev)
{
	struct sunplus_rtc *sp_rtc = dev_get_drvdata(dev);
	u32 ohms, rsel;
	u32 chargeable;
	if (of_property_read_u32(dev->of_node, "trickle-resistor-ohms", &ohms) ||
	    of_property_read_u32(dev->of_node, "aux-voltage-chargeable", &chargeable)) {
		dev_warn(dev, "battery charger disabled\n");
		return;
	}
switch (ohms) {
case 2000:
rsel = BAT_CHARGE_RSEL_2K_OHM;
break;
case 250:
rsel = BAT_CHARGE_RSEL_250_OHM;
break;
case 50:
rsel = BAT_CHARGE_RSEL_50_OHM;
break;
case 0:
rsel = BAT_CHARGE_RSEL_0_OHM;
break;
default:
dev_err(&dev, "invalid charger resistor value (%d)\n", ohms);
return;
}
writel(BAT_CHARGE_RSEL_MASK_BIT | rsel, sp_rtc->reg_base + RTC_BATT_CHARGE_CTRL);
switch (chargeable) {
case 0:
writel(BAT_CHARGE_DSEL_MASK_BIT | BAT_CHARGE_DSEL_OFF,
sp_rtc->reg_base + RTC_BATT_CHARGE_CTRL);
break;
case 1:
writel(BAT_CHARGE_DSEL_MASK_BIT | BAT_CHARGE_DSEL_ON,
sp_rtc->reg_base + RTC_BATT_CHARGE_CTRL);
break;
default:
dev_err(&dev, "invalid aux-voltage-chargeable value (%d)\n", chargeable);
return;
}
writel(BAT_CHARGE_EN_MASK_BIT | BAT_CHARGE_EN, sp_rtc->reg_base + RTC_BATT_CHARGE_CTRL);
}
static int sp_rtc_probe(struct platform_device *plat_dev)
{
struct sunplus_rtc *sp_rtc;
int ret;
sp_rtc = devm_kzalloc(&plat_dev->dev, sizeof(*sp_rtc), GFP_KERNEL);
if (!sp_rtc)
return -ENOMEM;
sp_rtc->reg_base = devm_platform_ioremap_resource_byname(plat_dev, RTC_REG_NAME);
if (IS_ERR(sp_rtc->reg_base))
return dev_err_probe(&plat_dev->dev, PTR_ERR(sp_rtc->reg_base),
"%s devm_ioremap_resource fail\n", RTC_REG_NAME);
dev_dbg(&plat_dev->dev, "res = %pR, reg_base = %p\n",
sp_rtc->res, sp_rtc->reg_base);
sp_rtc->irq = platform_get_irq(plat_dev, 0);
if (sp_rtc->irq < 0)
return sp_rtc->irq;
ret = devm_request_irq(&plat_dev->dev, sp_rtc->irq, sp_rtc_irq_handler,
IRQF_TRIGGER_RISING, "rtc irq", plat_dev);
if (ret)
return dev_err_probe(&plat_dev->dev, ret, "devm_request_irq failed:\n");
sp_rtc->rtcclk = devm_clk_get(&plat_dev->dev, NULL);
if (IS_ERR(sp_rtc->rtcclk))
return dev_err_probe(&plat_dev->dev, PTR_ERR(sp_rtc->rtcclk),
"devm_clk_get fail\n");
sp_rtc->rstc = devm_reset_control_get_exclusive(&plat_dev->dev, NULL);
if (IS_ERR(sp_rtc->rstc))
return dev_err_probe(&plat_dev->dev, PTR_ERR(sp_rtc->rstc),
"failed to retrieve reset controller\n");
ret = clk_prepare_enable(sp_rtc->rtcclk);
if (ret)
goto free_clk;
ret = reset_control_deassert(sp_rtc->rstc);
if (ret)
goto free_reset_assert;
device_init_wakeup(&plat_dev->dev, 1);
dev_set_drvdata(&plat_dev->dev, sp_rtc);
sp_rtc->rtc = devm_rtc_allocate_device(&plat_dev->dev);
if (IS_ERR(sp_rtc->rtc)) {
ret = PTR_ERR(sp_rtc->rtc);
goto free_reset_assert;
}
sp_rtc->rtc->range_max = U32_MAX;
sp_rtc->rtc->range_min = 0;
sp_rtc->rtc->ops = &sp_rtc_ops;
ret = devm_rtc_register_device(sp_rtc->rtc);
if (ret)
goto free_reset_assert;
/* Setup trickle charger */
if (plat_dev->dev.of_node)
		sp_rtc_set_trickle_charger(&plat_dev->dev);
/* Keep RTC from system reset */
writel(DIS_SYS_RST_RTC_MASK_BIT | DIS_SYS_RST_RTC, sp_rtc->reg_base + RTC_CTRL);
return 0;
free_reset_assert:
reset_control_assert(sp_rtc->rstc);
free_clk:
clk_disable_unprepare(sp_rtc->rtcclk);
return ret;
}
static void sp_rtc_remove(struct platform_device *plat_dev)
{
struct sunplus_rtc *sp_rtc = dev_get_drvdata(&plat_dev->dev);
device_init_wakeup(&plat_dev->dev, 0);
reset_control_assert(sp_rtc->rstc);
clk_disable_unprepare(sp_rtc->rtcclk);
}
#ifdef CONFIG_PM_SLEEP
static int sp_rtc_suspend(struct device *dev)
{
struct sunplus_rtc *sp_rtc = dev_get_drvdata(dev);
if (device_may_wakeup(dev))
enable_irq_wake(sp_rtc->irq);
return 0;
}
static int sp_rtc_resume(struct device *dev)
{
struct sunplus_rtc *sp_rtc = dev_get_drvdata(dev);
if (device_may_wakeup(dev))
disable_irq_wake(sp_rtc->irq);
return 0;
}
#endif
static const struct of_device_id sp_rtc_of_match[] = {
{ .compatible = "sunplus,sp7021-rtc" },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, sp_rtc_of_match);
static SIMPLE_DEV_PM_OPS(sp_rtc_pm_ops, sp_rtc_suspend, sp_rtc_resume);
static struct platform_driver sp_rtc_driver = {
.probe = sp_rtc_probe,
.remove_new = sp_rtc_remove,
.driver = {
.name = "sp7021-rtc",
.of_match_table = sp_rtc_of_match,
.pm = &sp_rtc_pm_ops,
},
};
module_platform_driver(sp_rtc_driver);
MODULE_AUTHOR("Vincent Shih <[email protected]>");
MODULE_DESCRIPTION("Sunplus RTC driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/rtc/rtc-sunplus.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Real Time Clock driver for Wolfson Microelectronics WM831x
*
* Copyright (C) 2009 Wolfson Microelectronics PLC.
*
* Author: Mark Brown <[email protected]>
*
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/time.h>
#include <linux/rtc.h>
#include <linux/slab.h>
#include <linux/bcd.h>
#include <linux/interrupt.h>
#include <linux/ioctl.h>
#include <linux/completion.h>
#include <linux/mfd/wm831x/core.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
#include <linux/random.h>
/*
* R16416 (0x4020) - RTC Write Counter
*/
#define WM831X_RTC_WR_CNT_MASK 0xFFFF /* RTC_WR_CNT - [15:0] */
#define WM831X_RTC_WR_CNT_SHIFT 0 /* RTC_WR_CNT - [15:0] */
#define WM831X_RTC_WR_CNT_WIDTH 16 /* RTC_WR_CNT - [15:0] */
/*
* R16417 (0x4021) - RTC Time 1
*/
#define WM831X_RTC_TIME_MASK 0xFFFF /* RTC_TIME - [15:0] */
#define WM831X_RTC_TIME_SHIFT 0 /* RTC_TIME - [15:0] */
#define WM831X_RTC_TIME_WIDTH 16 /* RTC_TIME - [15:0] */
/*
* R16418 (0x4022) - RTC Time 2
*/
#define WM831X_RTC_TIME_MASK 0xFFFF /* RTC_TIME - [15:0] */
#define WM831X_RTC_TIME_SHIFT 0 /* RTC_TIME - [15:0] */
#define WM831X_RTC_TIME_WIDTH 16 /* RTC_TIME - [15:0] */
/*
* R16419 (0x4023) - RTC Alarm 1
*/
#define WM831X_RTC_ALM_MASK 0xFFFF /* RTC_ALM - [15:0] */
#define WM831X_RTC_ALM_SHIFT 0 /* RTC_ALM - [15:0] */
#define WM831X_RTC_ALM_WIDTH 16 /* RTC_ALM - [15:0] */
/*
* R16420 (0x4024) - RTC Alarm 2
*/
#define WM831X_RTC_ALM_MASK 0xFFFF /* RTC_ALM - [15:0] */
#define WM831X_RTC_ALM_SHIFT 0 /* RTC_ALM - [15:0] */
#define WM831X_RTC_ALM_WIDTH 16 /* RTC_ALM - [15:0] */
/*
* R16421 (0x4025) - RTC Control
*/
#define WM831X_RTC_VALID 0x8000 /* RTC_VALID */
#define WM831X_RTC_VALID_MASK 0x8000 /* RTC_VALID */
#define WM831X_RTC_VALID_SHIFT 15 /* RTC_VALID */
#define WM831X_RTC_VALID_WIDTH 1 /* RTC_VALID */
#define WM831X_RTC_SYNC_BUSY 0x4000 /* RTC_SYNC_BUSY */
#define WM831X_RTC_SYNC_BUSY_MASK 0x4000 /* RTC_SYNC_BUSY */
#define WM831X_RTC_SYNC_BUSY_SHIFT 14 /* RTC_SYNC_BUSY */
#define WM831X_RTC_SYNC_BUSY_WIDTH 1 /* RTC_SYNC_BUSY */
#define WM831X_RTC_ALM_ENA 0x0400 /* RTC_ALM_ENA */
#define WM831X_RTC_ALM_ENA_MASK 0x0400 /* RTC_ALM_ENA */
#define WM831X_RTC_ALM_ENA_SHIFT 10 /* RTC_ALM_ENA */
#define WM831X_RTC_ALM_ENA_WIDTH 1 /* RTC_ALM_ENA */
#define WM831X_RTC_PINT_FREQ_MASK 0x0070 /* RTC_PINT_FREQ - [6:4] */
#define WM831X_RTC_PINT_FREQ_SHIFT 4 /* RTC_PINT_FREQ - [6:4] */
#define WM831X_RTC_PINT_FREQ_WIDTH 3 /* RTC_PINT_FREQ - [6:4] */
/*
* R16422 (0x4026) - RTC Trim
*/
#define WM831X_RTC_TRIM_MASK 0x03FF /* RTC_TRIM - [9:0] */
#define WM831X_RTC_TRIM_SHIFT 0 /* RTC_TRIM - [9:0] */
#define WM831X_RTC_TRIM_WIDTH 10 /* RTC_TRIM - [9:0] */
#define WM831X_SET_TIME_RETRIES 5
#define WM831X_GET_TIME_RETRIES 5
struct wm831x_rtc {
struct wm831x *wm831x;
struct rtc_device *rtc;
unsigned int alarm_enabled:1;
};
static void wm831x_rtc_add_randomness(struct wm831x *wm831x)
{
int ret;
u16 reg;
/*
* The write counter contains a pseudo-random number which is
* regenerated every time we set the RTC so it should be a
* useful per-system source of entropy.
*/
ret = wm831x_reg_read(wm831x, WM831X_RTC_WRITE_COUNTER);
if (ret >= 0) {
reg = ret;
add_device_randomness(®, sizeof(reg));
} else {
dev_warn(wm831x->dev, "Failed to read RTC write counter: %d\n",
ret);
}
}
/*
* Read current time and date in RTC
*/
static int wm831x_rtc_readtime(struct device *dev, struct rtc_time *tm)
{
struct wm831x_rtc *wm831x_rtc = dev_get_drvdata(dev);
struct wm831x *wm831x = wm831x_rtc->wm831x;
u16 time1[2], time2[2];
int ret;
int count = 0;
/* Has the RTC been programmed? */
ret = wm831x_reg_read(wm831x, WM831X_RTC_CONTROL);
if (ret < 0) {
dev_err(dev, "Failed to read RTC control: %d\n", ret);
return ret;
}
if (!(ret & WM831X_RTC_VALID)) {
dev_dbg(dev, "RTC not yet configured\n");
return -EINVAL;
}
/* Read twice to make sure we don't read a corrupt, partially
* incremented, value.
*/
do {
ret = wm831x_bulk_read(wm831x, WM831X_RTC_TIME_1,
2, time1);
if (ret != 0)
continue;
ret = wm831x_bulk_read(wm831x, WM831X_RTC_TIME_1,
2, time2);
if (ret != 0)
continue;
if (memcmp(time1, time2, sizeof(time1)) == 0) {
u32 time = (time1[0] << 16) | time1[1];
rtc_time64_to_tm(time, tm);
return 0;
}
} while (++count < WM831X_GET_TIME_RETRIES);
dev_err(dev, "Timed out reading current time\n");
return -EIO;
}
/*
* Set current time and date in RTC
*/
static int wm831x_rtc_settime(struct device *dev, struct rtc_time *tm)
{
struct wm831x_rtc *wm831x_rtc = dev_get_drvdata(dev);
struct wm831x *wm831x = wm831x_rtc->wm831x;
struct rtc_time new_tm;
unsigned long time, new_time;
int ret;
int count = 0;
time = rtc_tm_to_time64(tm);
ret = wm831x_reg_write(wm831x, WM831X_RTC_TIME_1,
(time >> 16) & 0xffff);
if (ret < 0) {
dev_err(dev, "Failed to write TIME_1: %d\n", ret);
return ret;
}
ret = wm831x_reg_write(wm831x, WM831X_RTC_TIME_2, time & 0xffff);
if (ret < 0) {
dev_err(dev, "Failed to write TIME_2: %d\n", ret);
return ret;
}
/* Wait for the update to complete - should happen first time
* round but be conservative.
*/
do {
msleep(1);
ret = wm831x_reg_read(wm831x, WM831X_RTC_CONTROL);
if (ret < 0)
ret = WM831X_RTC_SYNC_BUSY;
} while (!(ret & WM831X_RTC_SYNC_BUSY) &&
++count < WM831X_SET_TIME_RETRIES);
if (ret & WM831X_RTC_SYNC_BUSY) {
dev_err(dev, "Timed out writing RTC update\n");
return -EIO;
}
/* Check that the update was accepted; security features may
* have caused the update to be ignored.
*/
ret = wm831x_rtc_readtime(dev, &new_tm);
if (ret < 0)
return ret;
new_time = rtc_tm_to_time64(&new_tm);
/* Allow a second of change in case of tick */
if (new_time - time > 1) {
dev_err(dev, "RTC update not permitted by hardware\n");
return -EPERM;
}
return 0;
}
/*
* Read alarm time and date in RTC
*/
static int wm831x_rtc_readalarm(struct device *dev, struct rtc_wkalrm *alrm)
{
struct wm831x_rtc *wm831x_rtc = dev_get_drvdata(dev);
int ret;
u16 data[2];
u32 time;
ret = wm831x_bulk_read(wm831x_rtc->wm831x, WM831X_RTC_ALARM_1,
2, data);
if (ret != 0) {
dev_err(dev, "Failed to read alarm time: %d\n", ret);
return ret;
}
time = (data[0] << 16) | data[1];
rtc_time64_to_tm(time, &alrm->time);
ret = wm831x_reg_read(wm831x_rtc->wm831x, WM831X_RTC_CONTROL);
if (ret < 0) {
dev_err(dev, "Failed to read RTC control: %d\n", ret);
return ret;
}
if (ret & WM831X_RTC_ALM_ENA)
alrm->enabled = 1;
else
alrm->enabled = 0;
return 0;
}
static int wm831x_rtc_stop_alarm(struct wm831x_rtc *wm831x_rtc)
{
wm831x_rtc->alarm_enabled = 0;
return wm831x_set_bits(wm831x_rtc->wm831x, WM831X_RTC_CONTROL,
WM831X_RTC_ALM_ENA, 0);
}
static int wm831x_rtc_start_alarm(struct wm831x_rtc *wm831x_rtc)
{
wm831x_rtc->alarm_enabled = 1;
return wm831x_set_bits(wm831x_rtc->wm831x, WM831X_RTC_CONTROL,
WM831X_RTC_ALM_ENA, WM831X_RTC_ALM_ENA);
}
static int wm831x_rtc_setalarm(struct device *dev, struct rtc_wkalrm *alrm)
{
struct wm831x_rtc *wm831x_rtc = dev_get_drvdata(dev);
struct wm831x *wm831x = wm831x_rtc->wm831x;
int ret;
unsigned long time;
time = rtc_tm_to_time64(&alrm->time);
ret = wm831x_rtc_stop_alarm(wm831x_rtc);
if (ret < 0) {
dev_err(dev, "Failed to stop alarm: %d\n", ret);
return ret;
}
ret = wm831x_reg_write(wm831x, WM831X_RTC_ALARM_1,
(time >> 16) & 0xffff);
if (ret < 0) {
dev_err(dev, "Failed to write ALARM_1: %d\n", ret);
return ret;
}
ret = wm831x_reg_write(wm831x, WM831X_RTC_ALARM_2, time & 0xffff);
if (ret < 0) {
dev_err(dev, "Failed to write ALARM_2: %d\n", ret);
return ret;
}
if (alrm->enabled) {
ret = wm831x_rtc_start_alarm(wm831x_rtc);
if (ret < 0) {
dev_err(dev, "Failed to start alarm: %d\n", ret);
return ret;
}
}
return 0;
}
static int wm831x_rtc_alarm_irq_enable(struct device *dev,
unsigned int enabled)
{
struct wm831x_rtc *wm831x_rtc = dev_get_drvdata(dev);
if (enabled)
return wm831x_rtc_start_alarm(wm831x_rtc);
else
return wm831x_rtc_stop_alarm(wm831x_rtc);
}
static irqreturn_t wm831x_alm_irq(int irq, void *data)
{
struct wm831x_rtc *wm831x_rtc = data;
rtc_update_irq(wm831x_rtc->rtc, 1, RTC_IRQF | RTC_AF);
return IRQ_HANDLED;
}
static const struct rtc_class_ops wm831x_rtc_ops = {
.read_time = wm831x_rtc_readtime,
.set_time = wm831x_rtc_settime,
.read_alarm = wm831x_rtc_readalarm,
.set_alarm = wm831x_rtc_setalarm,
.alarm_irq_enable = wm831x_rtc_alarm_irq_enable,
};
#ifdef CONFIG_PM
/* Turn off the alarm if it should not be a wake source. */
static int wm831x_rtc_suspend(struct device *dev)
{
struct wm831x_rtc *wm831x_rtc = dev_get_drvdata(dev);
int ret, enable;
if (wm831x_rtc->alarm_enabled && device_may_wakeup(dev))
enable = WM831X_RTC_ALM_ENA;
else
enable = 0;
ret = wm831x_set_bits(wm831x_rtc->wm831x, WM831X_RTC_CONTROL,
WM831X_RTC_ALM_ENA, enable);
if (ret != 0)
dev_err(dev, "Failed to update RTC alarm: %d\n", ret);
return 0;
}
/* Enable the alarm if it should be enabled (in case it was disabled to
* prevent use as a wake source).
*/
static int wm831x_rtc_resume(struct device *dev)
{
struct wm831x_rtc *wm831x_rtc = dev_get_drvdata(dev);
int ret;
if (wm831x_rtc->alarm_enabled) {
ret = wm831x_rtc_start_alarm(wm831x_rtc);
if (ret != 0)
dev_err(dev, "Failed to restart RTC alarm: %d\n", ret);
}
return 0;
}
/* Unconditionally disable the alarm */
static int wm831x_rtc_freeze(struct device *dev)
{
struct wm831x_rtc *wm831x_rtc = dev_get_drvdata(dev);
int ret;
ret = wm831x_set_bits(wm831x_rtc->wm831x, WM831X_RTC_CONTROL,
WM831X_RTC_ALM_ENA, 0);
if (ret != 0)
dev_err(dev, "Failed to stop RTC alarm: %d\n", ret);
return 0;
}
#else
#define wm831x_rtc_suspend NULL
#define wm831x_rtc_resume NULL
#define wm831x_rtc_freeze NULL
#endif
static int wm831x_rtc_probe(struct platform_device *pdev)
{
struct wm831x *wm831x = dev_get_drvdata(pdev->dev.parent);
struct wm831x_rtc *wm831x_rtc;
int alm_irq = wm831x_irq(wm831x, platform_get_irq_byname(pdev, "ALM"));
int ret = 0;
wm831x_rtc = devm_kzalloc(&pdev->dev, sizeof(*wm831x_rtc), GFP_KERNEL);
if (wm831x_rtc == NULL)
return -ENOMEM;
platform_set_drvdata(pdev, wm831x_rtc);
wm831x_rtc->wm831x = wm831x;
ret = wm831x_reg_read(wm831x, WM831X_RTC_CONTROL);
if (ret < 0) {
dev_err(&pdev->dev, "Failed to read RTC control: %d\n", ret);
return ret;
}
if (ret & WM831X_RTC_ALM_ENA)
wm831x_rtc->alarm_enabled = 1;
device_init_wakeup(&pdev->dev, 1);
wm831x_rtc->rtc = devm_rtc_allocate_device(&pdev->dev);
if (IS_ERR(wm831x_rtc->rtc))
return PTR_ERR(wm831x_rtc->rtc);
wm831x_rtc->rtc->ops = &wm831x_rtc_ops;
wm831x_rtc->rtc->range_max = U32_MAX;
ret = devm_rtc_register_device(wm831x_rtc->rtc);
if (ret)
return ret;
ret = devm_request_threaded_irq(&pdev->dev, alm_irq, NULL,
wm831x_alm_irq,
IRQF_TRIGGER_RISING | IRQF_ONESHOT,
"RTC alarm",
wm831x_rtc);
if (ret != 0) {
dev_err(&pdev->dev, "Failed to request alarm IRQ %d: %d\n",
alm_irq, ret);
}
wm831x_rtc_add_randomness(wm831x);
return 0;
}
static const struct dev_pm_ops wm831x_rtc_pm_ops = {
.suspend = wm831x_rtc_suspend,
.resume = wm831x_rtc_resume,
.freeze = wm831x_rtc_freeze,
.thaw = wm831x_rtc_resume,
.restore = wm831x_rtc_resume,
.poweroff = wm831x_rtc_suspend,
};
static struct platform_driver wm831x_rtc_driver = {
.probe = wm831x_rtc_probe,
.driver = {
.name = "wm831x-rtc",
.pm = &wm831x_rtc_pm_ops,
},
};
module_platform_driver(wm831x_rtc_driver);
MODULE_AUTHOR("Mark Brown <[email protected]>");
MODULE_DESCRIPTION("RTC driver for the WM831x series PMICs");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:wm831x-rtc");
| linux-master | drivers/rtc/rtc-wm831x.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Faraday Technology FTRTC010 driver
*
* Copyright (C) 2009 Janos Laube <[email protected]>
*
* Original code for older kernel 2.6.15 are from Stormlinksemi
* first update from Janos Laube for > 2.6.29 kernels
*
* checkpatch fixes and usage of rtc-lib code
* Hans Ulli Kroll <[email protected]>
*/
#include <linux/rtc.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/platform_device.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/clk.h>
#define DRV_NAME "rtc-ftrtc010"
MODULE_AUTHOR("Hans Ulli Kroll <[email protected]>");
MODULE_DESCRIPTION("RTC driver for Gemini SoC");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" DRV_NAME);
struct ftrtc010_rtc {
struct rtc_device *rtc_dev;
void __iomem *rtc_base;
int rtc_irq;
struct clk *pclk;
struct clk *extclk;
};
enum ftrtc010_rtc_offsets {
FTRTC010_RTC_SECOND = 0x00,
FTRTC010_RTC_MINUTE = 0x04,
FTRTC010_RTC_HOUR = 0x08,
FTRTC010_RTC_DAYS = 0x0C,
FTRTC010_RTC_ALARM_SECOND = 0x10,
FTRTC010_RTC_ALARM_MINUTE = 0x14,
FTRTC010_RTC_ALARM_HOUR = 0x18,
FTRTC010_RTC_RECORD = 0x1C,
FTRTC010_RTC_CR = 0x20,
};
static irqreturn_t ftrtc010_rtc_interrupt(int irq, void *dev)
{
return IRQ_HANDLED;
}
/*
 * Looks like the RTC in the Gemini SoC is (totally) broken:
 * we can't read or write the time directly through the RTC registers,
 * so we must do an "offset" calculation to get the real time.
 *
 * This fix works fine, and Stormlinksemi aka Cortina-Networks does
 * the same thing, without the rtc-lib.c calls.
*/
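/*
 * Worked example of the offset scheme (illustrative values): with
 * DAYS = 1, HOUR = 2, MINUTE = 0, SECOND = 5 and RECORD = 1000, the
 * reported time is 1000 + 1 * 86400 + 2 * 3600 + 0 * 60 + 5 seconds.
 */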
static int ftrtc010_rtc_read_time(struct device *dev, struct rtc_time *tm)
{
struct ftrtc010_rtc *rtc = dev_get_drvdata(dev);
u32 days, hour, min, sec, offset;
timeu64_t time;
sec = readl(rtc->rtc_base + FTRTC010_RTC_SECOND);
min = readl(rtc->rtc_base + FTRTC010_RTC_MINUTE);
hour = readl(rtc->rtc_base + FTRTC010_RTC_HOUR);
days = readl(rtc->rtc_base + FTRTC010_RTC_DAYS);
offset = readl(rtc->rtc_base + FTRTC010_RTC_RECORD);
time = offset + days * 86400 + hour * 3600 + min * 60 + sec;
rtc_time64_to_tm(time, tm);
return 0;
}
static int ftrtc010_rtc_set_time(struct device *dev, struct rtc_time *tm)
{
struct ftrtc010_rtc *rtc = dev_get_drvdata(dev);
u32 sec, min, hour, day, offset;
timeu64_t time;
time = rtc_tm_to_time64(tm);
sec = readl(rtc->rtc_base + FTRTC010_RTC_SECOND);
min = readl(rtc->rtc_base + FTRTC010_RTC_MINUTE);
hour = readl(rtc->rtc_base + FTRTC010_RTC_HOUR);
day = readl(rtc->rtc_base + FTRTC010_RTC_DAYS);
offset = time - (day * 86400 + hour * 3600 + min * 60 + sec);
writel(offset, rtc->rtc_base + FTRTC010_RTC_RECORD);
writel(0x01, rtc->rtc_base + FTRTC010_RTC_CR);
return 0;
}
static const struct rtc_class_ops ftrtc010_rtc_ops = {
.read_time = ftrtc010_rtc_read_time,
.set_time = ftrtc010_rtc_set_time,
};
static int ftrtc010_rtc_probe(struct platform_device *pdev)
{
u32 days, hour, min, sec;
struct ftrtc010_rtc *rtc;
struct device *dev = &pdev->dev;
struct resource *res;
int ret;
rtc = devm_kzalloc(&pdev->dev, sizeof(*rtc), GFP_KERNEL);
if (unlikely(!rtc))
return -ENOMEM;
platform_set_drvdata(pdev, rtc);
rtc->pclk = devm_clk_get(dev, "PCLK");
if (IS_ERR(rtc->pclk)) {
dev_err(dev, "could not get PCLK\n");
} else {
ret = clk_prepare_enable(rtc->pclk);
if (ret) {
dev_err(dev, "failed to enable PCLK\n");
return ret;
}
}
rtc->extclk = devm_clk_get(dev, "EXTCLK");
if (IS_ERR(rtc->extclk)) {
dev_err(dev, "could not get EXTCLK\n");
} else {
ret = clk_prepare_enable(rtc->extclk);
if (ret) {
dev_err(dev, "failed to enable EXTCLK\n");
goto err_disable_pclk;
}
}
rtc->rtc_irq = platform_get_irq(pdev, 0);
if (rtc->rtc_irq < 0) {
ret = rtc->rtc_irq;
goto err_disable_extclk;
}
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res) {
ret = -ENODEV;
goto err_disable_extclk;
}
rtc->rtc_base = devm_ioremap(dev, res->start,
resource_size(res));
if (!rtc->rtc_base) {
ret = -ENOMEM;
goto err_disable_extclk;
}
rtc->rtc_dev = devm_rtc_allocate_device(dev);
if (IS_ERR(rtc->rtc_dev)) {
ret = PTR_ERR(rtc->rtc_dev);
goto err_disable_extclk;
}
rtc->rtc_dev->ops = &ftrtc010_rtc_ops;
sec = readl(rtc->rtc_base + FTRTC010_RTC_SECOND);
min = readl(rtc->rtc_base + FTRTC010_RTC_MINUTE);
hour = readl(rtc->rtc_base + FTRTC010_RTC_HOUR);
days = readl(rtc->rtc_base + FTRTC010_RTC_DAYS);
rtc->rtc_dev->range_min = (u64)days * 86400 + hour * 3600 +
min * 60 + sec;
rtc->rtc_dev->range_max = U32_MAX + rtc->rtc_dev->range_min;
ret = devm_request_irq(dev, rtc->rtc_irq, ftrtc010_rtc_interrupt,
IRQF_SHARED, pdev->name, dev);
if (unlikely(ret))
goto err_disable_extclk;
return devm_rtc_register_device(rtc->rtc_dev);
err_disable_extclk:
clk_disable_unprepare(rtc->extclk);
err_disable_pclk:
clk_disable_unprepare(rtc->pclk);
return ret;
}
static void ftrtc010_rtc_remove(struct platform_device *pdev)
{
struct ftrtc010_rtc *rtc = platform_get_drvdata(pdev);
if (!IS_ERR(rtc->extclk))
clk_disable_unprepare(rtc->extclk);
if (!IS_ERR(rtc->pclk))
clk_disable_unprepare(rtc->pclk);
}
static const struct of_device_id ftrtc010_rtc_dt_match[] = {
{ .compatible = "cortina,gemini-rtc" },
{ .compatible = "faraday,ftrtc010" },
{ }
};
MODULE_DEVICE_TABLE(of, ftrtc010_rtc_dt_match);
static struct platform_driver ftrtc010_rtc_driver = {
.driver = {
.name = DRV_NAME,
.of_match_table = ftrtc010_rtc_dt_match,
},
.remove_new = ftrtc010_rtc_remove,
};
module_platform_driver_probe(ftrtc010_rtc_driver, ftrtc010_rtc_probe);
| linux-master | drivers/rtc/rtc-ftrtc010.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* RTC driver for Rockchip RK808
*
* Copyright (c) 2014, Fuzhou Rockchip Electronics Co., Ltd
*
* Author: Chris Zhong <[email protected]>
* Author: Zhang Qing <[email protected]>
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/rtc.h>
#include <linux/bcd.h>
#include <linux/mfd/rk808.h>
#include <linux/platform_device.h>
/* RTC_CTRL_REG bitfields */
#define BIT_RTC_CTRL_REG_STOP_RTC_M BIT(0)
/* RK808 has a shadowed register for saving a "frozen" RTC time.
* When the user sets "GET_TIME" to 1, the time is latched into this
* shadowed register. While "READSEL" is 1, reads of the RTC time
* registers return that latched snapshot. If we need the real (live)
* time, clear this bit.
*/
#define BIT_RTC_CTRL_REG_RTC_GET_TIME BIT(6)
#define BIT_RTC_CTRL_REG_RTC_READSEL_M BIT(7)
#define BIT_RTC_INTERRUPTS_REG_IT_ALARM_M BIT(3)
#define RTC_STATUS_MASK 0xFE
#define SECONDS_REG_MSK 0x7F
#define MINUTES_REG_MSK 0x7F
#define HOURS_REG_MSK 0x3F
#define DAYS_REG_MSK 0x3F
#define MONTHS_REG_MSK 0x1F
#define YEARS_REG_MSK 0xFF
#define WEEKS_REG_MSK 0x7
/* Number of time registers, RK808_SECONDS_REG through RK808_WEEKS_REG */
#define NUM_TIME_REGS (RK808_WEEKS_REG - RK808_SECONDS_REG + 1)
#define NUM_ALARM_REGS (RK808_ALARM_YEARS_REG - RK808_ALARM_SECONDS_REG + 1)
struct rk_rtc_compat_reg {
unsigned int ctrl_reg;
unsigned int status_reg;
unsigned int alarm_seconds_reg;
unsigned int int_reg;
unsigned int seconds_reg;
};
struct rk808_rtc {
struct regmap *regmap;
struct rtc_device *rtc;
struct rk_rtc_compat_reg *creg;
int irq;
};
/*
* The Rockchip calendar used by the RK808 counts November with 31 days. We use
* these translation functions to convert its dates to/from the Gregorian
* calendar used by the rest of the world. We arbitrarily define Jan 1st, 2016
* as the day when both calendars were in sync, and treat all other dates
* relative to that.
* NOTE: Other system software (e.g. firmware) that reads the same hardware must
* implement this exact same conversion algorithm, with the same anchor date.
*/
static time64_t nov2dec_transitions(struct rtc_time *tm)
{
return (tm->tm_year + 1900) - 2016 + (tm->tm_mon + 1 > 11 ? 1 : 0);
}
static void rockchip_to_gregorian(struct rtc_time *tm)
{
/* If it's Nov 31st, rtc_tm_to_time64() will count that like Dec 1st */
time64_t time = rtc_tm_to_time64(tm);
rtc_time64_to_tm(time + nov2dec_transitions(tm) * 86400, tm);
}
static void gregorian_to_rockchip(struct rtc_time *tm)
{
time64_t extra_days = nov2dec_transitions(tm);
time64_t time = rtc_tm_to_time64(tm);
rtc_time64_to_tm(time - extra_days * 86400, tm);
/* Compensate if we went back over Nov 31st (will work up to 2381) */
if (nov2dec_transitions(tm) < extra_days) {
if (tm->tm_mon + 1 == 11)
tm->tm_mday++; /* This may result in 31! */
else
rtc_time64_to_tm(time - (extra_days - 1) * 86400, tm);
}
}
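/*
* Worked example (for illustration only): Gregorian Dec 1st, 2016 lies
* one Nov->Dec transition past the Jan 1st, 2016 anchor, so
* gregorian_to_rockchip() subtracts one day and lands on Gregorian
* Nov 30th. That crosses back over the transition, and because the month
* is November the day is bumped to the Rockchip-only Nov 31st, 2016.
* Going the other way, rtc_tm_to_time64() already counts Nov 31st like
* Dec 1st, so rockchip_to_gregorian() needs no extra day for this date.
*/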
/* Read current time and date in RTC */
static int rk808_rtc_readtime(struct device *dev, struct rtc_time *tm)
{
struct rk808_rtc *rk808_rtc = dev_get_drvdata(dev);
u8 rtc_data[NUM_TIME_REGS];
int ret;
/* Force an update of the shadowed registers right now */
ret = regmap_update_bits(rk808_rtc->regmap, rk808_rtc->creg->ctrl_reg,
BIT_RTC_CTRL_REG_RTC_GET_TIME,
BIT_RTC_CTRL_REG_RTC_GET_TIME);
if (ret) {
dev_err(dev, "Failed to update bits rtc_ctrl: %d\n", ret);
return ret;
}
/*
* After setting the GET_TIME bit, the RTC time can't be read
* immediately; we have to wait up to 31.25 us, about one cycle of
* the 32 kHz clock. Clearing the GET_TIME bit here is safe because
* the I2C transfer itself takes well over 31.25 us:
* 16 * 2.5 us at a 400 kHz bus frequency.
*/
ret = regmap_update_bits(rk808_rtc->regmap, rk808_rtc->creg->ctrl_reg,
BIT_RTC_CTRL_REG_RTC_GET_TIME,
0);
if (ret) {
dev_err(dev, "Failed to update bits rtc_ctrl: %d\n", ret);
return ret;
}
ret = regmap_bulk_read(rk808_rtc->regmap, rk808_rtc->creg->seconds_reg,
rtc_data, NUM_TIME_REGS);
if (ret) {
dev_err(dev, "Failed to bulk read rtc_data: %d\n", ret);
return ret;
}
tm->tm_sec = bcd2bin(rtc_data[0] & SECONDS_REG_MSK);
tm->tm_min = bcd2bin(rtc_data[1] & MINUTES_REG_MSK);
tm->tm_hour = bcd2bin(rtc_data[2] & HOURS_REG_MSK);
tm->tm_mday = bcd2bin(rtc_data[3] & DAYS_REG_MSK);
tm->tm_mon = (bcd2bin(rtc_data[4] & MONTHS_REG_MSK)) - 1;
tm->tm_year = (bcd2bin(rtc_data[5] & YEARS_REG_MSK)) + 100;
tm->tm_wday = bcd2bin(rtc_data[6] & WEEKS_REG_MSK);
rockchip_to_gregorian(tm);
dev_dbg(dev, "RTC date/time %ptRd(%d) %ptRt\n", tm, tm->tm_wday, tm);
return ret;
}
/* Set current time and date in RTC */
static int rk808_rtc_set_time(struct device *dev, struct rtc_time *tm)
{
struct rk808_rtc *rk808_rtc = dev_get_drvdata(dev);
u8 rtc_data[NUM_TIME_REGS];
int ret;
dev_dbg(dev, "set RTC date/time %ptRd(%d) %ptRt\n", tm, tm->tm_wday, tm);
gregorian_to_rockchip(tm);
rtc_data[0] = bin2bcd(tm->tm_sec);
rtc_data[1] = bin2bcd(tm->tm_min);
rtc_data[2] = bin2bcd(tm->tm_hour);
rtc_data[3] = bin2bcd(tm->tm_mday);
rtc_data[4] = bin2bcd(tm->tm_mon + 1);
rtc_data[5] = bin2bcd(tm->tm_year - 100);
rtc_data[6] = bin2bcd(tm->tm_wday);
/* Stop RTC while updating the RTC registers */
ret = regmap_update_bits(rk808_rtc->regmap, rk808_rtc->creg->ctrl_reg,
BIT_RTC_CTRL_REG_STOP_RTC_M,
BIT_RTC_CTRL_REG_STOP_RTC_M);
if (ret) {
dev_err(dev, "Failed to update RTC control: %d\n", ret);
return ret;
}
ret = regmap_bulk_write(rk808_rtc->regmap, rk808_rtc->creg->seconds_reg,
rtc_data, NUM_TIME_REGS);
if (ret) {
dev_err(dev, "Failed to bull write rtc_data: %d\n", ret);
return ret;
}
/* Start RTC again */
ret = regmap_update_bits(rk808_rtc->regmap, rk808_rtc->creg->ctrl_reg,
BIT_RTC_CTRL_REG_STOP_RTC_M, 0);
if (ret) {
dev_err(dev, "Failed to update RTC control: %d\n", ret);
return ret;
}
return 0;
}
/* Read alarm time and date in RTC */
static int rk808_rtc_readalarm(struct device *dev, struct rtc_wkalrm *alrm)
{
struct rk808_rtc *rk808_rtc = dev_get_drvdata(dev);
u8 alrm_data[NUM_ALARM_REGS];
uint32_t int_reg;
int ret;
ret = regmap_bulk_read(rk808_rtc->regmap,
rk808_rtc->creg->alarm_seconds_reg,
alrm_data, NUM_ALARM_REGS);
if (ret) {
dev_err(dev, "Failed to read RTC alarm date REG: %d\n", ret);
return ret;
}
alrm->time.tm_sec = bcd2bin(alrm_data[0] & SECONDS_REG_MSK);
alrm->time.tm_min = bcd2bin(alrm_data[1] & MINUTES_REG_MSK);
alrm->time.tm_hour = bcd2bin(alrm_data[2] & HOURS_REG_MSK);
alrm->time.tm_mday = bcd2bin(alrm_data[3] & DAYS_REG_MSK);
alrm->time.tm_mon = (bcd2bin(alrm_data[4] & MONTHS_REG_MSK)) - 1;
alrm->time.tm_year = (bcd2bin(alrm_data[5] & YEARS_REG_MSK)) + 100;
rockchip_to_gregorian(&alrm->time);
ret = regmap_read(rk808_rtc->regmap, rk808_rtc->creg->int_reg, &int_reg);
if (ret) {
dev_err(dev, "Failed to read RTC INT REG: %d\n", ret);
return ret;
}
dev_dbg(dev, "alrm read RTC date/time %ptRd(%d) %ptRt\n",
&alrm->time, alrm->time.tm_wday, &alrm->time);
alrm->enabled = (int_reg & BIT_RTC_INTERRUPTS_REG_IT_ALARM_M) ? 1 : 0;
return 0;
}
static int rk808_rtc_stop_alarm(struct rk808_rtc *rk808_rtc)
{
int ret;
ret = regmap_update_bits(rk808_rtc->regmap, rk808_rtc->creg->int_reg,
BIT_RTC_INTERRUPTS_REG_IT_ALARM_M, 0);
return ret;
}
static int rk808_rtc_start_alarm(struct rk808_rtc *rk808_rtc)
{
int ret;
ret = regmap_update_bits(rk808_rtc->regmap, rk808_rtc->creg->int_reg,
BIT_RTC_INTERRUPTS_REG_IT_ALARM_M,
BIT_RTC_INTERRUPTS_REG_IT_ALARM_M);
return ret;
}
static int rk808_rtc_setalarm(struct device *dev, struct rtc_wkalrm *alrm)
{
struct rk808_rtc *rk808_rtc = dev_get_drvdata(dev);
u8 alrm_data[NUM_ALARM_REGS];
int ret;
ret = rk808_rtc_stop_alarm(rk808_rtc);
if (ret) {
dev_err(dev, "Failed to stop alarm: %d\n", ret);
return ret;
}
dev_dbg(dev, "alrm set RTC date/time %ptRd(%d) %ptRt\n",
&alrm->time, alrm->time.tm_wday, &alrm->time);
gregorian_to_rockchip(&alrm->time);
alrm_data[0] = bin2bcd(alrm->time.tm_sec);
alrm_data[1] = bin2bcd(alrm->time.tm_min);
alrm_data[2] = bin2bcd(alrm->time.tm_hour);
alrm_data[3] = bin2bcd(alrm->time.tm_mday);
alrm_data[4] = bin2bcd(alrm->time.tm_mon + 1);
alrm_data[5] = bin2bcd(alrm->time.tm_year - 100);
ret = regmap_bulk_write(rk808_rtc->regmap,
rk808_rtc->creg->alarm_seconds_reg,
alrm_data, NUM_ALARM_REGS);
if (ret) {
dev_err(dev, "Failed to bulk write: %d\n", ret);
return ret;
}
if (alrm->enabled) {
ret = rk808_rtc_start_alarm(rk808_rtc);
if (ret) {
dev_err(dev, "Failed to start alarm: %d\n", ret);
return ret;
}
}
return 0;
}
static int rk808_rtc_alarm_irq_enable(struct device *dev,
unsigned int enabled)
{
struct rk808_rtc *rk808_rtc = dev_get_drvdata(dev);
if (enabled)
return rk808_rtc_start_alarm(rk808_rtc);
return rk808_rtc_stop_alarm(rk808_rtc);
}
/*
* We will just handle setting the frequency and make use of the
* framework for reading the periodic interrupts.
*
* @freq: Current periodic IRQ freq:
* bit 0: every second
* bit 1: every minute
* bit 2: every hour
* bit 3: every day
*/
static irqreturn_t rk808_alarm_irq(int irq, void *data)
{
struct rk808_rtc *rk808_rtc = data;
int ret;
ret = regmap_write(rk808_rtc->regmap, rk808_rtc->creg->status_reg,
RTC_STATUS_MASK);
if (ret) {
dev_err(&rk808_rtc->rtc->dev,
"%s:Failed to update RTC status: %d\n", __func__, ret);
return ret;
}
rtc_update_irq(rk808_rtc->rtc, 1, RTC_IRQF | RTC_AF);
dev_dbg(&rk808_rtc->rtc->dev,
"%s:irq=%d\n", __func__, irq);
return IRQ_HANDLED;
}
static const struct rtc_class_ops rk808_rtc_ops = {
.read_time = rk808_rtc_readtime,
.set_time = rk808_rtc_set_time,
.read_alarm = rk808_rtc_readalarm,
.set_alarm = rk808_rtc_setalarm,
.alarm_irq_enable = rk808_rtc_alarm_irq_enable,
};
#ifdef CONFIG_PM_SLEEP
/* Turn off the alarm if it should not be a wake source. */
static int rk808_rtc_suspend(struct device *dev)
{
struct rk808_rtc *rk808_rtc = dev_get_drvdata(dev);
if (device_may_wakeup(dev))
enable_irq_wake(rk808_rtc->irq);
return 0;
}
/* Enable the alarm if it should be enabled (in case it was disabled to
* prevent use as a wake source).
*/
static int rk808_rtc_resume(struct device *dev)
{
struct rk808_rtc *rk808_rtc = dev_get_drvdata(dev);
if (device_may_wakeup(dev))
disable_irq_wake(rk808_rtc->irq);
return 0;
}
#endif
static SIMPLE_DEV_PM_OPS(rk808_rtc_pm_ops,
rk808_rtc_suspend, rk808_rtc_resume);
static struct rk_rtc_compat_reg rk808_creg = {
.ctrl_reg = RK808_RTC_CTRL_REG,
.status_reg = RK808_RTC_STATUS_REG,
.alarm_seconds_reg = RK808_ALARM_SECONDS_REG,
.int_reg = RK808_RTC_INT_REG,
.seconds_reg = RK808_SECONDS_REG,
};
static struct rk_rtc_compat_reg rk817_creg = {
.ctrl_reg = RK817_RTC_CTRL_REG,
.status_reg = RK817_RTC_STATUS_REG,
.alarm_seconds_reg = RK817_ALARM_SECONDS_REG,
.int_reg = RK817_RTC_INT_REG,
.seconds_reg = RK817_SECONDS_REG,
};
static int rk808_rtc_probe(struct platform_device *pdev)
{
struct rk808 *rk808 = dev_get_drvdata(pdev->dev.parent);
struct rk808_rtc *rk808_rtc;
int ret;
rk808_rtc = devm_kzalloc(&pdev->dev, sizeof(*rk808_rtc), GFP_KERNEL);
if (rk808_rtc == NULL)
return -ENOMEM;
switch (rk808->variant) {
case RK809_ID:
case RK817_ID:
rk808_rtc->creg = &rk817_creg;
break;
default:
rk808_rtc->creg = &rk808_creg;
break;
}
platform_set_drvdata(pdev, rk808_rtc);
rk808_rtc->regmap = dev_get_regmap(pdev->dev.parent, NULL);
if (!rk808_rtc->regmap)
return -ENODEV;
/* start rtc running by default, and use shadowed timer. */
ret = regmap_update_bits(rk808_rtc->regmap, rk808_rtc->creg->ctrl_reg,
BIT_RTC_CTRL_REG_STOP_RTC_M |
BIT_RTC_CTRL_REG_RTC_READSEL_M,
BIT_RTC_CTRL_REG_RTC_READSEL_M);
if (ret) {
dev_err(&pdev->dev,
"Failed to update RTC control: %d\n", ret);
return ret;
}
ret = regmap_write(rk808_rtc->regmap, rk808_rtc->creg->status_reg,
RTC_STATUS_MASK);
if (ret) {
dev_err(&pdev->dev,
"Failed to write RTC status: %d\n", ret);
return ret;
}
device_init_wakeup(&pdev->dev, 1);
rk808_rtc->rtc = devm_rtc_allocate_device(&pdev->dev);
if (IS_ERR(rk808_rtc->rtc))
return PTR_ERR(rk808_rtc->rtc);
rk808_rtc->rtc->ops = &rk808_rtc_ops;
rk808_rtc->irq = platform_get_irq(pdev, 0);
if (rk808_rtc->irq < 0)
return rk808_rtc->irq;
/* request alarm irq of rk808 */
ret = devm_request_threaded_irq(&pdev->dev, rk808_rtc->irq, NULL,
rk808_alarm_irq, 0,
"RTC alarm", rk808_rtc);
if (ret) {
dev_err(&pdev->dev, "Failed to request alarm IRQ %d: %d\n",
rk808_rtc->irq, ret);
return ret;
}
return devm_rtc_register_device(rk808_rtc->rtc);
}
static struct platform_driver rk808_rtc_driver = {
.probe = rk808_rtc_probe,
.driver = {
.name = "rk808-rtc",
.pm = &rk808_rtc_pm_ops,
},
};
module_platform_driver(rk808_rtc_driver);
MODULE_DESCRIPTION("RTC driver for the rk808 series PMICs");
MODULE_AUTHOR("Chris Zhong <[email protected]>");
MODULE_AUTHOR("Zhang Qing <[email protected]>");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:rk808-rtc");
| linux-master | drivers/rtc/rtc-rk808.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* "RTT as Real Time Clock" driver for AT91SAM9 SoC family
*
* (C) 2007 Michel Benoit
*
* Based on rtc-at91rm9200.c by Rick Bronson
*/
#include <linux/clk.h>
#include <linux/interrupt.h>
#include <linux/ioctl.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/rtc.h>
#include <linux/slab.h>
#include <linux/suspend.h>
#include <linux/time.h>
/*
* This driver uses two configurable hardware resources that live in the
* AT91SAM9 backup power domain (intended to be powered at all times)
* to implement the Real Time Clock interfaces
*
* - A "Real-time Timer" (RTT) counts up in seconds from a base time.
* We can't assign the counter value (CRTV) ... but we can reset it.
*
* - One of the "General Purpose Backup Registers" (GPBRs) holds the
* base time, normally an offset from the beginning of the POSIX
* epoch (1970-Jan-1 00:00:00 UTC). Some systems also include the
* local timezone's offset.
*
* The RTC's value is the RTT counter plus that offset. The RTC's alarm
* is likewise a base (ALMV) plus that offset.
*
* Not all RTTs will be used as RTCs; some systems have multiple RTTs to
* choose from, or a "real" RTC module. All systems have multiple GPBR
* registers available, likewise usable for more than "RTC" support.
*/
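/*
* Numeric sketch (values are illustrative only): with the GPBR base time
* set to 1500000000 and the RTT counter (VR) at 12345, the reported RTC
* time is 1500012345. Setting the clock resets the counter and writes
* the new base into the GPBR, so the sum stays consistent.
*/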
#define AT91_RTT_MR 0x00 /* Real-time Mode Register */
#define AT91_RTT_RTPRES (0xffff << 0) /* Timer Prescaler Value */
#define AT91_RTT_ALMIEN BIT(16) /* Alarm Interrupt Enable */
#define AT91_RTT_RTTINCIEN BIT(17) /* Increment Interrupt Enable */
#define AT91_RTT_RTTRST BIT(18) /* Timer Restart */
#define AT91_RTT_AR 0x04 /* Real-time Alarm Register */
#define AT91_RTT_ALMV (0xffffffff) /* Alarm Value */
#define AT91_RTT_VR 0x08 /* Real-time Value Register */
#define AT91_RTT_CRTV (0xffffffff) /* Current Real-time Value */
#define AT91_RTT_SR 0x0c /* Real-time Status Register */
#define AT91_RTT_ALMS BIT(0) /* Alarm Status */
#define AT91_RTT_RTTINC BIT(1) /* Timer Increment */
/*
* We store ALARM_DISABLED in ALMV to record that no alarm is set.
* It's also the reset value for that field.
*/
#define ALARM_DISABLED ((u32)~0)
struct sam9_rtc {
void __iomem *rtt;
struct rtc_device *rtcdev;
u32 imr;
struct regmap *gpbr;
unsigned int gpbr_offset;
int irq;
struct clk *sclk;
bool suspended;
unsigned long events;
spinlock_t lock;
};
#define rtt_readl(rtc, field) \
readl((rtc)->rtt + AT91_RTT_ ## field)
#define rtt_writel(rtc, field, val) \
writel((val), (rtc)->rtt + AT91_RTT_ ## field)
static inline unsigned int gpbr_readl(struct sam9_rtc *rtc)
{
unsigned int val;
regmap_read(rtc->gpbr, rtc->gpbr_offset, &val);
return val;
}
static inline void gpbr_writel(struct sam9_rtc *rtc, unsigned int val)
{
regmap_write(rtc->gpbr, rtc->gpbr_offset, val);
}
/*
* Read current time and date in RTC
*/
static int at91_rtc_readtime(struct device *dev, struct rtc_time *tm)
{
struct sam9_rtc *rtc = dev_get_drvdata(dev);
u32 secs, secs2;
u32 offset;
/* read current time offset */
offset = gpbr_readl(rtc);
if (offset == 0)
return -EILSEQ;
/* reread the counter to help sync the two clock domains */
secs = rtt_readl(rtc, VR);
secs2 = rtt_readl(rtc, VR);
if (secs != secs2)
secs = rtt_readl(rtc, VR);
rtc_time64_to_tm(offset + secs, tm);
dev_dbg(dev, "%s: %ptR\n", __func__, tm);
return 0;
}
/*
* Set current time and date in RTC
*/
static int at91_rtc_settime(struct device *dev, struct rtc_time *tm)
{
struct sam9_rtc *rtc = dev_get_drvdata(dev);
u32 offset, alarm, mr;
unsigned long secs;
dev_dbg(dev, "%s: %ptR\n", __func__, tm);
secs = rtc_tm_to_time64(tm);
mr = rtt_readl(rtc, MR);
/* disable interrupts */
rtt_writel(rtc, MR, mr & ~(AT91_RTT_ALMIEN | AT91_RTT_RTTINCIEN));
/* read current time offset */
offset = gpbr_readl(rtc);
/* store the new base time in a battery backup register */
secs += 1;
gpbr_writel(rtc, secs);
/* adjust the alarm time for the new base */
alarm = rtt_readl(rtc, AR);
if (alarm != ALARM_DISABLED) {
if (offset > secs) {
/* time jumped backwards, increase time until alarm */
alarm += (offset - secs);
} else if ((alarm + offset) > secs) {
/* time jumped forwards, decrease time until alarm */
alarm -= (secs - offset);
} else {
/* time jumped past the alarm, disable alarm */
alarm = ALARM_DISABLED;
mr &= ~AT91_RTT_ALMIEN;
}
rtt_writel(rtc, AR, alarm);
}
/* reset the timer, and re-enable interrupts */
rtt_writel(rtc, MR, mr | AT91_RTT_RTTRST);
return 0;
}
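/*
* Illustration of the alarm re-basing above (hypothetical numbers): with
* the old base at 1000 and an alarm value of 50, the alarm fires at
* absolute time 1050. If the clock is set back so the new base is 900,
* the alarm value must grow by 100 (to 150) for the absolute firing time
* to stay 1050. Jumping the clock past 1050 disables the alarm instead.
*/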
static int at91_rtc_readalarm(struct device *dev, struct rtc_wkalrm *alrm)
{
struct sam9_rtc *rtc = dev_get_drvdata(dev);
struct rtc_time *tm = &alrm->time;
u32 alarm = rtt_readl(rtc, AR);
u32 offset;
offset = gpbr_readl(rtc);
if (offset == 0)
return -EILSEQ;
memset(alrm, 0, sizeof(*alrm));
if (alarm != ALARM_DISABLED) {
rtc_time64_to_tm(offset + alarm, tm);
dev_dbg(dev, "%s: %ptR\n", __func__, tm);
if (rtt_readl(rtc, MR) & AT91_RTT_ALMIEN)
alrm->enabled = 1;
}
return 0;
}
static int at91_rtc_setalarm(struct device *dev, struct rtc_wkalrm *alrm)
{
struct sam9_rtc *rtc = dev_get_drvdata(dev);
struct rtc_time *tm = &alrm->time;
unsigned long secs;
u32 offset;
u32 mr;
secs = rtc_tm_to_time64(tm);
offset = gpbr_readl(rtc);
if (offset == 0) {
/* time is not set */
return -EILSEQ;
}
mr = rtt_readl(rtc, MR);
rtt_writel(rtc, MR, mr & ~AT91_RTT_ALMIEN);
/* alarm in the past? finish and leave disabled */
if (secs <= offset) {
rtt_writel(rtc, AR, ALARM_DISABLED);
return 0;
}
/* else set alarm and maybe enable it */
rtt_writel(rtc, AR, secs - offset);
if (alrm->enabled)
rtt_writel(rtc, MR, mr | AT91_RTT_ALMIEN);
dev_dbg(dev, "%s: %ptR\n", __func__, tm);
return 0;
}
static int at91_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
{
struct sam9_rtc *rtc = dev_get_drvdata(dev);
u32 mr = rtt_readl(rtc, MR);
dev_dbg(dev, "alarm_irq_enable: enabled=%08x, mr %08x\n", enabled, mr);
if (enabled)
rtt_writel(rtc, MR, mr | AT91_RTT_ALMIEN);
else
rtt_writel(rtc, MR, mr & ~AT91_RTT_ALMIEN);
return 0;
}
/*
* Provide additional RTC information in /proc/driver/rtc
*/
static int at91_rtc_proc(struct device *dev, struct seq_file *seq)
{
struct sam9_rtc *rtc = dev_get_drvdata(dev);
u32 mr = rtt_readl(rtc, MR);
seq_printf(seq, "update_IRQ\t: %s\n",
(mr & AT91_RTT_RTTINCIEN) ? "yes" : "no");
return 0;
}
static irqreturn_t at91_rtc_cache_events(struct sam9_rtc *rtc)
{
u32 sr, mr;
/* Shared interrupt may be for another device. Note: reading
* SR clears it, so we must only read it in this irq handler!
*/
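/*
* The interrupt-enable bits (ALMIEN/RTTINCIEN, bits 16/17 of MR) line up
* with the status bits (ALMS/RTTINC, bits 0/1 of SR) after a 16-bit
* shift, which is what the "mr >> 16" mask below relies on.
*/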
mr = rtt_readl(rtc, MR) & (AT91_RTT_ALMIEN | AT91_RTT_RTTINCIEN);
sr = rtt_readl(rtc, SR) & (mr >> 16);
if (!sr)
return IRQ_NONE;
/* alarm status */
if (sr & AT91_RTT_ALMS)
rtc->events |= (RTC_AF | RTC_IRQF);
/* timer update/increment */
if (sr & AT91_RTT_RTTINC)
rtc->events |= (RTC_UF | RTC_IRQF);
return IRQ_HANDLED;
}
static void at91_rtc_flush_events(struct sam9_rtc *rtc)
{
if (!rtc->events)
return;
rtc_update_irq(rtc->rtcdev, 1, rtc->events);
pr_debug("%s: num=%ld, events=0x%02lx\n", __func__,
rtc->events >> 8, rtc->events & 0x000000FF);
rtc->events = 0;
}
/*
* IRQ handler for the RTC
*/
static irqreturn_t at91_rtc_interrupt(int irq, void *_rtc)
{
struct sam9_rtc *rtc = _rtc;
int ret;
spin_lock(&rtc->lock);
ret = at91_rtc_cache_events(rtc);
/* We're called in suspended state */
if (rtc->suspended) {
/* Mask irqs coming from this peripheral */
rtt_writel(rtc, MR,
rtt_readl(rtc, MR) &
~(AT91_RTT_ALMIEN | AT91_RTT_RTTINCIEN));
/* Trigger a system wakeup */
pm_system_wakeup();
} else {
at91_rtc_flush_events(rtc);
}
spin_unlock(&rtc->lock);
return ret;
}
static const struct rtc_class_ops at91_rtc_ops = {
.read_time = at91_rtc_readtime,
.set_time = at91_rtc_settime,
.read_alarm = at91_rtc_readalarm,
.set_alarm = at91_rtc_setalarm,
.proc = at91_rtc_proc,
.alarm_irq_enable = at91_rtc_alarm_irq_enable,
};
/*
* Initialize and install RTC driver
*/
static int at91_rtc_probe(struct platform_device *pdev)
{
struct sam9_rtc *rtc;
int ret, irq;
u32 mr;
unsigned int sclk_rate;
struct of_phandle_args args;
irq = platform_get_irq(pdev, 0);
if (irq < 0)
return irq;
rtc = devm_kzalloc(&pdev->dev, sizeof(*rtc), GFP_KERNEL);
if (!rtc)
return -ENOMEM;
spin_lock_init(&rtc->lock);
rtc->irq = irq;
/* platform setup code should have handled this; sigh */
if (!device_can_wakeup(&pdev->dev))
device_init_wakeup(&pdev->dev, 1);
platform_set_drvdata(pdev, rtc);
rtc->rtt = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(rtc->rtt))
return PTR_ERR(rtc->rtt);
ret = of_parse_phandle_with_fixed_args(pdev->dev.of_node,
"atmel,rtt-rtc-time-reg", 1, 0,
&args);
if (ret)
return ret;
rtc->gpbr = syscon_node_to_regmap(args.np);
rtc->gpbr_offset = args.args[0];
if (IS_ERR(rtc->gpbr)) {
dev_err(&pdev->dev, "failed to retrieve gpbr regmap, aborting.\n");
return -ENOMEM;
}
rtc->sclk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(rtc->sclk))
return PTR_ERR(rtc->sclk);
ret = clk_prepare_enable(rtc->sclk);
if (ret) {
dev_err(&pdev->dev, "Could not enable slow clock\n");
return ret;
}
sclk_rate = clk_get_rate(rtc->sclk);
if (!sclk_rate || sclk_rate > AT91_RTT_RTPRES) {
dev_err(&pdev->dev, "Invalid slow clock rate\n");
ret = -EINVAL;
goto err_clk;
}
mr = rtt_readl(rtc, MR);
/* unless RTT is counting at 1 Hz, re-initialize it */
if ((mr & AT91_RTT_RTPRES) != sclk_rate) {
mr = AT91_RTT_RTTRST | (sclk_rate & AT91_RTT_RTPRES);
gpbr_writel(rtc, 0);
}
/* disable all interrupts (same as on shutdown path) */
mr &= ~(AT91_RTT_ALMIEN | AT91_RTT_RTTINCIEN);
rtt_writel(rtc, MR, mr);
rtc->rtcdev = devm_rtc_allocate_device(&pdev->dev);
if (IS_ERR(rtc->rtcdev)) {
ret = PTR_ERR(rtc->rtcdev);
goto err_clk;
}
rtc->rtcdev->ops = &at91_rtc_ops;
rtc->rtcdev->range_max = U32_MAX;
/* register irq handler after we know what name we'll use */
ret = devm_request_irq(&pdev->dev, rtc->irq, at91_rtc_interrupt,
IRQF_SHARED | IRQF_COND_SUSPEND,
dev_name(&rtc->rtcdev->dev), rtc);
if (ret) {
dev_dbg(&pdev->dev, "can't share IRQ %d?\n", rtc->irq);
goto err_clk;
}
/* NOTE: sam9260 rev A silicon has a ROM bug which resets the
* RTT on at least some reboots. If you have that chip, you must
* initialize the time from some external source like a GPS, wall
* clock, discrete RTC, etc
*/
if (gpbr_readl(rtc) == 0)
dev_warn(&pdev->dev, "%s: SET TIME!\n",
dev_name(&rtc->rtcdev->dev));
return devm_rtc_register_device(rtc->rtcdev);
err_clk:
clk_disable_unprepare(rtc->sclk);
return ret;
}
/*
* Disable and remove the RTC driver
*/
static void at91_rtc_remove(struct platform_device *pdev)
{
struct sam9_rtc *rtc = platform_get_drvdata(pdev);
u32 mr = rtt_readl(rtc, MR);
/* disable all interrupts */
rtt_writel(rtc, MR, mr & ~(AT91_RTT_ALMIEN | AT91_RTT_RTTINCIEN));
clk_disable_unprepare(rtc->sclk);
}
static void at91_rtc_shutdown(struct platform_device *pdev)
{
struct sam9_rtc *rtc = platform_get_drvdata(pdev);
u32 mr = rtt_readl(rtc, MR);
rtc->imr = mr & (AT91_RTT_ALMIEN | AT91_RTT_RTTINCIEN);
rtt_writel(rtc, MR, mr & ~rtc->imr);
}
#ifdef CONFIG_PM_SLEEP
/* AT91SAM9 RTC Power management control */
static int at91_rtc_suspend(struct device *dev)
{
struct sam9_rtc *rtc = dev_get_drvdata(dev);
u32 mr = rtt_readl(rtc, MR);
/*
* This IRQ is shared with DBGU and other hardware which isn't
* necessarily a wakeup event source.
*/
rtc->imr = mr & (AT91_RTT_ALMIEN | AT91_RTT_RTTINCIEN);
if (rtc->imr) {
if (device_may_wakeup(dev) && (mr & AT91_RTT_ALMIEN)) {
unsigned long flags;
enable_irq_wake(rtc->irq);
spin_lock_irqsave(&rtc->lock, flags);
rtc->suspended = true;
spin_unlock_irqrestore(&rtc->lock, flags);
/* don't let RTTINC cause wakeups */
if (mr & AT91_RTT_RTTINCIEN)
rtt_writel(rtc, MR, mr & ~AT91_RTT_RTTINCIEN);
} else {
rtt_writel(rtc, MR, mr & ~rtc->imr);
}
}
return 0;
}
static int at91_rtc_resume(struct device *dev)
{
struct sam9_rtc *rtc = dev_get_drvdata(dev);
u32 mr;
if (rtc->imr) {
unsigned long flags;
if (device_may_wakeup(dev))
disable_irq_wake(rtc->irq);
mr = rtt_readl(rtc, MR);
rtt_writel(rtc, MR, mr | rtc->imr);
spin_lock_irqsave(&rtc->lock, flags);
rtc->suspended = false;
at91_rtc_cache_events(rtc);
at91_rtc_flush_events(rtc);
spin_unlock_irqrestore(&rtc->lock, flags);
}
return 0;
}
#endif
static SIMPLE_DEV_PM_OPS(at91_rtc_pm_ops, at91_rtc_suspend, at91_rtc_resume);
static const struct of_device_id at91_rtc_dt_ids[] = {
{ .compatible = "atmel,at91sam9260-rtt" },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, at91_rtc_dt_ids);
static struct platform_driver at91_rtc_driver = {
.probe = at91_rtc_probe,
.remove_new = at91_rtc_remove,
.shutdown = at91_rtc_shutdown,
.driver = {
.name = "rtc-at91sam9",
.pm = &at91_rtc_pm_ops,
.of_match_table = at91_rtc_dt_ids,
},
};
module_platform_driver(at91_rtc_driver);
MODULE_AUTHOR("Michel Benoit");
MODULE_DESCRIPTION("RTC driver for Atmel AT91SAM9x");
MODULE_LICENSE("GPL");
| linux-master | drivers/rtc/rtc-at91sam9.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* RTC Driver for X-Powers AC100
*
* Copyright (c) 2016 Chen-Yu Tsai
*
* Chen-Yu Tsai <[email protected]>
*/
#include <linux/bcd.h>
#include <linux/clk-provider.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/mfd/ac100.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/rtc.h>
#include <linux/types.h>
/* Control register */
#define AC100_RTC_CTRL_24HOUR BIT(0)
/* Clock output register bits */
#define AC100_CLKOUT_PRE_DIV_SHIFT 5
#define AC100_CLKOUT_PRE_DIV_WIDTH 3
#define AC100_CLKOUT_MUX_SHIFT 4
#define AC100_CLKOUT_MUX_WIDTH 1
#define AC100_CLKOUT_DIV_SHIFT 1
#define AC100_CLKOUT_DIV_WIDTH 3
#define AC100_CLKOUT_EN BIT(0)
/* RTC */
#define AC100_RTC_SEC_MASK GENMASK(6, 0)
#define AC100_RTC_MIN_MASK GENMASK(6, 0)
#define AC100_RTC_HOU_MASK GENMASK(5, 0)
#define AC100_RTC_WEE_MASK GENMASK(2, 0)
#define AC100_RTC_DAY_MASK GENMASK(5, 0)
#define AC100_RTC_MON_MASK GENMASK(4, 0)
#define AC100_RTC_YEA_MASK GENMASK(7, 0)
#define AC100_RTC_YEA_LEAP BIT(15)
#define AC100_RTC_UPD_TRIGGER BIT(15)
/* Alarm (wall clock) */
#define AC100_ALM_INT_ENABLE BIT(0)
#define AC100_ALM_SEC_MASK GENMASK(6, 0)
#define AC100_ALM_MIN_MASK GENMASK(6, 0)
#define AC100_ALM_HOU_MASK GENMASK(5, 0)
#define AC100_ALM_WEE_MASK GENMASK(2, 0)
#define AC100_ALM_DAY_MASK GENMASK(5, 0)
#define AC100_ALM_MON_MASK GENMASK(4, 0)
#define AC100_ALM_YEA_MASK GENMASK(7, 0)
#define AC100_ALM_ENABLE_FLAG BIT(15)
#define AC100_ALM_UPD_TRIGGER BIT(15)
/*
* The year parameter passed to the driver is usually an offset relative to
* the year 1900. This macro is used to convert this offset to another one
* relative to the minimum year allowed by the hardware.
*
* The year range is 1970 - 2069. This range is selected to match Allwinner's
* driver.
*/
#define AC100_YEAR_MIN 1970
#define AC100_YEAR_MAX 2069
#define AC100_YEAR_OFF (AC100_YEAR_MIN - 1900)
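/*
* For example (illustrative): tm_year == 116 means calendar year 2016,
* which the hardware stores as 116 - AC100_YEAR_OFF = 46, i.e. an offset
* from 1970.
*/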
struct ac100_clkout {
struct clk_hw hw;
struct regmap *regmap;
u8 offset;
};
#define to_ac100_clkout(_hw) container_of(_hw, struct ac100_clkout, hw)
#define AC100_RTC_32K_NAME "ac100-rtc-32k"
#define AC100_RTC_32K_RATE 32768
#define AC100_CLKOUT_NUM 3
static const char * const ac100_clkout_names[AC100_CLKOUT_NUM] = {
"ac100-cko1-rtc",
"ac100-cko2-rtc",
"ac100-cko3-rtc",
};
struct ac100_rtc_dev {
struct rtc_device *rtc;
struct device *dev;
struct regmap *regmap;
int irq;
unsigned long alarm;
struct clk_hw *rtc_32k_clk;
struct ac100_clkout clks[AC100_CLKOUT_NUM];
struct clk_hw_onecell_data *clk_data;
};
/*
* Clock controls for 3 clock output pins
*/
static const struct clk_div_table ac100_clkout_prediv[] = {
{ .val = 0, .div = 1 },
{ .val = 1, .div = 2 },
{ .val = 2, .div = 4 },
{ .val = 3, .div = 8 },
{ .val = 4, .div = 16 },
{ .val = 5, .div = 32 },
{ .val = 6, .div = 64 },
{ .val = 7, .div = 122 },
{ },
};
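/*
* The output rate is the selected parent divided by the pre-divider
* above and then by a power-of-two divider. A hypothetical setting: a
* 4 MHz codec parent with pre-divider 8 and a 2^2 divider yields
* 4000000 / 8 / 4 = 125 kHz. The 32768 Hz parent bypasses the
* pre-divider entirely.
*/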
/* Abuse the fact that one parent is 32768 Hz, and the other is 4 MHz */
static unsigned long ac100_clkout_recalc_rate(struct clk_hw *hw,
unsigned long prate)
{
struct ac100_clkout *clk = to_ac100_clkout(hw);
unsigned int reg, div;
regmap_read(clk->regmap, clk->offset, ®);
/* Handle pre-divider first */
if (prate != AC100_RTC_32K_RATE) {
div = (reg >> AC100_CLKOUT_PRE_DIV_SHIFT) &
((1 << AC100_CLKOUT_PRE_DIV_WIDTH) - 1);
prate = divider_recalc_rate(hw, prate, div,
ac100_clkout_prediv, 0,
AC100_CLKOUT_PRE_DIV_WIDTH);
}
div = (reg >> AC100_CLKOUT_DIV_SHIFT) &
(BIT(AC100_CLKOUT_DIV_WIDTH) - 1);
return divider_recalc_rate(hw, prate, div, NULL,
CLK_DIVIDER_POWER_OF_TWO,
AC100_CLKOUT_DIV_WIDTH);
}
static long ac100_clkout_round_rate(struct clk_hw *hw, unsigned long rate,
unsigned long prate)
{
unsigned long best_rate = 0, tmp_rate, tmp_prate;
int i;
if (prate == AC100_RTC_32K_RATE)
return divider_round_rate(hw, rate, &prate, NULL,
AC100_CLKOUT_DIV_WIDTH,
CLK_DIVIDER_POWER_OF_TWO);
for (i = 0; ac100_clkout_prediv[i].div; i++) {
tmp_prate = DIV_ROUND_UP(prate, ac100_clkout_prediv[i].val);
tmp_rate = divider_round_rate(hw, rate, &tmp_prate, NULL,
AC100_CLKOUT_DIV_WIDTH,
CLK_DIVIDER_POWER_OF_TWO);
if (tmp_rate > rate)
continue;
if (rate - tmp_rate < best_rate - tmp_rate)
best_rate = tmp_rate;
}
return best_rate;
}
static int ac100_clkout_determine_rate(struct clk_hw *hw,
struct clk_rate_request *req)
{
struct clk_hw *best_parent;
unsigned long best = 0;
int i, num_parents = clk_hw_get_num_parents(hw);
for (i = 0; i < num_parents; i++) {
struct clk_hw *parent = clk_hw_get_parent_by_index(hw, i);
unsigned long tmp, prate;
/*
* The clock has two parents: one is a fixed clock that is
* internally registered by the ac100 driver. The other parent
* is a clock from the codec side of the chip, which we
* properly declare and reference in the devicetree but which
* is not implemented in any driver right now.
* If the clock core looks for the parent of that second,
* missing clock, it can't find one that is registered and
* returns NULL.
* So we end up in a situation where clk_hw_get_num_parents
* returns the number of clocks we can be parented to, but
* clk_hw_get_parent_by_index will not return the orphan
* clocks.
* Thus we need to check that the parent exists before
* we get the parent rate, so we can use the RTC
* without waiting for the codec to be supported.
*/
if (!parent)
continue;
prate = clk_hw_get_rate(parent);
tmp = ac100_clkout_round_rate(hw, req->rate, prate);
if (tmp > req->rate)
continue;
if (req->rate - tmp < req->rate - best) {
best = tmp;
best_parent = parent;
}
}
if (!best)
return -EINVAL;
req->best_parent_hw = best_parent;
req->best_parent_rate = best;
req->rate = best;
return 0;
}
static int ac100_clkout_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long prate)
{
struct ac100_clkout *clk = to_ac100_clkout(hw);
int div = 0, pre_div = 0;
do {
div = divider_get_val(rate * ac100_clkout_prediv[pre_div].div,
prate, NULL, AC100_CLKOUT_DIV_WIDTH,
CLK_DIVIDER_POWER_OF_TWO);
if (div >= 0)
break;
} while (prate != AC100_RTC_32K_RATE &&
ac100_clkout_prediv[++pre_div].div);
if (div < 0)
return div;
pre_div = ac100_clkout_prediv[pre_div].val;
regmap_update_bits(clk->regmap, clk->offset,
((1 << AC100_CLKOUT_DIV_WIDTH) - 1) << AC100_CLKOUT_DIV_SHIFT |
((1 << AC100_CLKOUT_PRE_DIV_WIDTH) - 1) << AC100_CLKOUT_PRE_DIV_SHIFT,
(div - 1) << AC100_CLKOUT_DIV_SHIFT |
(pre_div - 1) << AC100_CLKOUT_PRE_DIV_SHIFT);
return 0;
}
static int ac100_clkout_prepare(struct clk_hw *hw)
{
struct ac100_clkout *clk = to_ac100_clkout(hw);
return regmap_update_bits(clk->regmap, clk->offset, AC100_CLKOUT_EN,
AC100_CLKOUT_EN);
}
static void ac100_clkout_unprepare(struct clk_hw *hw)
{
struct ac100_clkout *clk = to_ac100_clkout(hw);
regmap_update_bits(clk->regmap, clk->offset, AC100_CLKOUT_EN, 0);
}
static int ac100_clkout_is_prepared(struct clk_hw *hw)
{
struct ac100_clkout *clk = to_ac100_clkout(hw);
unsigned int reg;
regmap_read(clk->regmap, clk->offset, ®);
return reg & AC100_CLKOUT_EN;
}
static u8 ac100_clkout_get_parent(struct clk_hw *hw)
{
struct ac100_clkout *clk = to_ac100_clkout(hw);
unsigned int reg;
regmap_read(clk->regmap, clk->offset, ®);
return (reg >> AC100_CLKOUT_MUX_SHIFT) & 0x1;
}
static int ac100_clkout_set_parent(struct clk_hw *hw, u8 index)
{
struct ac100_clkout *clk = to_ac100_clkout(hw);
return regmap_update_bits(clk->regmap, clk->offset,
BIT(AC100_CLKOUT_MUX_SHIFT),
index ? BIT(AC100_CLKOUT_MUX_SHIFT) : 0);
}
static const struct clk_ops ac100_clkout_ops = {
.prepare = ac100_clkout_prepare,
.unprepare = ac100_clkout_unprepare,
.is_prepared = ac100_clkout_is_prepared,
.recalc_rate = ac100_clkout_recalc_rate,
.determine_rate = ac100_clkout_determine_rate,
.get_parent = ac100_clkout_get_parent,
.set_parent = ac100_clkout_set_parent,
.set_rate = ac100_clkout_set_rate,
};
static int ac100_rtc_register_clks(struct ac100_rtc_dev *chip)
{
struct device_node *np = chip->dev->of_node;
const char *parents[2] = {AC100_RTC_32K_NAME};
int i, ret;
chip->clk_data = devm_kzalloc(chip->dev,
struct_size(chip->clk_data, hws,
AC100_CLKOUT_NUM),
GFP_KERNEL);
if (!chip->clk_data)
return -ENOMEM;
chip->rtc_32k_clk = clk_hw_register_fixed_rate(chip->dev,
AC100_RTC_32K_NAME,
NULL, 0,
AC100_RTC_32K_RATE);
if (IS_ERR(chip->rtc_32k_clk)) {
ret = PTR_ERR(chip->rtc_32k_clk);
dev_err(chip->dev, "Failed to register RTC-32k clock: %d\n",
ret);
return ret;
}
parents[1] = of_clk_get_parent_name(np, 0);
if (!parents[1]) {
dev_err(chip->dev, "Failed to get ADDA 4M clock\n");
return -EINVAL;
}
for (i = 0; i < AC100_CLKOUT_NUM; i++) {
struct ac100_clkout *clk = &chip->clks[i];
struct clk_init_data init = {
.name = ac100_clkout_names[i],
.ops = &ac100_clkout_ops,
.parent_names = parents,
.num_parents = ARRAY_SIZE(parents),
.flags = 0,
};
of_property_read_string_index(np, "clock-output-names",
i, &init.name);
clk->regmap = chip->regmap;
clk->offset = AC100_CLKOUT_CTRL1 + i;
clk->hw.init = &init;
ret = devm_clk_hw_register(chip->dev, &clk->hw);
if (ret) {
dev_err(chip->dev, "Failed to register clk '%s': %d\n",
init.name, ret);
goto err_unregister_rtc_32k;
}
chip->clk_data->hws[i] = &clk->hw;
}
chip->clk_data->num = i;
ret = of_clk_add_hw_provider(np, of_clk_hw_onecell_get, chip->clk_data);
if (ret)
goto err_unregister_rtc_32k;
return 0;
err_unregister_rtc_32k:
clk_unregister_fixed_rate(chip->rtc_32k_clk->clk);
return ret;
}
static void ac100_rtc_unregister_clks(struct ac100_rtc_dev *chip)
{
of_clk_del_provider(chip->dev->of_node);
clk_unregister_fixed_rate(chip->rtc_32k_clk->clk);
}
/*
* RTC related bits
*/
static int ac100_rtc_get_time(struct device *dev, struct rtc_time *rtc_tm)
{
struct ac100_rtc_dev *chip = dev_get_drvdata(dev);
struct regmap *regmap = chip->regmap;
u16 reg[7];
int ret;
ret = regmap_bulk_read(regmap, AC100_RTC_SEC, reg, 7);
if (ret)
return ret;
rtc_tm->tm_sec = bcd2bin(reg[0] & AC100_RTC_SEC_MASK);
rtc_tm->tm_min = bcd2bin(reg[1] & AC100_RTC_MIN_MASK);
rtc_tm->tm_hour = bcd2bin(reg[2] & AC100_RTC_HOU_MASK);
rtc_tm->tm_wday = bcd2bin(reg[3] & AC100_RTC_WEE_MASK);
rtc_tm->tm_mday = bcd2bin(reg[4] & AC100_RTC_DAY_MASK);
rtc_tm->tm_mon = bcd2bin(reg[5] & AC100_RTC_MON_MASK) - 1;
rtc_tm->tm_year = bcd2bin(reg[6] & AC100_RTC_YEA_MASK) +
AC100_YEAR_OFF;
return 0;
}
static int ac100_rtc_set_time(struct device *dev, struct rtc_time *rtc_tm)
{
struct ac100_rtc_dev *chip = dev_get_drvdata(dev);
struct regmap *regmap = chip->regmap;
int year;
u16 reg[8];
/* our RTC has a limited year range... */
year = rtc_tm->tm_year - AC100_YEAR_OFF;
if (year < 0 || year > (AC100_YEAR_MAX - 1900)) {
dev_err(dev, "rtc only supports year in range %d - %d\n",
AC100_YEAR_MIN, AC100_YEAR_MAX);
return -EINVAL;
}
/* convert to BCD */
reg[0] = bin2bcd(rtc_tm->tm_sec) & AC100_RTC_SEC_MASK;
reg[1] = bin2bcd(rtc_tm->tm_min) & AC100_RTC_MIN_MASK;
reg[2] = bin2bcd(rtc_tm->tm_hour) & AC100_RTC_HOU_MASK;
reg[3] = bin2bcd(rtc_tm->tm_wday) & AC100_RTC_WEE_MASK;
reg[4] = bin2bcd(rtc_tm->tm_mday) & AC100_RTC_DAY_MASK;
reg[5] = bin2bcd(rtc_tm->tm_mon + 1) & AC100_RTC_MON_MASK;
reg[6] = bin2bcd(year) & AC100_RTC_YEA_MASK;
/* trigger write */
reg[7] = AC100_RTC_UPD_TRIGGER;
/* Is it a leap year? */
if (is_leap_year(year + AC100_YEAR_OFF + 1900))
reg[6] |= AC100_RTC_YEA_LEAP;
return regmap_bulk_write(regmap, AC100_RTC_SEC, reg, 8);
}
static int ac100_rtc_alarm_irq_enable(struct device *dev, unsigned int en)
{
struct ac100_rtc_dev *chip = dev_get_drvdata(dev);
struct regmap *regmap = chip->regmap;
unsigned int val;
val = en ? AC100_ALM_INT_ENABLE : 0;
return regmap_write(regmap, AC100_ALM_INT_ENA, val);
}
static int ac100_rtc_get_alarm(struct device *dev, struct rtc_wkalrm *alrm)
{
struct ac100_rtc_dev *chip = dev_get_drvdata(dev);
struct regmap *regmap = chip->regmap;
struct rtc_time *alrm_tm = &alrm->time;
u16 reg[7];
unsigned int val;
int ret;
ret = regmap_read(regmap, AC100_ALM_INT_ENA, &val);
if (ret)
return ret;
alrm->enabled = !!(val & AC100_ALM_INT_ENABLE);
ret = regmap_bulk_read(regmap, AC100_ALM_SEC, reg, 7);
if (ret)
return ret;
alrm_tm->tm_sec = bcd2bin(reg[0] & AC100_ALM_SEC_MASK);
alrm_tm->tm_min = bcd2bin(reg[1] & AC100_ALM_MIN_MASK);
alrm_tm->tm_hour = bcd2bin(reg[2] & AC100_ALM_HOU_MASK);
alrm_tm->tm_wday = bcd2bin(reg[3] & AC100_ALM_WEE_MASK);
alrm_tm->tm_mday = bcd2bin(reg[4] & AC100_ALM_DAY_MASK);
alrm_tm->tm_mon = bcd2bin(reg[5] & AC100_ALM_MON_MASK) - 1;
alrm_tm->tm_year = bcd2bin(reg[6] & AC100_ALM_YEA_MASK) +
AC100_YEAR_OFF;
return 0;
}
static int ac100_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
{
struct ac100_rtc_dev *chip = dev_get_drvdata(dev);
struct regmap *regmap = chip->regmap;
struct rtc_time *alrm_tm = &alrm->time;
u16 reg[8];
int year;
int ret;
/* our alarm has a limited year range... */
year = alrm_tm->tm_year - AC100_YEAR_OFF;
if (year < 0 || year > (AC100_YEAR_MAX - 1900)) {
dev_err(dev, "alarm only supports year in range %d - %d\n",
AC100_YEAR_MIN, AC100_YEAR_MAX);
return -EINVAL;
}
/* convert to BCD */
reg[0] = (bin2bcd(alrm_tm->tm_sec) & AC100_ALM_SEC_MASK) |
AC100_ALM_ENABLE_FLAG;
reg[1] = (bin2bcd(alrm_tm->tm_min) & AC100_ALM_MIN_MASK) |
AC100_ALM_ENABLE_FLAG;
reg[2] = (bin2bcd(alrm_tm->tm_hour) & AC100_ALM_HOU_MASK) |
AC100_ALM_ENABLE_FLAG;
/* Do not enable weekday alarm */
reg[3] = bin2bcd(alrm_tm->tm_wday) & AC100_ALM_WEE_MASK;
reg[4] = (bin2bcd(alrm_tm->tm_mday) & AC100_ALM_DAY_MASK) |
AC100_ALM_ENABLE_FLAG;
reg[5] = (bin2bcd(alrm_tm->tm_mon + 1) & AC100_ALM_MON_MASK) |
AC100_ALM_ENABLE_FLAG;
reg[6] = (bin2bcd(year) & AC100_ALM_YEA_MASK) |
AC100_ALM_ENABLE_FLAG;
/* trigger write */
reg[7] = AC100_ALM_UPD_TRIGGER;
ret = regmap_bulk_write(regmap, AC100_ALM_SEC, reg, 8);
if (ret)
return ret;
return ac100_rtc_alarm_irq_enable(dev, alrm->enabled);
}
static irqreturn_t ac100_rtc_irq(int irq, void *data)
{
struct ac100_rtc_dev *chip = data;
struct regmap *regmap = chip->regmap;
unsigned int val = 0;
int ret;
rtc_lock(chip->rtc);
/* read status */
ret = regmap_read(regmap, AC100_ALM_INT_STA, &val);
if (ret)
goto out;
if (val & AC100_ALM_INT_ENABLE) {
/* signal rtc framework */
rtc_update_irq(chip->rtc, 1, RTC_AF | RTC_IRQF);
/* clear status */
ret = regmap_write(regmap, AC100_ALM_INT_STA, val);
if (ret)
goto out;
/* disable interrupt */
ret = ac100_rtc_alarm_irq_enable(chip->dev, 0);
if (ret)
goto out;
}
out:
rtc_unlock(chip->rtc);
return IRQ_HANDLED;
}
static const struct rtc_class_ops ac100_rtc_ops = {
.read_time = ac100_rtc_get_time,
.set_time = ac100_rtc_set_time,
.read_alarm = ac100_rtc_get_alarm,
.set_alarm = ac100_rtc_set_alarm,
.alarm_irq_enable = ac100_rtc_alarm_irq_enable,
};
static int ac100_rtc_probe(struct platform_device *pdev)
{
struct ac100_dev *ac100 = dev_get_drvdata(pdev->dev.parent);
struct ac100_rtc_dev *chip;
int ret;
chip = devm_kzalloc(&pdev->dev, sizeof(*chip), GFP_KERNEL);
if (!chip)
return -ENOMEM;
platform_set_drvdata(pdev, chip);
chip->dev = &pdev->dev;
chip->regmap = ac100->regmap;
chip->irq = platform_get_irq(pdev, 0);
if (chip->irq < 0)
return chip->irq;
chip->rtc = devm_rtc_allocate_device(&pdev->dev);
if (IS_ERR(chip->rtc))
return PTR_ERR(chip->rtc);
chip->rtc->ops = &ac100_rtc_ops;
ret = devm_request_threaded_irq(&pdev->dev, chip->irq, NULL,
ac100_rtc_irq,
IRQF_SHARED | IRQF_ONESHOT,
dev_name(&pdev->dev), chip);
if (ret) {
dev_err(&pdev->dev, "Could not request IRQ\n");
return ret;
}
/* always use 24 hour mode */
regmap_write_bits(chip->regmap, AC100_RTC_CTRL, AC100_RTC_CTRL_24HOUR,
AC100_RTC_CTRL_24HOUR);
/* disable counter alarm interrupt */
regmap_write(chip->regmap, AC100_ALM_INT_ENA, 0);
/* clear counter alarm pending interrupts */
regmap_write(chip->regmap, AC100_ALM_INT_STA, AC100_ALM_INT_ENABLE);
ret = ac100_rtc_register_clks(chip);
if (ret)
return ret;
return devm_rtc_register_device(chip->rtc);
}
static void ac100_rtc_remove(struct platform_device *pdev)
{
struct ac100_rtc_dev *chip = platform_get_drvdata(pdev);
ac100_rtc_unregister_clks(chip);
}
static const struct of_device_id ac100_rtc_match[] = {
{ .compatible = "x-powers,ac100-rtc" },
{ },
};
MODULE_DEVICE_TABLE(of, ac100_rtc_match);
static struct platform_driver ac100_rtc_driver = {
.probe = ac100_rtc_probe,
.remove_new = ac100_rtc_remove,
.driver = {
.name = "ac100-rtc",
.of_match_table = of_match_ptr(ac100_rtc_match),
},
};
module_platform_driver(ac100_rtc_driver);
MODULE_DESCRIPTION("X-Powers AC100 RTC driver");
MODULE_AUTHOR("Chen-Yu Tsai <[email protected]>");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/rtc/rtc-ac100.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* An I2C driver for the Philips PCF8563 RTC
* Copyright 2005-06 Tower Technologies
*
* Author: Alessandro Zummo <[email protected]>
* Maintainers: http://www.nslu2-linux.org/
*
* based on the other drivers in this same directory.
*
* https://www.nxp.com/docs/en/data-sheet/PCF8563.pdf
*/
#include <linux/clk-provider.h>
#include <linux/i2c.h>
#include <linux/bcd.h>
#include <linux/rtc.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/err.h>
#define PCF8563_REG_ST1 0x00 /* status */
#define PCF8563_REG_ST2 0x01
#define PCF8563_BIT_AIE BIT(1)
#define PCF8563_BIT_AF BIT(3)
#define PCF8563_BITS_ST2_N (7 << 5)
#define PCF8563_REG_SC 0x02 /* datetime */
#define PCF8563_REG_MN 0x03
#define PCF8563_REG_HR 0x04
#define PCF8563_REG_DM 0x05
#define PCF8563_REG_DW 0x06
#define PCF8563_REG_MO 0x07
#define PCF8563_REG_YR 0x08
#define PCF8563_REG_AMN 0x09 /* alarm */
#define PCF8563_REG_CLKO 0x0D /* clock out */
#define PCF8563_REG_CLKO_FE 0x80 /* clock out enabled */
#define PCF8563_REG_CLKO_F_MASK 0x03 /* frequency mask */
#define PCF8563_REG_CLKO_F_32768HZ 0x00
#define PCF8563_REG_CLKO_F_1024HZ 0x01
#define PCF8563_REG_CLKO_F_32HZ 0x02
#define PCF8563_REG_CLKO_F_1HZ 0x03
#define PCF8563_REG_TMRC 0x0E /* timer control */
#define PCF8563_TMRC_ENABLE BIT(7)
#define PCF8563_TMRC_4096 0
#define PCF8563_TMRC_64 1
#define PCF8563_TMRC_1 2
#define PCF8563_TMRC_1_60 3
#define PCF8563_TMRC_MASK 3
#define PCF8563_REG_TMR 0x0F /* timer */
#define PCF8563_SC_LV 0x80 /* low voltage */
#define PCF8563_MO_C 0x80 /* century */
static struct i2c_driver pcf8563_driver;
struct pcf8563 {
struct rtc_device *rtc;
/*
* The meaning of MO_C bit varies by the chip type.
* From PCF8563 datasheet: this bit is toggled when the years
* register overflows from 99 to 00
* 0 indicates the century is 20xx
* 1 indicates the century is 19xx
* From RTC8564 datasheet: this bit indicates change of
* century. When the year digit data overflows from 99 to 00,
* this bit is set. By presetting it to 0 while still in the
* 20th century, it will be set in year 2000, ...
* There seems to be no reliable way to know how the system uses this
* bit. So let's handle it heuristically, assuming we live in
* 1970...2069.
*/
int c_polarity; /* 0: MO_C=1 means 19xx, otherwise MO_C=1 means 20xx */
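/*
* Example of the heuristic (illustrative): if the year register reads 45
* (tm_year = 145, i.e. 2045) and MO_C is set, read_time() records
* c_polarity = 1, meaning MO_C = 1 denotes 20xx on this system, and
* set_time() keeps MO_C consistent with that.
*/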
struct i2c_client *client;
#ifdef CONFIG_COMMON_CLK
struct clk_hw clkout_hw;
#endif
};
static int pcf8563_read_block_data(struct i2c_client *client, unsigned char reg,
unsigned char length, unsigned char *buf)
{
struct i2c_msg msgs[] = {
{/* setup read ptr */
.addr = client->addr,
.len = 1,
.buf = ®,
},
{
.addr = client->addr,
.flags = I2C_M_RD,
.len = length,
.buf = buf
},
};
if ((i2c_transfer(client->adapter, msgs, 2)) != 2) {
dev_err(&client->dev, "%s: read error\n", __func__);
return -EIO;
}
return 0;
}
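/*
* The helper above issues a combined transfer: a one-byte write that
* sets the register pointer, followed by a read of "length" bytes.
* pcf8563_rtc_read_time(), for instance, fetches 9 bytes starting at
* PCF8563_REG_ST1 (0x00) in a single transaction.
*/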
static int pcf8563_write_block_data(struct i2c_client *client,
unsigned char reg, unsigned char length,
unsigned char *buf)
{
int i, err;
for (i = 0; i < length; i++) {
unsigned char data[2] = { reg + i, buf[i] };
err = i2c_master_send(client, data, sizeof(data));
if (err != sizeof(data)) {
dev_err(&client->dev,
"%s: err=%d addr=%02x, data=%02x\n",
__func__, err, data[0], data[1]);
return -EIO;
}
}
return 0;
}
static int pcf8563_set_alarm_mode(struct i2c_client *client, bool on)
{
unsigned char buf;
int err;
err = pcf8563_read_block_data(client, PCF8563_REG_ST2, 1, &buf);
if (err < 0)
return err;
if (on)
buf |= PCF8563_BIT_AIE;
else
buf &= ~PCF8563_BIT_AIE;
buf &= ~(PCF8563_BIT_AF | PCF8563_BITS_ST2_N);
err = pcf8563_write_block_data(client, PCF8563_REG_ST2, 1, &buf);
if (err < 0) {
dev_err(&client->dev, "%s: write error\n", __func__);
return -EIO;
}
return 0;
}
static int pcf8563_get_alarm_mode(struct i2c_client *client, unsigned char *en,
unsigned char *pen)
{
unsigned char buf;
int err;
err = pcf8563_read_block_data(client, PCF8563_REG_ST2, 1, &buf);
if (err)
return err;
if (en)
*en = !!(buf & PCF8563_BIT_AIE);
if (pen)
*pen = !!(buf & PCF8563_BIT_AF);
return 0;
}
static irqreturn_t pcf8563_irq(int irq, void *dev_id)
{
struct pcf8563 *pcf8563 = i2c_get_clientdata(dev_id);
int err;
char pending;
err = pcf8563_get_alarm_mode(pcf8563->client, NULL, &pending);
if (err)
return IRQ_NONE;
if (pending) {
rtc_update_irq(pcf8563->rtc, 1, RTC_IRQF | RTC_AF);
pcf8563_set_alarm_mode(pcf8563->client, 1);
return IRQ_HANDLED;
}
return IRQ_NONE;
}
/*
* In the routines that deal directly with the pcf8563 hardware, we use
* rtc_time -- month 0-11, hour 0-23, yr = calendar year-epoch.
*/
static int pcf8563_rtc_read_time(struct device *dev, struct rtc_time *tm)
{
struct i2c_client *client = to_i2c_client(dev);
struct pcf8563 *pcf8563 = i2c_get_clientdata(client);
unsigned char buf[9];
int err;
err = pcf8563_read_block_data(client, PCF8563_REG_ST1, 9, buf);
if (err)
return err;
if (buf[PCF8563_REG_SC] & PCF8563_SC_LV) {
dev_err(&client->dev,
"low voltage detected, date/time is not reliable.\n");
return -EINVAL;
}
dev_dbg(&client->dev,
"%s: raw data is st1=%02x, st2=%02x, sec=%02x, min=%02x, hr=%02x, "
"mday=%02x, wday=%02x, mon=%02x, year=%02x\n",
__func__,
buf[0], buf[1], buf[2], buf[3],
buf[4], buf[5], buf[6], buf[7],
buf[8]);
tm->tm_sec = bcd2bin(buf[PCF8563_REG_SC] & 0x7F);
tm->tm_min = bcd2bin(buf[PCF8563_REG_MN] & 0x7F);
tm->tm_hour = bcd2bin(buf[PCF8563_REG_HR] & 0x3F); /* rtc hr 0-23 */
tm->tm_mday = bcd2bin(buf[PCF8563_REG_DM] & 0x3F);
tm->tm_wday = buf[PCF8563_REG_DW] & 0x07;
tm->tm_mon = bcd2bin(buf[PCF8563_REG_MO] & 0x1F) - 1; /* rtc mn 1-12 */
tm->tm_year = bcd2bin(buf[PCF8563_REG_YR]) + 100;
/* detect the polarity heuristically. see note above. */
pcf8563->c_polarity = (buf[PCF8563_REG_MO] & PCF8563_MO_C) ?
(tm->tm_year >= 100) : (tm->tm_year < 100);
dev_dbg(&client->dev, "%s: tm is secs=%d, mins=%d, hours=%d, "
"mday=%d, mon=%d, year=%d, wday=%d\n",
__func__,
tm->tm_sec, tm->tm_min, tm->tm_hour,
tm->tm_mday, tm->tm_mon, tm->tm_year, tm->tm_wday);
return 0;
}
static int pcf8563_rtc_set_time(struct device *dev, struct rtc_time *tm)
{
struct i2c_client *client = to_i2c_client(dev);
struct pcf8563 *pcf8563 = i2c_get_clientdata(client);
unsigned char buf[9];
dev_dbg(&client->dev, "%s: secs=%d, mins=%d, hours=%d, "
"mday=%d, mon=%d, year=%d, wday=%d\n",
__func__,
tm->tm_sec, tm->tm_min, tm->tm_hour,
tm->tm_mday, tm->tm_mon, tm->tm_year, tm->tm_wday);
/* hours, minutes and seconds */
buf[PCF8563_REG_SC] = bin2bcd(tm->tm_sec);
buf[PCF8563_REG_MN] = bin2bcd(tm->tm_min);
buf[PCF8563_REG_HR] = bin2bcd(tm->tm_hour);
buf[PCF8563_REG_DM] = bin2bcd(tm->tm_mday);
/* month, 1 - 12 */
buf[PCF8563_REG_MO] = bin2bcd(tm->tm_mon + 1);
/* year and century */
buf[PCF8563_REG_YR] = bin2bcd(tm->tm_year - 100);
if (pcf8563->c_polarity ? (tm->tm_year >= 100) : (tm->tm_year < 100))
buf[PCF8563_REG_MO] |= PCF8563_MO_C;
buf[PCF8563_REG_DW] = tm->tm_wday & 0x07;
return pcf8563_write_block_data(client, PCF8563_REG_SC,
9 - PCF8563_REG_SC, buf + PCF8563_REG_SC);
}
static int pcf8563_rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long arg)
{
struct i2c_client *client = to_i2c_client(dev);
int ret;
switch (cmd) {
case RTC_VL_READ:
ret = i2c_smbus_read_byte_data(client, PCF8563_REG_SC);
if (ret < 0)
return ret;
return put_user(ret & PCF8563_SC_LV ? RTC_VL_DATA_INVALID : 0,
(unsigned int __user *)arg);
default:
return -ENOIOCTLCMD;
}
}
static int pcf8563_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *tm)
{
struct i2c_client *client = to_i2c_client(dev);
unsigned char buf[4];
int err;
err = pcf8563_read_block_data(client, PCF8563_REG_AMN, 4, buf);
if (err)
return err;
dev_dbg(&client->dev,
"%s: raw data is min=%02x, hr=%02x, mday=%02x, wday=%02x\n",
__func__, buf[0], buf[1], buf[2], buf[3]);
tm->time.tm_sec = 0;
tm->time.tm_min = bcd2bin(buf[0] & 0x7F);
tm->time.tm_hour = bcd2bin(buf[1] & 0x3F);
tm->time.tm_mday = bcd2bin(buf[2] & 0x3F);
tm->time.tm_wday = bcd2bin(buf[3] & 0x7);
err = pcf8563_get_alarm_mode(client, &tm->enabled, &tm->pending);
if (err < 0)
return err;
dev_dbg(&client->dev, "%s: tm is mins=%d, hours=%d, mday=%d, wday=%d,"
" enabled=%d, pending=%d\n", __func__, tm->time.tm_min,
tm->time.tm_hour, tm->time.tm_mday, tm->time.tm_wday,
tm->enabled, tm->pending);
return 0;
}
static int pcf8563_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *tm)
{
struct i2c_client *client = to_i2c_client(dev);
unsigned char buf[4];
int err;
buf[0] = bin2bcd(tm->time.tm_min);
buf[1] = bin2bcd(tm->time.tm_hour);
buf[2] = bin2bcd(tm->time.tm_mday);
buf[3] = tm->time.tm_wday & 0x07;
err = pcf8563_write_block_data(client, PCF8563_REG_AMN, 4, buf);
if (err)
return err;
return pcf8563_set_alarm_mode(client, !!tm->enabled);
}
static int pcf8563_irq_enable(struct device *dev, unsigned int enabled)
{
dev_dbg(dev, "%s: en=%d\n", __func__, enabled);
return pcf8563_set_alarm_mode(to_i2c_client(dev), !!enabled);
}
#ifdef CONFIG_COMMON_CLK
/*
* Handling of the clkout
*/
#define clkout_hw_to_pcf8563(_hw) container_of(_hw, struct pcf8563, clkout_hw)
static const int clkout_rates[] = {
32768,
1024,
32,
1,
};
static unsigned long pcf8563_clkout_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
struct pcf8563 *pcf8563 = clkout_hw_to_pcf8563(hw);
struct i2c_client *client = pcf8563->client;
unsigned char buf;
int ret = pcf8563_read_block_data(client, PCF8563_REG_CLKO, 1, &buf);
if (ret < 0)
return 0;
buf &= PCF8563_REG_CLKO_F_MASK;
return clkout_rates[buf];
}
static long pcf8563_clkout_round_rate(struct clk_hw *hw, unsigned long rate,
unsigned long *prate)
{
int i;
for (i = 0; i < ARRAY_SIZE(clkout_rates); i++)
if (clkout_rates[i] <= rate)
return clkout_rates[i];
return 0;
}
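/*
* Since clkout_rates[] is sorted in descending order, the loop above
* returns the highest supported rate that does not exceed the request:
* asking for 2000 Hz, for example, yields 1024 Hz, while anything below
* 1 Hz falls through to 0.
*/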
static int pcf8563_clkout_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate)
{
struct pcf8563 *pcf8563 = clkout_hw_to_pcf8563(hw);
struct i2c_client *client = pcf8563->client;
unsigned char buf;
int ret = pcf8563_read_block_data(client, PCF8563_REG_CLKO, 1, &buf);
int i;
if (ret < 0)
return ret;
for (i = 0; i < ARRAY_SIZE(clkout_rates); i++)
if (clkout_rates[i] == rate) {
buf &= ~PCF8563_REG_CLKO_F_MASK;
buf |= i;
ret = pcf8563_write_block_data(client,
PCF8563_REG_CLKO, 1,
&buf);
return ret;
}
return -EINVAL;
}
static int pcf8563_clkout_control(struct clk_hw *hw, bool enable)
{
struct pcf8563 *pcf8563 = clkout_hw_to_pcf8563(hw);
struct i2c_client *client = pcf8563->client;
unsigned char buf;
int ret = pcf8563_read_block_data(client, PCF8563_REG_CLKO, 1, &buf);
if (ret < 0)
return ret;
if (enable)
buf |= PCF8563_REG_CLKO_FE;
else
buf &= ~PCF8563_REG_CLKO_FE;
ret = pcf8563_write_block_data(client, PCF8563_REG_CLKO, 1, &buf);
return ret;
}
static int pcf8563_clkout_prepare(struct clk_hw *hw)
{
return pcf8563_clkout_control(hw, 1);
}
static void pcf8563_clkout_unprepare(struct clk_hw *hw)
{
pcf8563_clkout_control(hw, 0);
}
static int pcf8563_clkout_is_prepared(struct clk_hw *hw)
{
struct pcf8563 *pcf8563 = clkout_hw_to_pcf8563(hw);
struct i2c_client *client = pcf8563->client;
unsigned char buf;
int ret = pcf8563_read_block_data(client, PCF8563_REG_CLKO, 1, &buf);
if (ret < 0)
return ret;
return !!(buf & PCF8563_REG_CLKO_FE);
}
static const struct clk_ops pcf8563_clkout_ops = {
.prepare = pcf8563_clkout_prepare,
.unprepare = pcf8563_clkout_unprepare,
.is_prepared = pcf8563_clkout_is_prepared,
.recalc_rate = pcf8563_clkout_recalc_rate,
.round_rate = pcf8563_clkout_round_rate,
.set_rate = pcf8563_clkout_set_rate,
};
static struct clk *pcf8563_clkout_register_clk(struct pcf8563 *pcf8563)
{
struct i2c_client *client = pcf8563->client;
struct device_node *node = client->dev.of_node;
struct clk *clk;
struct clk_init_data init;
int ret;
unsigned char buf;
/* disable the clkout output */
buf = 0;
ret = pcf8563_write_block_data(client, PCF8563_REG_CLKO, 1, &buf);
if (ret < 0)
return ERR_PTR(ret);
init.name = "pcf8563-clkout";
init.ops = &pcf8563_clkout_ops;
init.flags = 0;
init.parent_names = NULL;
init.num_parents = 0;
pcf8563->clkout_hw.init = &init;
/* optional override of the clock name */
of_property_read_string(node, "clock-output-names", &init.name);
/* register the clock */
clk = devm_clk_register(&client->dev, &pcf8563->clkout_hw);
if (!IS_ERR(clk))
of_clk_add_provider(node, of_clk_src_simple_get, clk);
return clk;
}
#endif
static const struct rtc_class_ops pcf8563_rtc_ops = {
.ioctl = pcf8563_rtc_ioctl,
.read_time = pcf8563_rtc_read_time,
.set_time = pcf8563_rtc_set_time,
.read_alarm = pcf8563_rtc_read_alarm,
.set_alarm = pcf8563_rtc_set_alarm,
.alarm_irq_enable = pcf8563_irq_enable,
};
static int pcf8563_probe(struct i2c_client *client)
{
struct pcf8563 *pcf8563;
int err;
unsigned char buf;
dev_dbg(&client->dev, "%s\n", __func__);
if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C))
return -ENODEV;
pcf8563 = devm_kzalloc(&client->dev, sizeof(struct pcf8563),
GFP_KERNEL);
if (!pcf8563)
return -ENOMEM;
i2c_set_clientdata(client, pcf8563);
pcf8563->client = client;
device_set_wakeup_capable(&client->dev, 1);
/* Set timer to lowest frequency to save power (ref Haoyu datasheet) */
buf = PCF8563_TMRC_1_60;
err = pcf8563_write_block_data(client, PCF8563_REG_TMRC, 1, &buf);
if (err < 0) {
dev_err(&client->dev, "%s: write error\n", __func__);
return err;
}
/* Clear flags and disable interrupts */
buf = 0;
err = pcf8563_write_block_data(client, PCF8563_REG_ST2, 1, &buf);
if (err < 0) {
dev_err(&client->dev, "%s: write error\n", __func__);
return err;
}
pcf8563->rtc = devm_rtc_allocate_device(&client->dev);
if (IS_ERR(pcf8563->rtc))
return PTR_ERR(pcf8563->rtc);
pcf8563->rtc->ops = &pcf8563_rtc_ops;
/* the PCF8563 alarm only supports minute accuracy */
set_bit(RTC_FEATURE_ALARM_RES_MINUTE, pcf8563->rtc->features);
clear_bit(RTC_FEATURE_UPDATE_INTERRUPT, pcf8563->rtc->features);
pcf8563->rtc->range_min = RTC_TIMESTAMP_BEGIN_2000;
pcf8563->rtc->range_max = RTC_TIMESTAMP_END_2099;
pcf8563->rtc->set_start_time = true;
if (client->irq > 0) {
unsigned long irqflags = IRQF_TRIGGER_LOW;
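/*
 * When a firmware node (DT/ACPI) describes the interrupt, the trigger
 * type comes from the firmware, so no trigger flag is passed here;
 * otherwise fall back to level-low to match the chip's active-low
 * interrupt line.
 */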
if (dev_fwnode(&client->dev))
irqflags = 0;
err = devm_request_threaded_irq(&client->dev, client->irq,
NULL, pcf8563_irq,
IRQF_SHARED | IRQF_ONESHOT | irqflags,
pcf8563_driver.driver.name, client);
if (err) {
dev_err(&client->dev, "unable to request IRQ %d\n",
client->irq);
return err;
}
} else {
clear_bit(RTC_FEATURE_ALARM, pcf8563->rtc->features);
}
err = devm_rtc_register_device(pcf8563->rtc);
if (err)
return err;
#ifdef CONFIG_COMMON_CLK
/* register clk in common clk framework */
pcf8563_clkout_register_clk(pcf8563);
#endif
return 0;
}
static const struct i2c_device_id pcf8563_id[] = {
{ "pcf8563", 0 },
{ "rtc8564", 0 },
{ "pca8565", 0 },
{ }
};
MODULE_DEVICE_TABLE(i2c, pcf8563_id);
#ifdef CONFIG_OF
static const struct of_device_id pcf8563_of_match[] = {
{ .compatible = "nxp,pcf8563" },
{ .compatible = "epson,rtc8564" },
{ .compatible = "microcrystal,rv8564" },
{ .compatible = "nxp,pca8565" },
{}
};
MODULE_DEVICE_TABLE(of, pcf8563_of_match);
#endif
static struct i2c_driver pcf8563_driver = {
.driver = {
.name = "rtc-pcf8563",
.of_match_table = of_match_ptr(pcf8563_of_match),
},
.probe = pcf8563_probe,
.id_table = pcf8563_id,
};
module_i2c_driver(pcf8563_driver);
MODULE_AUTHOR("Alessandro Zummo <[email protected]>");
MODULE_DESCRIPTION("Philips PCF8563/Epson RTC8564 RTC driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/rtc/rtc-pcf8563.c |
// SPDX-License-Identifier: GPL-2.0
/*
 * RTC driver for the internal RTC block in the Amlogic Meson6, Meson8,
 * Meson8b and Meson8m2 SoCs.
 *
 * The RTC is split into two parts: the AHB front end and a simple serial
* connection to the actual registers. This driver manages both parts.
*
* Copyright (c) 2018 Martin Blumenstingl <[email protected]>
* Copyright (c) 2015 Ben Dooks <[email protected]> for Codethink Ltd
 * Based on original work by Carlo Caione <[email protected]>
*/
#include <linux/bitfield.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/nvmem-provider.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
#include <linux/reset.h>
#include <linux/rtc.h>
/* registers accessed from cpu bus */
#define RTC_ADDR0 0x00
#define RTC_ADDR0_LINE_SCLK BIT(0)
#define RTC_ADDR0_LINE_SEN BIT(1)
#define RTC_ADDR0_LINE_SDI BIT(2)
#define RTC_ADDR0_START_SER BIT(17)
#define RTC_ADDR0_WAIT_SER BIT(22)
#define RTC_ADDR0_DATA GENMASK(31, 24)
#define RTC_ADDR1 0x04
#define RTC_ADDR1_SDO BIT(0)
#define RTC_ADDR1_S_READY BIT(1)
#define RTC_ADDR2 0x08
#define RTC_ADDR3 0x0c
#define RTC_REG4 0x10
#define RTC_REG4_STATIC_VALUE GENMASK(7, 0)
/* rtc registers accessed via rtc-serial interface */
#define RTC_COUNTER (0)
#define RTC_SEC_ADJ (2)
#define RTC_REGMEM_0 (4)
#define RTC_REGMEM_1 (5)
#define RTC_REGMEM_2 (6)
#define RTC_REGMEM_3 (7)
#define RTC_ADDR_BITS (3) /* number of address bits to send */
#define RTC_DATA_BITS (32) /* number of data bits to tx/rx */
#define MESON_STATIC_BIAS_CUR (0x5 << 1)
#define MESON_STATIC_VOLTAGE (0x3 << 11)
#define MESON_STATIC_DEFAULT (MESON_STATIC_BIAS_CUR | MESON_STATIC_VOLTAGE)
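/*
 * The default static word is (0x5 << 1) | (0x3 << 11) = 0x180a;
 * meson_rtc_write_static() splits it, putting bits [15:8] into
 * RTC_REG4_STATIC_VALUE and bits [7:0] into the RTC_ADDR0_DATA field.
 */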
struct meson_rtc {
struct rtc_device *rtc; /* rtc device we created */
struct device *dev; /* device we bound from */
struct reset_control *reset; /* reset source */
struct regulator *vdd; /* voltage input */
struct regmap *peripheral; /* peripheral registers */
struct regmap *serial; /* serial registers */
};
static const struct regmap_config meson_rtc_peripheral_regmap_config = {
.name = "peripheral-registers",
.reg_bits = 8,
.val_bits = 32,
.reg_stride = 4,
.max_register = RTC_REG4,
.fast_io = true,
};
/* RTC front-end serialiser controls */
static void meson_rtc_sclk_pulse(struct meson_rtc *rtc)
{
udelay(5);
regmap_update_bits(rtc->peripheral, RTC_ADDR0, RTC_ADDR0_LINE_SCLK, 0);
udelay(5);
regmap_update_bits(rtc->peripheral, RTC_ADDR0, RTC_ADDR0_LINE_SCLK,
RTC_ADDR0_LINE_SCLK);
}
static void meson_rtc_send_bit(struct meson_rtc *rtc, unsigned int bit)
{
regmap_update_bits(rtc->peripheral, RTC_ADDR0, RTC_ADDR0_LINE_SDI,
bit ? RTC_ADDR0_LINE_SDI : 0);
meson_rtc_sclk_pulse(rtc);
}
static void meson_rtc_send_bits(struct meson_rtc *rtc, u32 data,
unsigned int nr)
{
u32 bit = 1 << (nr - 1);
while (bit) {
meson_rtc_send_bit(rtc, data & bit);
bit >>= 1;
}
}
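/*
 * Note: bits are shifted out MSB first, starting from bit (nr - 1) and
 * walking down to bit 0. For example, sending the 3-bit register
 * address 0b101 clocks out 1, then 0, then 1.
 */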
static void meson_rtc_set_dir(struct meson_rtc *rtc, u32 mode)
{
regmap_update_bits(rtc->peripheral, RTC_ADDR0, RTC_ADDR0_LINE_SEN, 0);
regmap_update_bits(rtc->peripheral, RTC_ADDR0, RTC_ADDR0_LINE_SDI, 0);
meson_rtc_send_bit(rtc, mode);
regmap_update_bits(rtc->peripheral, RTC_ADDR0, RTC_ADDR0_LINE_SDI, 0);
}
static u32 meson_rtc_get_data(struct meson_rtc *rtc)
{
u32 tmp, val = 0;
int bit;
for (bit = 0; bit < RTC_DATA_BITS; bit++) {
meson_rtc_sclk_pulse(rtc);
val <<= 1;
regmap_read(rtc->peripheral, RTC_ADDR1, &tmp);
val |= tmp & RTC_ADDR1_SDO;
}
return val;
}
static int meson_rtc_get_bus(struct meson_rtc *rtc)
{
int ret, retries;
u32 val;
/* prepare bus for transfers, set all lines low */
val = RTC_ADDR0_LINE_SDI | RTC_ADDR0_LINE_SEN | RTC_ADDR0_LINE_SCLK;
regmap_update_bits(rtc->peripheral, RTC_ADDR0, val, 0);
for (retries = 0; retries < 3; retries++) {
/* wait for the bus to be ready */
if (!regmap_read_poll_timeout(rtc->peripheral, RTC_ADDR1, val,
val & RTC_ADDR1_S_READY, 10,
10000))
return 0;
dev_warn(rtc->dev, "failed to get bus, resetting RTC\n");
ret = reset_control_reset(rtc->reset);
if (ret)
return ret;
}
dev_err(rtc->dev, "bus is not ready\n");
return -ETIMEDOUT;
}
static int meson_rtc_serial_bus_reg_read(void *context, unsigned int reg,
unsigned int *data)
{
struct meson_rtc *rtc = context;
int ret;
ret = meson_rtc_get_bus(rtc);
if (ret)
return ret;
regmap_update_bits(rtc->peripheral, RTC_ADDR0, RTC_ADDR0_LINE_SEN,
RTC_ADDR0_LINE_SEN);
meson_rtc_send_bits(rtc, reg, RTC_ADDR_BITS);
meson_rtc_set_dir(rtc, 0);
*data = meson_rtc_get_data(rtc);
return 0;
}
static int meson_rtc_serial_bus_reg_write(void *context, unsigned int reg,
unsigned int data)
{
struct meson_rtc *rtc = context;
int ret;
ret = meson_rtc_get_bus(rtc);
if (ret)
return ret;
regmap_update_bits(rtc->peripheral, RTC_ADDR0, RTC_ADDR0_LINE_SEN,
RTC_ADDR0_LINE_SEN);
meson_rtc_send_bits(rtc, data, RTC_DATA_BITS);
meson_rtc_send_bits(rtc, reg, RTC_ADDR_BITS);
meson_rtc_set_dir(rtc, 1);
return 0;
}
static const struct regmap_bus meson_rtc_serial_bus = {
.reg_read = meson_rtc_serial_bus_reg_read,
.reg_write = meson_rtc_serial_bus_reg_write,
};
static const struct regmap_config meson_rtc_serial_regmap_config = {
.name = "serial-registers",
.reg_bits = 4,
.reg_stride = 1,
.val_bits = 32,
.max_register = RTC_REGMEM_3,
.fast_io = false,
};
static int meson_rtc_write_static(struct meson_rtc *rtc, u32 data)
{
u32 tmp;
regmap_write(rtc->peripheral, RTC_REG4,
FIELD_PREP(RTC_REG4_STATIC_VALUE, (data >> 8)));
/* write the static value and start the auto serializer */
tmp = FIELD_PREP(RTC_ADDR0_DATA, (data & 0xff)) | RTC_ADDR0_START_SER;
regmap_update_bits(rtc->peripheral, RTC_ADDR0,
RTC_ADDR0_DATA | RTC_ADDR0_START_SER, tmp);
/* wait for the auto serializer to complete */
return regmap_read_poll_timeout(rtc->peripheral, RTC_REG4, tmp,
!(tmp & RTC_ADDR0_WAIT_SER), 10,
10000);
}
/* RTC interface layer functions */
static int meson_rtc_gettime(struct device *dev, struct rtc_time *tm)
{
struct meson_rtc *rtc = dev_get_drvdata(dev);
u32 time;
int ret;
ret = regmap_read(rtc->serial, RTC_COUNTER, &time);
if (!ret)
rtc_time64_to_tm(time, tm);
return ret;
}
static int meson_rtc_settime(struct device *dev, struct rtc_time *tm)
{
struct meson_rtc *rtc = dev_get_drvdata(dev);
return regmap_write(rtc->serial, RTC_COUNTER, rtc_tm_to_time64(tm));
}
static const struct rtc_class_ops meson_rtc_ops = {
.read_time = meson_rtc_gettime,
.set_time = meson_rtc_settime,
};
/* NVMEM interface layer functions */
static int meson_rtc_regmem_read(void *context, unsigned int offset,
void *buf, size_t bytes)
{
struct meson_rtc *rtc = context;
unsigned int read_offset, read_size;
read_offset = RTC_REGMEM_0 + (offset / 4);
read_size = bytes / 4;
return regmap_bulk_read(rtc->serial, read_offset, buf, read_size);
}
static int meson_rtc_regmem_write(void *context, unsigned int offset,
void *buf, size_t bytes)
{
struct meson_rtc *rtc = context;
unsigned int write_offset, write_size;
write_offset = RTC_REGMEM_0 + (offset / 4);
write_size = bytes / 4;
return regmap_bulk_write(rtc->serial, write_offset, buf, write_size);
}
static int meson_rtc_probe(struct platform_device *pdev)
{
struct nvmem_config meson_rtc_nvmem_config = {
.name = "meson-rtc-regmem",
.type = NVMEM_TYPE_BATTERY_BACKED,
.word_size = 4,
.stride = 4,
.size = 4 * 4,
.reg_read = meson_rtc_regmem_read,
.reg_write = meson_rtc_regmem_write,
};
struct device *dev = &pdev->dev;
struct meson_rtc *rtc;
void __iomem *base;
int ret;
u32 tm;
rtc = devm_kzalloc(dev, sizeof(struct meson_rtc), GFP_KERNEL);
if (!rtc)
return -ENOMEM;
rtc->rtc = devm_rtc_allocate_device(dev);
if (IS_ERR(rtc->rtc))
return PTR_ERR(rtc->rtc);
platform_set_drvdata(pdev, rtc);
rtc->dev = dev;
rtc->rtc->ops = &meson_rtc_ops;
rtc->rtc->range_max = U32_MAX;
base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base))
return PTR_ERR(base);
rtc->peripheral = devm_regmap_init_mmio(dev, base,
&meson_rtc_peripheral_regmap_config);
if (IS_ERR(rtc->peripheral)) {
dev_err(dev, "failed to create peripheral regmap\n");
return PTR_ERR(rtc->peripheral);
}
rtc->reset = devm_reset_control_get(dev, NULL);
if (IS_ERR(rtc->reset)) {
dev_err(dev, "missing reset line\n");
return PTR_ERR(rtc->reset);
}
rtc->vdd = devm_regulator_get(dev, "vdd");
if (IS_ERR(rtc->vdd)) {
dev_err(dev, "failed to get the vdd-supply\n");
return PTR_ERR(rtc->vdd);
}
ret = regulator_enable(rtc->vdd);
if (ret) {
dev_err(dev, "failed to enable vdd-supply\n");
return ret;
}
ret = meson_rtc_write_static(rtc, MESON_STATIC_DEFAULT);
if (ret) {
dev_err(dev, "failed to set static values\n");
goto out_disable_vdd;
}
rtc->serial = devm_regmap_init(dev, &meson_rtc_serial_bus, rtc,
&meson_rtc_serial_regmap_config);
if (IS_ERR(rtc->serial)) {
dev_err(dev, "failed to create serial regmap\n");
ret = PTR_ERR(rtc->serial);
goto out_disable_vdd;
}
/*
 * Check if we can read the RTC counter; if not, the RTC is probably
 * not functional and it is best not to bind.
*/
ret = regmap_read(rtc->serial, RTC_COUNTER, &tm);
if (ret) {
dev_err(dev, "cannot read RTC counter, RTC not functional\n");
goto out_disable_vdd;
}
meson_rtc_nvmem_config.priv = rtc;
ret = devm_rtc_nvmem_register(rtc->rtc, &meson_rtc_nvmem_config);
if (ret)
goto out_disable_vdd;
ret = devm_rtc_register_device(rtc->rtc);
if (ret)
goto out_disable_vdd;
return 0;
out_disable_vdd:
regulator_disable(rtc->vdd);
return ret;
}
static const __maybe_unused struct of_device_id meson_rtc_dt_match[] = {
{ .compatible = "amlogic,meson6-rtc", },
{ .compatible = "amlogic,meson8-rtc", },
{ .compatible = "amlogic,meson8b-rtc", },
{ .compatible = "amlogic,meson8m2-rtc", },
{ },
};
MODULE_DEVICE_TABLE(of, meson_rtc_dt_match);
static struct platform_driver meson_rtc_driver = {
.probe = meson_rtc_probe,
.driver = {
.name = "meson-rtc",
.of_match_table = of_match_ptr(meson_rtc_dt_match),
},
};
module_platform_driver(meson_rtc_driver);
MODULE_DESCRIPTION("Amlogic Meson RTC Driver");
MODULE_AUTHOR("Ben Dooks <[email protected]>");
MODULE_AUTHOR("Martin Blumenstingl <[email protected]>");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:meson-rtc");
| linux-master | drivers/rtc/rtc-meson.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Real Time Clock driver for Freescale MC13XXX PMIC
*
* (C) 2009 Sascha Hauer, Pengutronix
* (C) 2009 Uwe Kleine-Koenig, Pengutronix
*/
#include <linux/mfd/mc13xxx.h>
#include <linux/platform_device.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/slab.h>
#include <linux/rtc.h>
#define DRIVER_NAME "mc13xxx-rtc"
#define MC13XXX_RTCTOD 20
#define MC13XXX_RTCTODA 21
#define MC13XXX_RTCDAY 22
#define MC13XXX_RTCDAYA 23
#define SEC_PER_DAY (24 * 60 * 60)
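/*
 * Time is stored as a day counter (MC13XXX_RTCDAY) plus a seconds-of-day
 * counter (MC13XXX_RTCTOD): e.g. time64_t 90061 (one day, one hour, one
 * minute and one second past the epoch) is stored as days = 1 and
 * seconds = 3661.
 */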
struct mc13xxx_rtc {
struct rtc_device *rtc;
struct mc13xxx *mc13xxx;
int valid;
};
static int mc13xxx_rtc_irq_enable_unlocked(struct device *dev,
unsigned int enabled, int irq)
{
struct mc13xxx_rtc *priv = dev_get_drvdata(dev);
int (*func)(struct mc13xxx *mc13xxx, int irq);
if (!priv->valid)
return -ENODATA;
func = enabled ? mc13xxx_irq_unmask : mc13xxx_irq_mask;
return func(priv->mc13xxx, irq);
}
static int mc13xxx_rtc_alarm_irq_enable(struct device *dev,
unsigned int enabled)
{
struct mc13xxx_rtc *priv = dev_get_drvdata(dev);
int ret;
mc13xxx_lock(priv->mc13xxx);
ret = mc13xxx_rtc_irq_enable_unlocked(dev, enabled, MC13XXX_IRQ_TODA);
mc13xxx_unlock(priv->mc13xxx);
return ret;
}
static int mc13xxx_rtc_read_time(struct device *dev, struct rtc_time *tm)
{
struct mc13xxx_rtc *priv = dev_get_drvdata(dev);
unsigned int seconds, days1, days2;
if (!priv->valid)
return -ENODATA;
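/*
 * Read the day counter before and after reading the seconds counter:
 * if the two day values differ, a midnight rollover happened in
 * between, so retry the whole sequence.
 */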
do {
int ret;
ret = mc13xxx_reg_read(priv->mc13xxx, MC13XXX_RTCDAY, &days1);
if (ret)
return ret;
ret = mc13xxx_reg_read(priv->mc13xxx, MC13XXX_RTCTOD, &seconds);
if (ret)
return ret;
ret = mc13xxx_reg_read(priv->mc13xxx, MC13XXX_RTCDAY, &days2);
if (ret)
return ret;
} while (days1 != days2);
rtc_time64_to_tm((time64_t)days1 * SEC_PER_DAY + seconds, tm);
return 0;
}
static int mc13xxx_rtc_set_time(struct device *dev, struct rtc_time *tm)
{
struct mc13xxx_rtc *priv = dev_get_drvdata(dev);
unsigned int seconds, days;
unsigned int alarmseconds;
int ret;
days = div_s64_rem(rtc_tm_to_time64(tm), SEC_PER_DAY, &seconds);
mc13xxx_lock(priv->mc13xxx);
/*
 * temporarily invalidate the alarm to prevent it from triggering when
 * the day is already updated but the time isn't yet.
*/
ret = mc13xxx_reg_read(priv->mc13xxx, MC13XXX_RTCTODA, &alarmseconds);
if (unlikely(ret))
goto out;
if (alarmseconds < SEC_PER_DAY) {
ret = mc13xxx_reg_write(priv->mc13xxx,
MC13XXX_RTCTODA, 0x1ffff);
if (unlikely(ret))
goto out;
}
/*
* write seconds=0 to prevent a day switch between writing days
* and seconds below
*/
ret = mc13xxx_reg_write(priv->mc13xxx, MC13XXX_RTCTOD, 0);
if (unlikely(ret))
goto out;
ret = mc13xxx_reg_write(priv->mc13xxx, MC13XXX_RTCDAY, days);
if (unlikely(ret))
goto out;
ret = mc13xxx_reg_write(priv->mc13xxx, MC13XXX_RTCTOD, seconds);
if (unlikely(ret))
goto out;
/* restore alarm */
if (alarmseconds < SEC_PER_DAY) {
ret = mc13xxx_reg_write(priv->mc13xxx,
MC13XXX_RTCTODA, alarmseconds);
if (unlikely(ret))
goto out;
}
if (!priv->valid) {
ret = mc13xxx_irq_ack(priv->mc13xxx, MC13XXX_IRQ_RTCRST);
if (unlikely(ret))
goto out;
ret = mc13xxx_irq_unmask(priv->mc13xxx, MC13XXX_IRQ_RTCRST);
}
out:
priv->valid = !ret;
mc13xxx_unlock(priv->mc13xxx);
return ret;
}
static int mc13xxx_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alarm)
{
struct mc13xxx_rtc *priv = dev_get_drvdata(dev);
unsigned int seconds, days;
time64_t s1970;
int enabled, pending;
int ret;
mc13xxx_lock(priv->mc13xxx);
ret = mc13xxx_reg_read(priv->mc13xxx, MC13XXX_RTCTODA, &seconds);
if (unlikely(ret))
goto out;
if (seconds >= SEC_PER_DAY) {
ret = -ENODATA;
goto out;
}
ret = mc13xxx_reg_read(priv->mc13xxx, MC13XXX_RTCDAY, &days);
if (unlikely(ret))
goto out;
ret = mc13xxx_irq_status(priv->mc13xxx, MC13XXX_IRQ_TODA,
&enabled, &pending);
out:
mc13xxx_unlock(priv->mc13xxx);
if (ret)
return ret;
alarm->enabled = enabled;
alarm->pending = pending;
s1970 = (time64_t)days * SEC_PER_DAY + seconds;
rtc_time64_to_tm(s1970, &alarm->time);
dev_dbg(dev, "%s: %lld\n", __func__, (long long)s1970);
return 0;
}
static int mc13xxx_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alarm)
{
struct mc13xxx_rtc *priv = dev_get_drvdata(dev);
time64_t s1970;
u32 seconds, days;
int ret;
mc13xxx_lock(priv->mc13xxx);
/* disable alarm to prevent false triggering */
ret = mc13xxx_reg_write(priv->mc13xxx, MC13XXX_RTCTODA, 0x1ffff);
if (unlikely(ret))
goto out;
ret = mc13xxx_irq_ack(priv->mc13xxx, MC13XXX_IRQ_TODA);
if (unlikely(ret))
goto out;
s1970 = rtc_tm_to_time64(&alarm->time);
dev_dbg(dev, "%s: %s %lld\n", __func__, alarm->enabled ? "on" : "off",
(long long)s1970);
ret = mc13xxx_rtc_irq_enable_unlocked(dev, alarm->enabled,
MC13XXX_IRQ_TODA);
if (unlikely(ret))
goto out;
days = div_s64_rem(s1970, SEC_PER_DAY, &seconds);
ret = mc13xxx_reg_write(priv->mc13xxx, MC13XXX_RTCDAYA, days);
if (unlikely(ret))
goto out;
ret = mc13xxx_reg_write(priv->mc13xxx, MC13XXX_RTCTODA, seconds);
out:
mc13xxx_unlock(priv->mc13xxx);
return ret;
}
static irqreturn_t mc13xxx_rtc_alarm_handler(int irq, void *dev)
{
struct mc13xxx_rtc *priv = dev;
struct mc13xxx *mc13xxx = priv->mc13xxx;
rtc_update_irq(priv->rtc, 1, RTC_IRQF | RTC_AF);
mc13xxx_irq_ack(mc13xxx, irq);
return IRQ_HANDLED;
}
static const struct rtc_class_ops mc13xxx_rtc_ops = {
.read_time = mc13xxx_rtc_read_time,
.set_time = mc13xxx_rtc_set_time,
.read_alarm = mc13xxx_rtc_read_alarm,
.set_alarm = mc13xxx_rtc_set_alarm,
.alarm_irq_enable = mc13xxx_rtc_alarm_irq_enable,
};
static irqreturn_t mc13xxx_rtc_reset_handler(int irq, void *dev)
{
struct mc13xxx_rtc *priv = dev;
struct mc13xxx *mc13xxx = priv->mc13xxx;
priv->valid = 0;
mc13xxx_irq_mask(mc13xxx, irq);
return IRQ_HANDLED;
}
static int __init mc13xxx_rtc_probe(struct platform_device *pdev)
{
int ret;
struct mc13xxx_rtc *priv;
struct mc13xxx *mc13xxx;
priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
mc13xxx = dev_get_drvdata(pdev->dev.parent);
priv->mc13xxx = mc13xxx;
priv->valid = 1;
priv->rtc = devm_rtc_allocate_device(&pdev->dev);
if (IS_ERR(priv->rtc))
return PTR_ERR(priv->rtc);
platform_set_drvdata(pdev, priv);
priv->rtc->ops = &mc13xxx_rtc_ops;
/* 15-bit days + hours, minutes, seconds */
priv->rtc->range_max = (timeu64_t)(1 << 15) * SEC_PER_DAY - 1;
mc13xxx_lock(mc13xxx);
mc13xxx_irq_ack(mc13xxx, MC13XXX_IRQ_RTCRST);
ret = mc13xxx_irq_request(mc13xxx, MC13XXX_IRQ_RTCRST,
mc13xxx_rtc_reset_handler, DRIVER_NAME, priv);
if (ret)
goto err_irq_request;
ret = mc13xxx_irq_request_nounmask(mc13xxx, MC13XXX_IRQ_TODA,
mc13xxx_rtc_alarm_handler, DRIVER_NAME, priv);
if (ret)
goto err_irq_request;
mc13xxx_unlock(mc13xxx);
ret = devm_rtc_register_device(priv->rtc);
if (ret) {
mc13xxx_lock(mc13xxx);
goto err_irq_request;
}
return 0;
err_irq_request:
mc13xxx_irq_free(mc13xxx, MC13XXX_IRQ_TODA, priv);
mc13xxx_irq_free(mc13xxx, MC13XXX_IRQ_RTCRST, priv);
mc13xxx_unlock(mc13xxx);
return ret;
}
static void mc13xxx_rtc_remove(struct platform_device *pdev)
{
struct mc13xxx_rtc *priv = platform_get_drvdata(pdev);
mc13xxx_lock(priv->mc13xxx);
mc13xxx_irq_free(priv->mc13xxx, MC13XXX_IRQ_TODA, priv);
mc13xxx_irq_free(priv->mc13xxx, MC13XXX_IRQ_RTCRST, priv);
mc13xxx_unlock(priv->mc13xxx);
}
static const struct platform_device_id mc13xxx_rtc_idtable[] = {
{
.name = "mc13783-rtc",
}, {
.name = "mc13892-rtc",
}, {
.name = "mc34708-rtc",
},
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(platform, mc13xxx_rtc_idtable);
static struct platform_driver mc13xxx_rtc_driver = {
.id_table = mc13xxx_rtc_idtable,
.remove_new = mc13xxx_rtc_remove,
.driver = {
.name = DRIVER_NAME,
},
};
module_platform_driver_probe(mc13xxx_rtc_driver, &mc13xxx_rtc_probe);
MODULE_AUTHOR("Sascha Hauer <[email protected]>");
MODULE_DESCRIPTION("RTC driver for Freescale MC13XXX PMIC");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/rtc/rtc-mc13xxx.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Real time clocks driver for MStar/SigmaStar ARMv7 SoCs.
* Based on "Real Time Clock driver for msb252x." that was contained
* in various MStar kernels.
*
* (C) 2019 Daniel Palmer
* (C) 2021 Romain Perier
*/
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include <linux/rtc.h>
/* Registers */
#define REG_RTC_CTRL 0x00
#define REG_RTC_FREQ_CW_L 0x04
#define REG_RTC_FREQ_CW_H 0x08
#define REG_RTC_LOAD_VAL_L 0x0C
#define REG_RTC_LOAD_VAL_H 0x10
#define REG_RTC_MATCH_VAL_L 0x14
#define REG_RTC_MATCH_VAL_H 0x18
#define REG_RTC_STATUS_INT 0x1C
#define REG_RTC_CNT_VAL_L 0x20
#define REG_RTC_CNT_VAL_H 0x24
/* Control bits for REG_RTC_CTRL */
#define SOFT_RSTZ_BIT BIT(0)
#define CNT_EN_BIT BIT(1)
#define WRAP_EN_BIT BIT(2)
#define LOAD_EN_BIT BIT(3)
#define READ_EN_BIT BIT(4)
#define INT_MASK_BIT BIT(5)
#define INT_FORCE_BIT BIT(6)
#define INT_CLEAR_BIT BIT(7)
/* Control bits for REG_RTC_STATUS_INT */
#define RAW_INT_BIT BIT(0)
#define ALM_INT_BIT BIT(1)
struct msc313_rtc {
struct rtc_device *rtc_dev;
void __iomem *rtc_base;
};
static int msc313_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alarm)
{
struct msc313_rtc *priv = dev_get_drvdata(dev);
unsigned long seconds;
seconds = readw(priv->rtc_base + REG_RTC_MATCH_VAL_L)
| ((unsigned long)readw(priv->rtc_base + REG_RTC_MATCH_VAL_H) << 16);
rtc_time64_to_tm(seconds, &alarm->time);
if (!(readw(priv->rtc_base + REG_RTC_CTRL) & INT_MASK_BIT))
alarm->enabled = 1;
return 0;
}
static int msc313_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
{
struct msc313_rtc *priv = dev_get_drvdata(dev);
u16 reg;
reg = readw(priv->rtc_base + REG_RTC_CTRL);
if (enabled)
reg &= ~INT_MASK_BIT;
else
reg |= INT_MASK_BIT;
writew(reg, priv->rtc_base + REG_RTC_CTRL);
return 0;
}
static int msc313_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alarm)
{
struct msc313_rtc *priv = dev_get_drvdata(dev);
unsigned long seconds;
seconds = rtc_tm_to_time64(&alarm->time);
writew((seconds & 0xFFFF), priv->rtc_base + REG_RTC_MATCH_VAL_L);
writew((seconds >> 16) & 0xFFFF, priv->rtc_base + REG_RTC_MATCH_VAL_H);
msc313_rtc_alarm_irq_enable(dev, alarm->enabled);
return 0;
}
static bool msc313_rtc_get_enabled(struct msc313_rtc *priv)
{
return readw(priv->rtc_base + REG_RTC_CTRL) & CNT_EN_BIT;
}
static void msc313_rtc_set_enabled(struct msc313_rtc *priv)
{
u16 reg;
reg = readw(priv->rtc_base + REG_RTC_CTRL);
reg |= CNT_EN_BIT;
writew(reg, priv->rtc_base + REG_RTC_CTRL);
}
static int msc313_rtc_read_time(struct device *dev, struct rtc_time *tm)
{
struct msc313_rtc *priv = dev_get_drvdata(dev);
u32 seconds;
u16 reg;
if (!msc313_rtc_get_enabled(priv))
return -EINVAL;
reg = readw(priv->rtc_base + REG_RTC_CTRL);
writew(reg | READ_EN_BIT, priv->rtc_base + REG_RTC_CTRL);
/* Wait for HW latch done */
while (readw(priv->rtc_base + REG_RTC_CTRL) & READ_EN_BIT)
udelay(1);
seconds = readw(priv->rtc_base + REG_RTC_CNT_VAL_L)
| ((unsigned long)readw(priv->rtc_base + REG_RTC_CNT_VAL_H) << 16);
rtc_time64_to_tm(seconds, tm);
return 0;
}
static int msc313_rtc_set_time(struct device *dev, struct rtc_time *tm)
{
struct msc313_rtc *priv = dev_get_drvdata(dev);
unsigned long seconds;
u16 reg;
seconds = rtc_tm_to_time64(tm);
writew(seconds & 0xFFFF, priv->rtc_base + REG_RTC_LOAD_VAL_L);
writew((seconds >> 16) & 0xFFFF, priv->rtc_base + REG_RTC_LOAD_VAL_H);
/* Enable load for loading value into internal RTC counter */
reg = readw(priv->rtc_base + REG_RTC_CTRL);
writew(reg | LOAD_EN_BIT, priv->rtc_base + REG_RTC_CTRL);
/* Wait for HW latch done */
while (readw(priv->rtc_base + REG_RTC_CTRL) & LOAD_EN_BIT)
udelay(1);
msc313_rtc_set_enabled(priv);
return 0;
}
static const struct rtc_class_ops msc313_rtc_ops = {
.read_time = msc313_rtc_read_time,
.set_time = msc313_rtc_set_time,
.read_alarm = msc313_rtc_read_alarm,
.set_alarm = msc313_rtc_set_alarm,
.alarm_irq_enable = msc313_rtc_alarm_irq_enable,
};
static irqreturn_t msc313_rtc_interrupt(s32 irq, void *dev_id)
{
struct msc313_rtc *priv = dev_get_drvdata(dev_id);
u16 reg;
reg = readw(priv->rtc_base + REG_RTC_STATUS_INT);
if (!(reg & ALM_INT_BIT))
return IRQ_NONE;
reg = readw(priv->rtc_base + REG_RTC_CTRL);
reg |= INT_CLEAR_BIT;
reg &= ~INT_FORCE_BIT;
writew(reg, priv->rtc_base + REG_RTC_CTRL);
rtc_update_irq(priv->rtc_dev, 1, RTC_IRQF | RTC_AF);
return IRQ_HANDLED;
}
static int msc313_rtc_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct msc313_rtc *priv;
unsigned long rate;
struct clk *clk;
int ret;
int irq;
priv = devm_kzalloc(&pdev->dev, sizeof(struct msc313_rtc), GFP_KERNEL);
if (!priv)
return -ENOMEM;
priv->rtc_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(priv->rtc_base))
return PTR_ERR(priv->rtc_base);
irq = platform_get_irq(pdev, 0);
if (irq < 0)
return -EINVAL;
priv->rtc_dev = devm_rtc_allocate_device(dev);
if (IS_ERR(priv->rtc_dev))
return PTR_ERR(priv->rtc_dev);
priv->rtc_dev->ops = &msc313_rtc_ops;
priv->rtc_dev->range_max = U32_MAX;
ret = devm_request_irq(dev, irq, msc313_rtc_interrupt, IRQF_SHARED,
dev_name(&pdev->dev), &pdev->dev);
if (ret) {
dev_err(dev, "Could not request IRQ\n");
return ret;
}
clk = devm_clk_get_enabled(dev, NULL);
if (IS_ERR(clk)) {
dev_err(dev, "No input reference clock\n");
return PTR_ERR(clk);
}
rate = clk_get_rate(clk);
writew(rate & 0xFFFF, priv->rtc_base + REG_RTC_FREQ_CW_L);
writew((rate >> 16) & 0xFFFF, priv->rtc_base + REG_RTC_FREQ_CW_H);
platform_set_drvdata(pdev, priv);
return devm_rtc_register_device(priv->rtc_dev);
}
static const struct of_device_id msc313_rtc_of_match_table[] = {
{ .compatible = "mstar,msc313-rtc" },
{ }
};
MODULE_DEVICE_TABLE(of, msc313_rtc_of_match_table);
static struct platform_driver msc313_rtc_driver = {
.probe = msc313_rtc_probe,
.driver = {
.name = "msc313-rtc",
.of_match_table = msc313_rtc_of_match_table,
},
};
module_platform_driver(msc313_rtc_driver);
MODULE_AUTHOR("Daniel Palmer <[email protected]>");
MODULE_AUTHOR("Romain Perier <[email protected]>");
MODULE_DESCRIPTION("MStar RTC Driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/rtc/rtc-msc313.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Driver for the RTC in Marvell SoCs.
*/
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/rtc.h>
#include <linux/bcd.h>
#include <linux/bitops.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/of.h>
#include <linux/delay.h>
#include <linux/clk.h>
#include <linux/gfp.h>
#include <linux/module.h>
#define RTC_TIME_REG_OFFS 0
#define RTC_SECONDS_OFFS 0
#define RTC_MINUTES_OFFS 8
#define RTC_HOURS_OFFS 16
#define RTC_WDAY_OFFS 24
#define RTC_HOURS_12H_MODE BIT(22) /* 12 hour mode */
#define RTC_DATE_REG_OFFS 4
#define RTC_MDAY_OFFS 0
#define RTC_MONTH_OFFS 8
#define RTC_YEAR_OFFS 16
#define RTC_ALARM_TIME_REG_OFFS 8
#define RTC_ALARM_DATE_REG_OFFS 0xc
#define RTC_ALARM_VALID BIT(7)
#define RTC_ALARM_INTERRUPT_MASK_REG_OFFS 0x10
#define RTC_ALARM_INTERRUPT_CAUSE_REG_OFFS 0x14
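/*
 * For example, the packed BCD time register for 12:34:56 on a Wednesday
 * (tm_wday == 3) is (0x03 << 24) | (0x12 << 16) | (0x34 << 8) | 0x56,
 * i.e. 0x03123456.
 */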
struct rtc_plat_data {
struct rtc_device *rtc;
void __iomem *ioaddr;
int irq;
struct clk *clk;
};
static int mv_rtc_set_time(struct device *dev, struct rtc_time *tm)
{
struct rtc_plat_data *pdata = dev_get_drvdata(dev);
void __iomem *ioaddr = pdata->ioaddr;
u32 rtc_reg;
rtc_reg = (bin2bcd(tm->tm_sec) << RTC_SECONDS_OFFS) |
(bin2bcd(tm->tm_min) << RTC_MINUTES_OFFS) |
(bin2bcd(tm->tm_hour) << RTC_HOURS_OFFS) |
(bin2bcd(tm->tm_wday) << RTC_WDAY_OFFS);
writel(rtc_reg, ioaddr + RTC_TIME_REG_OFFS);
rtc_reg = (bin2bcd(tm->tm_mday) << RTC_MDAY_OFFS) |
(bin2bcd(tm->tm_mon + 1) << RTC_MONTH_OFFS) |
(bin2bcd(tm->tm_year - 100) << RTC_YEAR_OFFS);
writel(rtc_reg, ioaddr + RTC_DATE_REG_OFFS);
return 0;
}
static int mv_rtc_read_time(struct device *dev, struct rtc_time *tm)
{
struct rtc_plat_data *pdata = dev_get_drvdata(dev);
void __iomem *ioaddr = pdata->ioaddr;
u32 rtc_time, rtc_date;
unsigned int year, month, day, hour, minute, second, wday;
rtc_time = readl(ioaddr + RTC_TIME_REG_OFFS);
rtc_date = readl(ioaddr + RTC_DATE_REG_OFFS);
second = rtc_time & 0x7f;
minute = (rtc_time >> RTC_MINUTES_OFFS) & 0x7f;
hour = (rtc_time >> RTC_HOURS_OFFS) & 0x3f; /* assume 24 hour mode */
wday = (rtc_time >> RTC_WDAY_OFFS) & 0x7;
day = rtc_date & 0x3f;
month = (rtc_date >> RTC_MONTH_OFFS) & 0x3f;
year = (rtc_date >> RTC_YEAR_OFFS) & 0xff;
tm->tm_sec = bcd2bin(second);
tm->tm_min = bcd2bin(minute);
tm->tm_hour = bcd2bin(hour);
tm->tm_mday = bcd2bin(day);
tm->tm_wday = bcd2bin(wday);
tm->tm_mon = bcd2bin(month) - 1;
/* hw counts from year 2000, but tm_year is relative to 1900 */
tm->tm_year = bcd2bin(year) + 100;
return 0;
}
static int mv_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alm)
{
struct rtc_plat_data *pdata = dev_get_drvdata(dev);
void __iomem *ioaddr = pdata->ioaddr;
u32 rtc_time, rtc_date;
unsigned int year, month, day, hour, minute, second, wday;
rtc_time = readl(ioaddr + RTC_ALARM_TIME_REG_OFFS);
rtc_date = readl(ioaddr + RTC_ALARM_DATE_REG_OFFS);
second = rtc_time & 0x7f;
minute = (rtc_time >> RTC_MINUTES_OFFS) & 0x7f;
hour = (rtc_time >> RTC_HOURS_OFFS) & 0x3f; /* assume 24 hour mode */
wday = (rtc_time >> RTC_WDAY_OFFS) & 0x7;
day = rtc_date & 0x3f;
month = (rtc_date >> RTC_MONTH_OFFS) & 0x3f;
year = (rtc_date >> RTC_YEAR_OFFS) & 0xff;
alm->time.tm_sec = bcd2bin(second);
alm->time.tm_min = bcd2bin(minute);
alm->time.tm_hour = bcd2bin(hour);
alm->time.tm_mday = bcd2bin(day);
alm->time.tm_wday = bcd2bin(wday);
alm->time.tm_mon = bcd2bin(month) - 1;
/* hw counts from year 2000, but tm_year is relative to 1900 */
alm->time.tm_year = bcd2bin(year) + 100;
alm->enabled = !!readl(ioaddr + RTC_ALARM_INTERRUPT_MASK_REG_OFFS);
return rtc_valid_tm(&alm->time);
}
static int mv_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alm)
{
struct rtc_plat_data *pdata = dev_get_drvdata(dev);
void __iomem *ioaddr = pdata->ioaddr;
u32 rtc_reg = 0;
if (alm->time.tm_sec >= 0)
rtc_reg |= (RTC_ALARM_VALID | bin2bcd(alm->time.tm_sec))
<< RTC_SECONDS_OFFS;
if (alm->time.tm_min >= 0)
rtc_reg |= (RTC_ALARM_VALID | bin2bcd(alm->time.tm_min))
<< RTC_MINUTES_OFFS;
if (alm->time.tm_hour >= 0)
rtc_reg |= (RTC_ALARM_VALID | bin2bcd(alm->time.tm_hour))
<< RTC_HOURS_OFFS;
writel(rtc_reg, ioaddr + RTC_ALARM_TIME_REG_OFFS);
if (alm->time.tm_mday >= 0)
rtc_reg = (RTC_ALARM_VALID | bin2bcd(alm->time.tm_mday))
<< RTC_MDAY_OFFS;
else
rtc_reg = 0;
if (alm->time.tm_mon >= 0)
rtc_reg |= (RTC_ALARM_VALID | bin2bcd(alm->time.tm_mon + 1))
<< RTC_MONTH_OFFS;
if (alm->time.tm_year >= 0)
rtc_reg |= (RTC_ALARM_VALID | bin2bcd(alm->time.tm_year - 100))
<< RTC_YEAR_OFFS;
writel(rtc_reg, ioaddr + RTC_ALARM_DATE_REG_OFFS);
writel(0, ioaddr + RTC_ALARM_INTERRUPT_CAUSE_REG_OFFS);
writel(alm->enabled ? 1 : 0,
ioaddr + RTC_ALARM_INTERRUPT_MASK_REG_OFFS);
return 0;
}
static int mv_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
{
struct rtc_plat_data *pdata = dev_get_drvdata(dev);
void __iomem *ioaddr = pdata->ioaddr;
if (pdata->irq < 0)
return -EINVAL; /* fall back into rtc-dev's emulation */
if (enabled)
writel(1, ioaddr + RTC_ALARM_INTERRUPT_MASK_REG_OFFS);
else
writel(0, ioaddr + RTC_ALARM_INTERRUPT_MASK_REG_OFFS);
return 0;
}
static irqreturn_t mv_rtc_interrupt(int irq, void *data)
{
struct rtc_plat_data *pdata = data;
void __iomem *ioaddr = pdata->ioaddr;
/* alarm irq? */
if (!readl(ioaddr + RTC_ALARM_INTERRUPT_CAUSE_REG_OFFS))
return IRQ_NONE;
/* clear interrupt */
writel(0, ioaddr + RTC_ALARM_INTERRUPT_CAUSE_REG_OFFS);
rtc_update_irq(pdata->rtc, 1, RTC_IRQF | RTC_AF);
return IRQ_HANDLED;
}
static const struct rtc_class_ops mv_rtc_ops = {
.read_time = mv_rtc_read_time,
.set_time = mv_rtc_set_time,
.read_alarm = mv_rtc_read_alarm,
.set_alarm = mv_rtc_set_alarm,
.alarm_irq_enable = mv_rtc_alarm_irq_enable,
};
static int __init mv_rtc_probe(struct platform_device *pdev)
{
struct rtc_plat_data *pdata;
u32 rtc_time;
int ret = 0;
pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
if (!pdata)
return -ENOMEM;
pdata->ioaddr = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(pdata->ioaddr))
return PTR_ERR(pdata->ioaddr);
pdata->clk = devm_clk_get(&pdev->dev, NULL);
/* Not all SoCs require a clock. */
if (!IS_ERR(pdata->clk))
clk_prepare_enable(pdata->clk);
/* make sure the 24 hour mode is enabled */
rtc_time = readl(pdata->ioaddr + RTC_TIME_REG_OFFS);
if (rtc_time & RTC_HOURS_12H_MODE) {
dev_err(&pdev->dev, "12 Hour mode is enabled but not supported.\n");
ret = -EINVAL;
goto out;
}
/* make sure it is actually functional */
if (rtc_time == 0x01000000) {
ssleep(1);
rtc_time = readl(pdata->ioaddr + RTC_TIME_REG_OFFS);
if (rtc_time == 0x01000000) {
dev_err(&pdev->dev, "internal RTC not ticking\n");
ret = -ENODEV;
goto out;
}
}
pdata->irq = platform_get_irq(pdev, 0);
platform_set_drvdata(pdev, pdata);
pdata->rtc = devm_rtc_allocate_device(&pdev->dev);
if (IS_ERR(pdata->rtc)) {
ret = PTR_ERR(pdata->rtc);
goto out;
}
if (pdata->irq >= 0) {
writel(0, pdata->ioaddr + RTC_ALARM_INTERRUPT_MASK_REG_OFFS);
if (devm_request_irq(&pdev->dev, pdata->irq, mv_rtc_interrupt,
IRQF_SHARED,
pdev->name, pdata) < 0) {
dev_warn(&pdev->dev, "interrupt not available.\n");
pdata->irq = -1;
}
}
if (pdata->irq >= 0)
device_init_wakeup(&pdev->dev, 1);
else
clear_bit(RTC_FEATURE_ALARM, pdata->rtc->features);
pdata->rtc->ops = &mv_rtc_ops;
pdata->rtc->range_min = RTC_TIMESTAMP_BEGIN_2000;
pdata->rtc->range_max = RTC_TIMESTAMP_END_2099;
ret = devm_rtc_register_device(pdata->rtc);
if (!ret)
return 0;
out:
if (!IS_ERR(pdata->clk))
clk_disable_unprepare(pdata->clk);
return ret;
}
static int __exit mv_rtc_remove(struct platform_device *pdev)
{
struct rtc_plat_data *pdata = platform_get_drvdata(pdev);
if (pdata->irq >= 0)
device_init_wakeup(&pdev->dev, 0);
if (!IS_ERR(pdata->clk))
clk_disable_unprepare(pdata->clk);
return 0;
}
#ifdef CONFIG_OF
static const struct of_device_id rtc_mv_of_match_table[] = {
{ .compatible = "marvell,orion-rtc", },
{}
};
MODULE_DEVICE_TABLE(of, rtc_mv_of_match_table);
#endif
static struct platform_driver mv_rtc_driver = {
.remove = __exit_p(mv_rtc_remove),
.driver = {
.name = "rtc-mv",
.of_match_table = of_match_ptr(rtc_mv_of_match_table),
},
};
module_platform_driver_probe(mv_rtc_driver, mv_rtc_probe);
MODULE_AUTHOR("Saeed Bishara <[email protected]>");
MODULE_DESCRIPTION("Marvell RTC driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:rtc-mv");
| linux-master | drivers/rtc/rtc-mv.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2017 Spreadtrum Communications Inc.
*
*/
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/rtc.h>
#define SPRD_RTC_SEC_CNT_VALUE 0x0
#define SPRD_RTC_MIN_CNT_VALUE 0x4
#define SPRD_RTC_HOUR_CNT_VALUE 0x8
#define SPRD_RTC_DAY_CNT_VALUE 0xc
#define SPRD_RTC_SEC_CNT_UPD 0x10
#define SPRD_RTC_MIN_CNT_UPD 0x14
#define SPRD_RTC_HOUR_CNT_UPD 0x18
#define SPRD_RTC_DAY_CNT_UPD 0x1c
#define SPRD_RTC_SEC_ALM_UPD 0x20
#define SPRD_RTC_MIN_ALM_UPD 0x24
#define SPRD_RTC_HOUR_ALM_UPD 0x28
#define SPRD_RTC_DAY_ALM_UPD 0x2c
#define SPRD_RTC_INT_EN 0x30
#define SPRD_RTC_INT_RAW_STS 0x34
#define SPRD_RTC_INT_CLR 0x38
#define SPRD_RTC_INT_MASK_STS 0x3C
#define SPRD_RTC_SEC_ALM_VALUE 0x40
#define SPRD_RTC_MIN_ALM_VALUE 0x44
#define SPRD_RTC_HOUR_ALM_VALUE 0x48
#define SPRD_RTC_DAY_ALM_VALUE 0x4c
#define SPRD_RTC_SPG_VALUE 0x50
#define SPRD_RTC_SPG_UPD 0x54
#define SPRD_RTC_PWR_CTRL 0x58
#define SPRD_RTC_PWR_STS 0x5c
#define SPRD_RTC_SEC_AUXALM_UPD 0x60
#define SPRD_RTC_MIN_AUXALM_UPD 0x64
#define SPRD_RTC_HOUR_AUXALM_UPD 0x68
#define SPRD_RTC_DAY_AUXALM_UPD 0x6c
/* BIT & MASK definition for SPRD_RTC_INT_* registers */
#define SPRD_RTC_SEC_EN BIT(0)
#define SPRD_RTC_MIN_EN BIT(1)
#define SPRD_RTC_HOUR_EN BIT(2)
#define SPRD_RTC_DAY_EN BIT(3)
#define SPRD_RTC_ALARM_EN BIT(4)
#define SPRD_RTC_HRS_FORMAT_EN BIT(5)
#define SPRD_RTC_AUXALM_EN BIT(6)
#define SPRD_RTC_SPG_UPD_EN BIT(7)
#define SPRD_RTC_SEC_UPD_EN BIT(8)
#define SPRD_RTC_MIN_UPD_EN BIT(9)
#define SPRD_RTC_HOUR_UPD_EN BIT(10)
#define SPRD_RTC_DAY_UPD_EN BIT(11)
#define SPRD_RTC_ALMSEC_UPD_EN BIT(12)
#define SPRD_RTC_ALMMIN_UPD_EN BIT(13)
#define SPRD_RTC_ALMHOUR_UPD_EN BIT(14)
#define SPRD_RTC_ALMDAY_UPD_EN BIT(15)
#define SPRD_RTC_INT_MASK GENMASK(15, 0)
#define SPRD_RTC_TIME_INT_MASK \
(SPRD_RTC_SEC_UPD_EN | SPRD_RTC_MIN_UPD_EN | \
SPRD_RTC_HOUR_UPD_EN | SPRD_RTC_DAY_UPD_EN)
#define SPRD_RTC_ALMTIME_INT_MASK \
(SPRD_RTC_ALMSEC_UPD_EN | SPRD_RTC_ALMMIN_UPD_EN | \
SPRD_RTC_ALMHOUR_UPD_EN | SPRD_RTC_ALMDAY_UPD_EN)
#define SPRD_RTC_ALM_INT_MASK \
(SPRD_RTC_SEC_EN | SPRD_RTC_MIN_EN | \
SPRD_RTC_HOUR_EN | SPRD_RTC_DAY_EN | \
SPRD_RTC_ALARM_EN | SPRD_RTC_AUXALM_EN)
/* second/minute/hour/day values mask definition */
#define SPRD_RTC_SEC_MASK GENMASK(5, 0)
#define SPRD_RTC_MIN_MASK GENMASK(5, 0)
#define SPRD_RTC_HOUR_MASK GENMASK(4, 0)
#define SPRD_RTC_DAY_MASK GENMASK(15, 0)
/* alarm lock definition for SPRD_RTC_SPG_UPD register */
#define SPRD_RTC_ALMLOCK_MASK GENMASK(7, 0)
#define SPRD_RTC_ALM_UNLOCK 0xa5
#define SPRD_RTC_ALM_LOCK (~SPRD_RTC_ALM_UNLOCK & \
SPRD_RTC_ALMLOCK_MASK)
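/* the lock value is the complement of the unlock magic: ~0xa5 & 0xff = 0x5a */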
/* SPG values definition for SPRD_RTC_SPG_UPD register */
#define SPRD_RTC_POWEROFF_ALM_FLAG BIT(8)
/* power control/status definition */
#define SPRD_RTC_POWER_RESET_VALUE 0x96
#define SPRD_RTC_POWER_STS_CLEAR GENMASK(7, 0)
#define SPRD_RTC_POWER_STS_SHIFT 8
#define SPRD_RTC_POWER_STS_VALID \
(~SPRD_RTC_POWER_RESET_VALUE << SPRD_RTC_POWER_STS_SHIFT)
/* timeout of synchronizing time and alarm registers (us) */
#define SPRD_RTC_POLL_TIMEOUT 200000
#define SPRD_RTC_POLL_DELAY_US 20000
struct sprd_rtc {
struct rtc_device *rtc;
struct regmap *regmap;
struct device *dev;
u32 base;
int irq;
bool valid;
};
/*
 * The Spreadtrum RTC controller has 3 groups of registers: time, normal
 * alarm and auxiliary alarm. The time group registers set the RTC time,
 * the normal alarm registers set the normal alarm, and the auxiliary
 * alarm registers set the auxiliary alarm. Both the normal alarm event
 * and the auxiliary alarm event can wake the system from deep sleep, but
 * only the normal alarm event can power up the system from the power-down
 * state.
*/
enum sprd_rtc_reg_types {
SPRD_RTC_TIME,
SPRD_RTC_ALARM,
SPRD_RTC_AUX_ALARM,
};
static int sprd_rtc_clear_alarm_ints(struct sprd_rtc *rtc)
{
return regmap_write(rtc->regmap, rtc->base + SPRD_RTC_INT_CLR,
SPRD_RTC_ALM_INT_MASK);
}
static int sprd_rtc_lock_alarm(struct sprd_rtc *rtc, bool lock)
{
int ret;
u32 val;
ret = regmap_read(rtc->regmap, rtc->base + SPRD_RTC_SPG_VALUE, &val);
if (ret)
return ret;
val &= ~SPRD_RTC_ALMLOCK_MASK;
if (lock)
val |= SPRD_RTC_ALM_LOCK;
else
val |= SPRD_RTC_ALM_UNLOCK | SPRD_RTC_POWEROFF_ALM_FLAG;
ret = regmap_write(rtc->regmap, rtc->base + SPRD_RTC_SPG_UPD, val);
if (ret)
return ret;
/* wait until the SPG value is updated successfully */
ret = regmap_read_poll_timeout(rtc->regmap,
rtc->base + SPRD_RTC_INT_RAW_STS, val,
(val & SPRD_RTC_SPG_UPD_EN),
SPRD_RTC_POLL_DELAY_US,
SPRD_RTC_POLL_TIMEOUT);
if (ret) {
dev_err(rtc->dev, "failed to update SPG value:%d\n", ret);
return ret;
}
return regmap_write(rtc->regmap, rtc->base + SPRD_RTC_INT_CLR,
SPRD_RTC_SPG_UPD_EN);
}
static int sprd_rtc_get_secs(struct sprd_rtc *rtc, enum sprd_rtc_reg_types type,
time64_t *secs)
{
u32 sec_reg, min_reg, hour_reg, day_reg;
u32 val, sec, min, hour, day;
int ret;
switch (type) {
case SPRD_RTC_TIME:
sec_reg = SPRD_RTC_SEC_CNT_VALUE;
min_reg = SPRD_RTC_MIN_CNT_VALUE;
hour_reg = SPRD_RTC_HOUR_CNT_VALUE;
day_reg = SPRD_RTC_DAY_CNT_VALUE;
break;
case SPRD_RTC_ALARM:
sec_reg = SPRD_RTC_SEC_ALM_VALUE;
min_reg = SPRD_RTC_MIN_ALM_VALUE;
hour_reg = SPRD_RTC_HOUR_ALM_VALUE;
day_reg = SPRD_RTC_DAY_ALM_VALUE;
break;
case SPRD_RTC_AUX_ALARM:
sec_reg = SPRD_RTC_SEC_AUXALM_UPD;
min_reg = SPRD_RTC_MIN_AUXALM_UPD;
hour_reg = SPRD_RTC_HOUR_AUXALM_UPD;
day_reg = SPRD_RTC_DAY_AUXALM_UPD;
break;
default:
return -EINVAL;
}
ret = regmap_read(rtc->regmap, rtc->base + sec_reg, &val);
if (ret)
return ret;
sec = val & SPRD_RTC_SEC_MASK;
ret = regmap_read(rtc->regmap, rtc->base + min_reg, &val);
if (ret)
return ret;
min = val & SPRD_RTC_MIN_MASK;
ret = regmap_read(rtc->regmap, rtc->base + hour_reg, &val);
if (ret)
return ret;
hour = val & SPRD_RTC_HOUR_MASK;
ret = regmap_read(rtc->regmap, rtc->base + day_reg, &val);
if (ret)
return ret;
day = val & SPRD_RTC_DAY_MASK;
*secs = (((time64_t)(day * 24) + hour) * 60 + min) * 60 + sec;
return 0;
}
static int sprd_rtc_set_secs(struct sprd_rtc *rtc, enum sprd_rtc_reg_types type,
time64_t secs)
{
u32 sec_reg, min_reg, hour_reg, day_reg, sts_mask;
u32 sec, min, hour, day, val;
int ret, rem;
/* convert seconds to RTC time format */
day = div_s64_rem(secs, 86400, &rem);
hour = rem / 3600;
rem -= hour * 3600;
min = rem / 60;
sec = rem - min * 60;
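/* e.g. secs = 90061 gives day = 1, hour = 1, min = 1, sec = 1 */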
switch (type) {
case SPRD_RTC_TIME:
sec_reg = SPRD_RTC_SEC_CNT_UPD;
min_reg = SPRD_RTC_MIN_CNT_UPD;
hour_reg = SPRD_RTC_HOUR_CNT_UPD;
day_reg = SPRD_RTC_DAY_CNT_UPD;
sts_mask = SPRD_RTC_TIME_INT_MASK;
break;
case SPRD_RTC_ALARM:
sec_reg = SPRD_RTC_SEC_ALM_UPD;
min_reg = SPRD_RTC_MIN_ALM_UPD;
hour_reg = SPRD_RTC_HOUR_ALM_UPD;
day_reg = SPRD_RTC_DAY_ALM_UPD;
sts_mask = SPRD_RTC_ALMTIME_INT_MASK;
break;
case SPRD_RTC_AUX_ALARM:
sec_reg = SPRD_RTC_SEC_AUXALM_UPD;
min_reg = SPRD_RTC_MIN_AUXALM_UPD;
hour_reg = SPRD_RTC_HOUR_AUXALM_UPD;
day_reg = SPRD_RTC_DAY_AUXALM_UPD;
sts_mask = 0;
break;
default:
return -EINVAL;
}
ret = regmap_write(rtc->regmap, rtc->base + sec_reg, sec);
if (ret)
return ret;
ret = regmap_write(rtc->regmap, rtc->base + min_reg, min);
if (ret)
return ret;
ret = regmap_write(rtc->regmap, rtc->base + hour_reg, hour);
if (ret)
return ret;
ret = regmap_write(rtc->regmap, rtc->base + day_reg, day);
if (ret)
return ret;
if (type == SPRD_RTC_AUX_ALARM)
return 0;
/*
 * The time and normal alarm registers live in the always-powered-on
 * region supplied by VDDRTC, so updating them takes a long time,
 * about 125ms. Thus we must wait here until all values have been
 * updated successfully.
*/
ret = regmap_read_poll_timeout(rtc->regmap,
rtc->base + SPRD_RTC_INT_RAW_STS, val,
((val & sts_mask) == sts_mask),
SPRD_RTC_POLL_DELAY_US,
SPRD_RTC_POLL_TIMEOUT);
if (ret < 0) {
dev_err(rtc->dev, "set time/alarm values timeout\n");
return ret;
}
return regmap_write(rtc->regmap, rtc->base + SPRD_RTC_INT_CLR,
sts_mask);
}
static int sprd_rtc_set_aux_alarm(struct device *dev, struct rtc_wkalrm *alrm)
{
struct sprd_rtc *rtc = dev_get_drvdata(dev);
time64_t secs = rtc_tm_to_time64(&alrm->time);
int ret;
/* clear the auxiliary alarm interrupt status */
ret = regmap_write(rtc->regmap, rtc->base + SPRD_RTC_INT_CLR,
SPRD_RTC_AUXALM_EN);
if (ret)
return ret;
ret = sprd_rtc_set_secs(rtc, SPRD_RTC_AUX_ALARM, secs);
if (ret)
return ret;
if (alrm->enabled) {
ret = regmap_update_bits(rtc->regmap,
rtc->base + SPRD_RTC_INT_EN,
SPRD_RTC_AUXALM_EN,
SPRD_RTC_AUXALM_EN);
} else {
ret = regmap_update_bits(rtc->regmap,
rtc->base + SPRD_RTC_INT_EN,
SPRD_RTC_AUXALM_EN, 0);
}
return ret;
}
static int sprd_rtc_read_time(struct device *dev, struct rtc_time *tm)
{
struct sprd_rtc *rtc = dev_get_drvdata(dev);
time64_t secs;
int ret;
if (!rtc->valid) {
dev_warn(dev, "RTC values are invalid\n");
return -EINVAL;
}
ret = sprd_rtc_get_secs(rtc, SPRD_RTC_TIME, &secs);
if (ret)
return ret;
rtc_time64_to_tm(secs, tm);
return 0;
}
static int sprd_rtc_set_time(struct device *dev, struct rtc_time *tm)
{
struct sprd_rtc *rtc = dev_get_drvdata(dev);
time64_t secs = rtc_tm_to_time64(tm);
int ret;
ret = sprd_rtc_set_secs(rtc, SPRD_RTC_TIME, secs);
if (ret)
return ret;
if (!rtc->valid) {
/* Clear RTC power status firstly */
ret = regmap_write(rtc->regmap, rtc->base + SPRD_RTC_PWR_CTRL,
SPRD_RTC_POWER_STS_CLEAR);
if (ret)
return ret;
/*
 * Set the RTC power status to indicate that the RTC now has valid
 * time values.
*/
ret = regmap_write(rtc->regmap, rtc->base + SPRD_RTC_PWR_CTRL,
SPRD_RTC_POWER_STS_VALID);
if (ret)
return ret;
rtc->valid = true;
}
return 0;
}
static int sprd_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
{
struct sprd_rtc *rtc = dev_get_drvdata(dev);
time64_t secs;
int ret;
u32 val;
/*
* The RTC core checks to see if there is an alarm already set in RTC
* hardware, and we always read the normal alarm at this time.
*/
ret = sprd_rtc_get_secs(rtc, SPRD_RTC_ALARM, &secs);
if (ret)
return ret;
rtc_time64_to_tm(secs, &alrm->time);
ret = regmap_read(rtc->regmap, rtc->base + SPRD_RTC_INT_EN, &val);
if (ret)
return ret;
alrm->enabled = !!(val & SPRD_RTC_ALARM_EN);
ret = regmap_read(rtc->regmap, rtc->base + SPRD_RTC_INT_RAW_STS, &val);
if (ret)
return ret;
alrm->pending = !!(val & SPRD_RTC_ALARM_EN);
return 0;
}
static int sprd_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
{
struct sprd_rtc *rtc = dev_get_drvdata(dev);
time64_t secs = rtc_tm_to_time64(&alrm->time);
struct rtc_time aie_time =
rtc_ktime_to_tm(rtc->rtc->aie_timer.node.expires);
int ret;
/*
 * We have 2 groups of alarms: the normal alarm and the auxiliary alarm.
 * Both alarm events can wake the system from deep sleep, but only the
 * normal alarm event can power up the system from the power-down state.
 * Moreover, updating the auxiliary alarm registers does not require the
 * ~125ms status polling. Thus we usually set the auxiliary alarm to wake
 * the system from deep sleep, and set the normal alarm (with status
 * polling) for the other scenarios.
 *
 * So here we check if the alarm time was set by the aie_timer: if yes,
 * we set the normal alarm; if not, it is just a wake event, so we set
 * the auxiliary alarm.
*/
if (!rtc->rtc->aie_timer.enabled || rtc_tm_sub(&aie_time, &alrm->time))
return sprd_rtc_set_aux_alarm(dev, alrm);
/* clear the alarm interrupt status firstly */
ret = regmap_write(rtc->regmap, rtc->base + SPRD_RTC_INT_CLR,
SPRD_RTC_ALARM_EN);
if (ret)
return ret;
ret = sprd_rtc_set_secs(rtc, SPRD_RTC_ALARM, secs);
if (ret)
return ret;
if (alrm->enabled) {
ret = regmap_update_bits(rtc->regmap,
rtc->base + SPRD_RTC_INT_EN,
SPRD_RTC_ALARM_EN,
SPRD_RTC_ALARM_EN);
if (ret)
return ret;
/* unlock the alarm to enable the alarm function. */
ret = sprd_rtc_lock_alarm(rtc, false);
} else {
regmap_update_bits(rtc->regmap,
rtc->base + SPRD_RTC_INT_EN,
SPRD_RTC_ALARM_EN, 0);
/*
 * Lock the alarm function so that a spurious alarm event cannot
 * power up the system.
*/
ret = sprd_rtc_lock_alarm(rtc, true);
}
return ret;
}
static int sprd_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
{
struct sprd_rtc *rtc = dev_get_drvdata(dev);
int ret;
if (enabled) {
ret = regmap_update_bits(rtc->regmap,
rtc->base + SPRD_RTC_INT_EN,
SPRD_RTC_ALARM_EN | SPRD_RTC_AUXALM_EN,
SPRD_RTC_ALARM_EN | SPRD_RTC_AUXALM_EN);
if (ret)
return ret;
ret = sprd_rtc_lock_alarm(rtc, false);
} else {
regmap_update_bits(rtc->regmap, rtc->base + SPRD_RTC_INT_EN,
SPRD_RTC_ALARM_EN | SPRD_RTC_AUXALM_EN, 0);
ret = sprd_rtc_lock_alarm(rtc, true);
}
return ret;
}
static const struct rtc_class_ops sprd_rtc_ops = {
.read_time = sprd_rtc_read_time,
.set_time = sprd_rtc_set_time,
.read_alarm = sprd_rtc_read_alarm,
.set_alarm = sprd_rtc_set_alarm,
.alarm_irq_enable = sprd_rtc_alarm_irq_enable,
};
static irqreturn_t sprd_rtc_handler(int irq, void *dev_id)
{
struct sprd_rtc *rtc = dev_id;
int ret;
ret = sprd_rtc_clear_alarm_ints(rtc);
if (ret)
return IRQ_RETVAL(ret);
rtc_update_irq(rtc->rtc, 1, RTC_AF | RTC_IRQF);
return IRQ_HANDLED;
}
static int sprd_rtc_check_power_down(struct sprd_rtc *rtc)
{
u32 val;
int ret;
ret = regmap_read(rtc->regmap, rtc->base + SPRD_RTC_PWR_STS, &val);
if (ret)
return ret;
/*
 * If the RTC power status value is SPRD_RTC_POWER_RESET_VALUE, the
 * RTC has been powered down, so the RTC time values are invalid.
*/
rtc->valid = val != SPRD_RTC_POWER_RESET_VALUE;
return 0;
}
static int sprd_rtc_check_alarm_int(struct sprd_rtc *rtc)
{
u32 val;
int ret;
ret = regmap_read(rtc->regmap, rtc->base + SPRD_RTC_SPG_VALUE, &val);
if (ret)
return ret;
/*
 * The SPRD_RTC_INT_EN register is not in the always-powered-on region
 * supplied by VDDRTC, so we must check whether we need to enable the
 * alarm interrupt when the system boots.
 *
 * If SPRD_RTC_POWEROFF_ALM_FLAG is set, which is saved in the
 * always-powered-on region, an alarm was set before the last power
 * down, so we enable the alarm interrupt to help the RTC core see
 * that an alarm is already set in the RTC hardware.
*/
if (!(val & SPRD_RTC_POWEROFF_ALM_FLAG))
return 0;
return regmap_update_bits(rtc->regmap, rtc->base + SPRD_RTC_INT_EN,
SPRD_RTC_ALARM_EN, SPRD_RTC_ALARM_EN);
}
static int sprd_rtc_probe(struct platform_device *pdev)
{
struct device_node *node = pdev->dev.of_node;
struct sprd_rtc *rtc;
int ret;
rtc = devm_kzalloc(&pdev->dev, sizeof(*rtc), GFP_KERNEL);
if (!rtc)
return -ENOMEM;
rtc->regmap = dev_get_regmap(pdev->dev.parent, NULL);
if (!rtc->regmap)
return -ENODEV;
ret = of_property_read_u32(node, "reg", &rtc->base);
if (ret) {
dev_err(&pdev->dev, "failed to get RTC base address\n");
return ret;
}
rtc->irq = platform_get_irq(pdev, 0);
if (rtc->irq < 0)
return rtc->irq;
rtc->rtc = devm_rtc_allocate_device(&pdev->dev);
if (IS_ERR(rtc->rtc))
return PTR_ERR(rtc->rtc);
rtc->dev = &pdev->dev;
platform_set_drvdata(pdev, rtc);
/* check if we need set the alarm interrupt */
ret = sprd_rtc_check_alarm_int(rtc);
if (ret) {
dev_err(&pdev->dev, "failed to check RTC alarm interrupt\n");
return ret;
}
/* check if RTC time values are valid */
ret = sprd_rtc_check_power_down(rtc);
if (ret) {
dev_err(&pdev->dev, "failed to check RTC time values\n");
return ret;
}
ret = devm_request_threaded_irq(&pdev->dev, rtc->irq, NULL,
sprd_rtc_handler,
IRQF_ONESHOT | IRQF_EARLY_RESUME,
pdev->name, rtc);
if (ret < 0) {
dev_err(&pdev->dev, "failed to request RTC irq\n");
return ret;
}
device_init_wakeup(&pdev->dev, 1);
rtc->rtc->ops = &sprd_rtc_ops;
rtc->rtc->range_min = 0;
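/* 16-bit day counter: (1 << 16) * 86400 - 1 = 5662310399 seconds */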
rtc->rtc->range_max = 5662310399LL;
ret = devm_rtc_register_device(rtc->rtc);
if (ret) {
device_init_wakeup(&pdev->dev, 0);
return ret;
}
return 0;
}
static const struct of_device_id sprd_rtc_of_match[] = {
{ .compatible = "sprd,sc2731-rtc", },
{ },
};
MODULE_DEVICE_TABLE(of, sprd_rtc_of_match);
static struct platform_driver sprd_rtc_driver = {
.driver = {
.name = "sprd-rtc",
.of_match_table = sprd_rtc_of_match,
},
.probe = sprd_rtc_probe,
};
module_platform_driver(sprd_rtc_driver);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Spreadtrum RTC Device Driver");
MODULE_AUTHOR("Baolin Wang <[email protected]>");
| linux-master | drivers/rtc/rtc-sc27xx.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Ricoh RP5C01 RTC Driver
*
* Copyright 2009 Geert Uytterhoeven
*
* Based on the A3000 TOD code in arch/m68k/amiga/config.c
* Copyright (C) 1993 Hamish Macdonald
*/
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/rtc.h>
#include <linux/slab.h>
enum {
RP5C01_1_SECOND = 0x0, /* MODE 00 */
RP5C01_10_SECOND = 0x1, /* MODE 00 */
RP5C01_1_MINUTE = 0x2, /* MODE 00 and MODE 01 */
RP5C01_10_MINUTE = 0x3, /* MODE 00 and MODE 01 */
RP5C01_1_HOUR = 0x4, /* MODE 00 and MODE 01 */
RP5C01_10_HOUR = 0x5, /* MODE 00 and MODE 01 */
RP5C01_DAY_OF_WEEK = 0x6, /* MODE 00 and MODE 01 */
RP5C01_1_DAY = 0x7, /* MODE 00 and MODE 01 */
RP5C01_10_DAY = 0x8, /* MODE 00 and MODE 01 */
RP5C01_1_MONTH = 0x9, /* MODE 00 */
RP5C01_10_MONTH = 0xa, /* MODE 00 */
RP5C01_1_YEAR = 0xb, /* MODE 00 */
RP5C01_10_YEAR = 0xc, /* MODE 00 */
RP5C01_12_24_SELECT = 0xa, /* MODE 01 */
RP5C01_LEAP_YEAR = 0xb, /* MODE 01 */
RP5C01_MODE = 0xd, /* all modes */
RP5C01_TEST = 0xe, /* all modes */
RP5C01_RESET = 0xf, /* all modes */
};
#define RP5C01_12_24_SELECT_12 (0 << 0)
#define RP5C01_12_24_SELECT_24 (1 << 0)
#define RP5C01_10_HOUR_AM (0 << 1)
#define RP5C01_10_HOUR_PM (1 << 1)
#define RP5C01_MODE_TIMER_EN (1 << 3) /* timer enable */
#define RP5C01_MODE_ALARM_EN (1 << 2) /* alarm enable */
#define RP5C01_MODE_MODE_MASK (3 << 0)
#define RP5C01_MODE_MODE00 (0 << 0) /* time */
#define RP5C01_MODE_MODE01 (1 << 0) /* alarm, 12h/24h, leap year */
#define RP5C01_MODE_RAM_BLOCK10 (2 << 0) /* RAM 4 bits x 13 */
#define RP5C01_MODE_RAM_BLOCK11 (3 << 0) /* RAM 4 bits x 13 */
#define RP5C01_RESET_1HZ_PULSE (1 << 3)
#define RP5C01_RESET_16HZ_PULSE (1 << 2)
#define RP5C01_RESET_SECOND (1 << 1) /* reset divider stages for */
/* seconds or smaller units */
#define RP5C01_RESET_ALARM (1 << 0) /* reset all alarm registers */
struct rp5c01_priv {
u32 __iomem *regs;
struct rtc_device *rtc;
spinlock_t lock; /* against concurrent RTC/NVRAM access */
};
static inline unsigned int rp5c01_read(struct rp5c01_priv *priv,
unsigned int reg)
{
return __raw_readl(&priv->regs[reg]) & 0xf;
}
static inline void rp5c01_write(struct rp5c01_priv *priv, unsigned int val,
unsigned int reg)
{
__raw_writel(val, &priv->regs[reg]);
}
static void rp5c01_lock(struct rp5c01_priv *priv)
{
rp5c01_write(priv, RP5C01_MODE_MODE00, RP5C01_MODE);
}
static void rp5c01_unlock(struct rp5c01_priv *priv)
{
rp5c01_write(priv, RP5C01_MODE_TIMER_EN | RP5C01_MODE_MODE01,
RP5C01_MODE);
}
static int rp5c01_read_time(struct device *dev, struct rtc_time *tm)
{
struct rp5c01_priv *priv = dev_get_drvdata(dev);
spin_lock_irq(&priv->lock);
rp5c01_lock(priv);
tm->tm_sec = rp5c01_read(priv, RP5C01_10_SECOND) * 10 +
rp5c01_read(priv, RP5C01_1_SECOND);
tm->tm_min = rp5c01_read(priv, RP5C01_10_MINUTE) * 10 +
rp5c01_read(priv, RP5C01_1_MINUTE);
tm->tm_hour = rp5c01_read(priv, RP5C01_10_HOUR) * 10 +
rp5c01_read(priv, RP5C01_1_HOUR);
tm->tm_mday = rp5c01_read(priv, RP5C01_10_DAY) * 10 +
rp5c01_read(priv, RP5C01_1_DAY);
tm->tm_wday = rp5c01_read(priv, RP5C01_DAY_OF_WEEK);
tm->tm_mon = rp5c01_read(priv, RP5C01_10_MONTH) * 10 +
rp5c01_read(priv, RP5C01_1_MONTH) - 1;
tm->tm_year = rp5c01_read(priv, RP5C01_10_YEAR) * 10 +
rp5c01_read(priv, RP5C01_1_YEAR);
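/*
 * The chip only stores a two-digit year: values 0..69 are treated as
 * 2000..2069 (tm_year counts from 1900), 70..99 as 1970..1999.
 */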
if (tm->tm_year <= 69)
tm->tm_year += 100;
rp5c01_unlock(priv);
spin_unlock_irq(&priv->lock);
return 0;
}
static int rp5c01_set_time(struct device *dev, struct rtc_time *tm)
{
struct rp5c01_priv *priv = dev_get_drvdata(dev);
spin_lock_irq(&priv->lock);
rp5c01_lock(priv);
rp5c01_write(priv, tm->tm_sec / 10, RP5C01_10_SECOND);
rp5c01_write(priv, tm->tm_sec % 10, RP5C01_1_SECOND);
rp5c01_write(priv, tm->tm_min / 10, RP5C01_10_MINUTE);
rp5c01_write(priv, tm->tm_min % 10, RP5C01_1_MINUTE);
rp5c01_write(priv, tm->tm_hour / 10, RP5C01_10_HOUR);
rp5c01_write(priv, tm->tm_hour % 10, RP5C01_1_HOUR);
rp5c01_write(priv, tm->tm_mday / 10, RP5C01_10_DAY);
rp5c01_write(priv, tm->tm_mday % 10, RP5C01_1_DAY);
if (tm->tm_wday != -1)
rp5c01_write(priv, tm->tm_wday, RP5C01_DAY_OF_WEEK);
rp5c01_write(priv, (tm->tm_mon + 1) / 10, RP5C01_10_MONTH);
rp5c01_write(priv, (tm->tm_mon + 1) % 10, RP5C01_1_MONTH);
if (tm->tm_year >= 100)
tm->tm_year -= 100;
rp5c01_write(priv, tm->tm_year / 10, RP5C01_10_YEAR);
rp5c01_write(priv, tm->tm_year % 10, RP5C01_1_YEAR);
rp5c01_unlock(priv);
spin_unlock_irq(&priv->lock);
return 0;
}
static const struct rtc_class_ops rp5c01_rtc_ops = {
.read_time = rp5c01_read_time,
.set_time = rp5c01_set_time,
};
/*
 * The NVRAM is organized as two blocks of 13 4-bit nibbles each.
* We provide access to them like AmigaOS does: the high nibble of each 8-bit
* byte is stored in BLOCK10, the low nibble in BLOCK11.
*/
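/*
 * For example, the byte 0xa5 at position 3 is stored as nibble 0xa in
 * BLOCK10 register 3 and nibble 0x5 in BLOCK11 register 3.
 */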
static int rp5c01_nvram_read(void *_priv, unsigned int pos, void *val,
size_t bytes)
{
struct rp5c01_priv *priv = _priv;
u8 *buf = val;
spin_lock_irq(&priv->lock);
for (; bytes; bytes--) {
u8 data;
rp5c01_write(priv,
RP5C01_MODE_TIMER_EN | RP5C01_MODE_RAM_BLOCK10,
RP5C01_MODE);
data = rp5c01_read(priv, pos) << 4;
rp5c01_write(priv,
RP5C01_MODE_TIMER_EN | RP5C01_MODE_RAM_BLOCK11,
RP5C01_MODE);
data |= rp5c01_read(priv, pos++);
rp5c01_write(priv, RP5C01_MODE_TIMER_EN | RP5C01_MODE_MODE01,
RP5C01_MODE);
*buf++ = data;
}
spin_unlock_irq(&priv->lock);
return 0;
}
static int rp5c01_nvram_write(void *_priv, unsigned int pos, void *val,
size_t bytes)
{
struct rp5c01_priv *priv = _priv;
u8 *buf = val;
spin_lock_irq(&priv->lock);
for (; bytes; bytes--) {
u8 data = *buf++;
rp5c01_write(priv,
RP5C01_MODE_TIMER_EN | RP5C01_MODE_RAM_BLOCK10,
RP5C01_MODE);
rp5c01_write(priv, data >> 4, pos);
rp5c01_write(priv,
RP5C01_MODE_TIMER_EN | RP5C01_MODE_RAM_BLOCK11,
RP5C01_MODE);
rp5c01_write(priv, data & 0xf, pos++);
rp5c01_write(priv, RP5C01_MODE_TIMER_EN | RP5C01_MODE_MODE01,
RP5C01_MODE);
}
spin_unlock_irq(&priv->lock);
return 0;
}
static int __init rp5c01_rtc_probe(struct platform_device *dev)
{
struct resource *res;
struct rp5c01_priv *priv;
struct rtc_device *rtc;
int error;
struct nvmem_config nvmem_cfg = {
.name = "rp5c01_nvram",
.word_size = 1,
.stride = 1,
.size = RP5C01_MODE,
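/* RP5C01_MODE == 0xd, i.e. 13 bytes: registers 0x0-0xc in each RAM block */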
.reg_read = rp5c01_nvram_read,
.reg_write = rp5c01_nvram_write,
};
res = platform_get_resource(dev, IORESOURCE_MEM, 0);
if (!res)
return -ENODEV;
priv = devm_kzalloc(&dev->dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
priv->regs = devm_ioremap(&dev->dev, res->start, resource_size(res));
if (!priv->regs)
return -ENOMEM;
spin_lock_init(&priv->lock);
platform_set_drvdata(dev, priv);
rtc = devm_rtc_allocate_device(&dev->dev);
if (IS_ERR(rtc))
return PTR_ERR(rtc);
rtc->ops = &rp5c01_rtc_ops;
priv->rtc = rtc;
nvmem_cfg.priv = priv;
error = devm_rtc_nvmem_register(rtc, &nvmem_cfg);
if (error)
return error;
return devm_rtc_register_device(rtc);
}
static struct platform_driver rp5c01_rtc_driver = {
.driver = {
.name = "rtc-rp5c01",
},
};
module_platform_driver_probe(rp5c01_rtc_driver, rp5c01_rtc_probe);
MODULE_AUTHOR("Geert Uytterhoeven <[email protected]>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Ricoh RP5C01 RTC driver");
MODULE_ALIAS("platform:rtc-rp5c01");
| linux-master | drivers/rtc/rtc-rp5c01.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* rtc-tps6586x.c: RTC driver for TI PMIC TPS6586X
*
* Copyright (c) 2012, NVIDIA Corporation.
*
* Author: Laxman Dewangan <[email protected]>
*/
#include <linux/device.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/mfd/tps6586x.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/rtc.h>
#include <linux/slab.h>
#define RTC_CTRL 0xc0
#define POR_RESET_N BIT(7)
#define OSC_SRC_SEL BIT(6)
#define RTC_ENABLE BIT(5) /* enables alarm */
#define RTC_BUF_ENABLE BIT(4) /* 32 KHz buffer enable */
#define PRE_BYPASS BIT(3) /* 0=1KHz or 1=32KHz updates */
#define CL_SEL_MASK (BIT(2)|BIT(1))
#define CL_SEL_POS 1
#define RTC_ALARM1_HI 0xc1
#define RTC_COUNT4 0xc6
/* start a PMU RTC access by reading the register prior to the RTC_COUNT4 */
#define RTC_COUNT4_DUMMYREAD 0xc5
/* the alarm match value is only 14 bits wide, in seconds */
#define ALM1_VALID_RANGE_IN_SEC 0x3FFF
#define TPS6586X_RTC_CL_SEL_1_5PF 0x0
#define TPS6586X_RTC_CL_SEL_6_5PF 0x1
#define TPS6586X_RTC_CL_SEL_7_5PF 0x2
#define TPS6586X_RTC_CL_SEL_12_5PF 0x3
struct tps6586x_rtc {
struct device *dev;
struct rtc_device *rtc;
int irq;
bool irq_en;
};
static inline struct device *to_tps6586x_dev(struct device *dev)
{
return dev->parent;
}
static int tps6586x_rtc_read_time(struct device *dev, struct rtc_time *tm)
{
struct device *tps_dev = to_tps6586x_dev(dev);
unsigned long long ticks = 0;
time64_t seconds;
u8 buff[6];
int ret;
int i;
ret = tps6586x_reads(tps_dev, RTC_COUNT4_DUMMYREAD, sizeof(buff), buff);
if (ret < 0) {
dev_err(dev, "read counter failed with err %d\n", ret);
return ret;
}
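/* buff[0] is the dummy read at 0xc5; buff[1..5] hold the 40-bit counter, MSB first */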
for (i = 1; i < sizeof(buff); i++) {
ticks <<= 8;
ticks |= buff[i];
}
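/* the counter ticks at 1024 Hz, so bits 10 and up count whole seconds */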
seconds = ticks >> 10;
rtc_time64_to_tm(seconds, tm);
return 0;
}
static int tps6586x_rtc_set_time(struct device *dev, struct rtc_time *tm)
{
struct device *tps_dev = to_tps6586x_dev(dev);
unsigned long long ticks;
time64_t seconds;
u8 buff[5];
int ret;
seconds = rtc_tm_to_time64(tm);
ticks = (unsigned long long)seconds << 10;
buff[0] = (ticks >> 32) & 0xff;
buff[1] = (ticks >> 24) & 0xff;
buff[2] = (ticks >> 16) & 0xff;
buff[3] = (ticks >> 8) & 0xff;
buff[4] = ticks & 0xff;
/* Disable RTC before changing time */
ret = tps6586x_clr_bits(tps_dev, RTC_CTRL, RTC_ENABLE);
if (ret < 0) {
dev_err(dev, "failed to clear RTC_ENABLE\n");
return ret;
}
ret = tps6586x_writes(tps_dev, RTC_COUNT4, sizeof(buff), buff);
if (ret < 0) {
dev_err(dev, "failed to program new time\n");
return ret;
}
/* Enable RTC */
ret = tps6586x_set_bits(tps_dev, RTC_CTRL, RTC_ENABLE);
if (ret < 0) {
dev_err(dev, "failed to set RTC_ENABLE\n");
return ret;
}
return 0;
}
static int tps6586x_rtc_alarm_irq_enable(struct device *dev,
unsigned int enabled)
{
struct tps6586x_rtc *rtc = dev_get_drvdata(dev);
if (enabled && !rtc->irq_en) {
enable_irq(rtc->irq);
rtc->irq_en = true;
} else if (!enabled && rtc->irq_en) {
disable_irq(rtc->irq);
rtc->irq_en = false;
}
return 0;
}
static int tps6586x_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
{
struct device *tps_dev = to_tps6586x_dev(dev);
time64_t seconds;
unsigned long ticks;
unsigned long rtc_current_time;
unsigned long long rticks = 0;
u8 buff[3];
u8 rbuff[6];
int ret;
int i;
seconds = rtc_tm_to_time64(&alrm->time);
ret = tps6586x_rtc_alarm_irq_enable(dev, alrm->enabled);
if (ret < 0) {
dev_err(dev, "can't set alarm irq, err %d\n", ret);
return ret;
}
ret = tps6586x_reads(tps_dev, RTC_COUNT4_DUMMYREAD,
sizeof(rbuff), rbuff);
if (ret < 0) {
dev_err(dev, "read counter failed with err %d\n", ret);
return ret;
}
for (i = 1; i < sizeof(rbuff); i++) {
rticks <<= 8;
rticks |= rbuff[i];
}
rtc_current_time = rticks >> 10;
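/*
 * Only 24 bits of ticks (14 bits of seconds) fit in the ALARM1
 * registers, so an alarm further out than ALM1_VALID_RANGE_IN_SEC
 * cannot be represented; pull it back to just before now instead.
 */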
if ((seconds - rtc_current_time) > ALM1_VALID_RANGE_IN_SEC)
seconds = rtc_current_time - 1;
ticks = (unsigned long long)seconds << 10;
buff[0] = (ticks >> 16) & 0xff;
buff[1] = (ticks >> 8) & 0xff;
buff[2] = ticks & 0xff;
ret = tps6586x_writes(tps_dev, RTC_ALARM1_HI, sizeof(buff), buff);
if (ret)
dev_err(dev, "programming alarm failed with err %d\n", ret);
return ret;
}
static int tps6586x_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
{
struct device *tps_dev = to_tps6586x_dev(dev);
unsigned long ticks;
time64_t seconds;
u8 buff[3];
int ret;
ret = tps6586x_reads(tps_dev, RTC_ALARM1_HI, sizeof(buff), buff);
if (ret) {
dev_err(dev, "read RTC_ALARM1_HI failed with err %d\n", ret);
return ret;
}
ticks = (buff[0] << 16) | (buff[1] << 8) | buff[2];
seconds = ticks >> 10;
rtc_time64_to_tm(seconds, &alrm->time);
return 0;
}
static const struct rtc_class_ops tps6586x_rtc_ops = {
.read_time = tps6586x_rtc_read_time,
.set_time = tps6586x_rtc_set_time,
.set_alarm = tps6586x_rtc_set_alarm,
.read_alarm = tps6586x_rtc_read_alarm,
.alarm_irq_enable = tps6586x_rtc_alarm_irq_enable,
};
static irqreturn_t tps6586x_rtc_irq(int irq, void *data)
{
struct tps6586x_rtc *rtc = data;
rtc_update_irq(rtc->rtc, 1, RTC_IRQF | RTC_AF);
return IRQ_HANDLED;
}
static int tps6586x_rtc_probe(struct platform_device *pdev)
{
struct device *tps_dev = to_tps6586x_dev(&pdev->dev);
struct tps6586x_rtc *rtc;
int ret;
rtc = devm_kzalloc(&pdev->dev, sizeof(*rtc), GFP_KERNEL);
if (!rtc)
return -ENOMEM;
rtc->dev = &pdev->dev;
rtc->irq = platform_get_irq(pdev, 0);
/* 1 kHz tick mode, enable tick counting */
ret = tps6586x_update(tps_dev, RTC_CTRL,
RTC_ENABLE | OSC_SRC_SEL |
((TPS6586X_RTC_CL_SEL_1_5PF << CL_SEL_POS) & CL_SEL_MASK),
RTC_ENABLE | OSC_SRC_SEL | PRE_BYPASS | CL_SEL_MASK);
if (ret < 0) {
dev_err(&pdev->dev, "unable to start counter\n");
return ret;
}
device_init_wakeup(&pdev->dev, 1);
platform_set_drvdata(pdev, rtc);
rtc->rtc = devm_rtc_allocate_device(&pdev->dev);
if (IS_ERR(rtc->rtc)) {
ret = PTR_ERR(rtc->rtc);
goto fail_rtc_register;
}
rtc->rtc->ops = &tps6586x_rtc_ops;
rtc->rtc->range_max = (1ULL << 30) - 1; /* 30-bit seconds */
rtc->rtc->alarm_offset_max = ALM1_VALID_RANGE_IN_SEC;
rtc->rtc->start_secs = mktime64(2009, 1, 1, 0, 0, 0);
rtc->rtc->set_start_time = true;
irq_set_status_flags(rtc->irq, IRQ_NOAUTOEN);
ret = devm_request_threaded_irq(&pdev->dev, rtc->irq, NULL,
tps6586x_rtc_irq,
IRQF_ONESHOT,
dev_name(&pdev->dev), rtc);
if (ret < 0) {
dev_err(&pdev->dev, "request IRQ(%d) failed with ret %d\n",
rtc->irq, ret);
goto fail_rtc_register;
}
ret = devm_rtc_register_device(rtc->rtc);
if (ret)
goto fail_rtc_register;
return 0;
fail_rtc_register:
tps6586x_update(tps_dev, RTC_CTRL, 0,
RTC_ENABLE | OSC_SRC_SEL | PRE_BYPASS | CL_SEL_MASK);
return ret;
}
static void tps6586x_rtc_remove(struct platform_device *pdev)
{
struct device *tps_dev = to_tps6586x_dev(&pdev->dev);
tps6586x_update(tps_dev, RTC_CTRL, 0,
RTC_ENABLE | OSC_SRC_SEL | PRE_BYPASS | CL_SEL_MASK);
}
#ifdef CONFIG_PM_SLEEP
static int tps6586x_rtc_suspend(struct device *dev)
{
struct tps6586x_rtc *rtc = dev_get_drvdata(dev);
if (device_may_wakeup(dev))
enable_irq_wake(rtc->irq);
return 0;
}
static int tps6586x_rtc_resume(struct device *dev)
{
struct tps6586x_rtc *rtc = dev_get_drvdata(dev);
if (device_may_wakeup(dev))
disable_irq_wake(rtc->irq);
return 0;
}
#endif
static SIMPLE_DEV_PM_OPS(tps6586x_pm_ops, tps6586x_rtc_suspend,
tps6586x_rtc_resume);
static struct platform_driver tps6586x_rtc_driver = {
.driver = {
.name = "tps6586x-rtc",
.pm = &tps6586x_pm_ops,
},
.probe = tps6586x_rtc_probe,
.remove_new = tps6586x_rtc_remove,
};
module_platform_driver(tps6586x_rtc_driver);
MODULE_ALIAS("platform:tps6586x-rtc");
MODULE_DESCRIPTION("TI TPS6586x RTC driver");
MODULE_AUTHOR("Laxman dewangan <[email protected]>");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/rtc/rtc-tps6586x.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* I2C client/driver for the ST M41T80 family of i2c rtc chips.
*
* Author: Alexander Bigga <[email protected]>
*
* Based on m41t00.c by Mark A. Greer <[email protected]>
*
* 2006 (c) mycable GmbH
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/bcd.h>
#include <linux/clk-provider.h>
#include <linux/i2c.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/rtc.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/string.h>
#ifdef CONFIG_RTC_DRV_M41T80_WDT
#include <linux/fs.h>
#include <linux/ioctl.h>
#include <linux/miscdevice.h>
#include <linux/reboot.h>
#include <linux/watchdog.h>
#endif
#define M41T80_REG_SSEC 0x00
#define M41T80_REG_SEC 0x01
#define M41T80_REG_MIN 0x02
#define M41T80_REG_HOUR 0x03
#define M41T80_REG_WDAY 0x04
#define M41T80_REG_DAY 0x05
#define M41T80_REG_MON 0x06
#define M41T80_REG_YEAR 0x07
#define M41T80_REG_ALARM_MON 0x0a
#define M41T80_REG_ALARM_DAY 0x0b
#define M41T80_REG_ALARM_HOUR 0x0c
#define M41T80_REG_ALARM_MIN 0x0d
#define M41T80_REG_ALARM_SEC 0x0e
#define M41T80_REG_FLAGS 0x0f
#define M41T80_REG_SQW 0x13
#define M41T80_DATETIME_REG_SIZE (M41T80_REG_YEAR + 1)
#define M41T80_ALARM_REG_SIZE \
(M41T80_REG_ALARM_SEC + 1 - M41T80_REG_ALARM_MON)
#define M41T80_SQW_MAX_FREQ 32768
#define M41T80_SEC_ST BIT(7) /* ST: Stop Bit */
#define M41T80_ALMON_AFE BIT(7) /* AFE: AF Enable Bit */
#define M41T80_ALMON_SQWE BIT(6) /* SQWE: SQW Enable Bit */
#define M41T80_ALHOUR_HT BIT(6) /* HT: Halt Update Bit */
#define M41T80_FLAGS_OF BIT(2) /* OF: Oscillator Failure Bit */
#define M41T80_FLAGS_AF BIT(6) /* AF: Alarm Flag Bit */
#define M41T80_FLAGS_BATT_LOW BIT(4) /* BL: Battery Low Bit */
#define M41T80_WATCHDOG_RB2 BIT(7) /* RB: Watchdog resolution */
#define M41T80_WATCHDOG_RB1 BIT(1) /* RB: Watchdog resolution */
#define M41T80_WATCHDOG_RB0 BIT(0) /* RB: Watchdog resolution */
#define M41T80_FEATURE_HT BIT(0) /* Halt feature */
#define M41T80_FEATURE_BL BIT(1) /* Battery low indicator */
#define M41T80_FEATURE_SQ BIT(2) /* Squarewave feature */
#define M41T80_FEATURE_WD BIT(3) /* Extra watchdog resolution */
#define M41T80_FEATURE_SQ_ALT BIT(4) /* RSx bits are in reg 4 */
static const struct i2c_device_id m41t80_id[] = {
{ "m41t62", M41T80_FEATURE_SQ | M41T80_FEATURE_SQ_ALT },
{ "m41t65", M41T80_FEATURE_HT | M41T80_FEATURE_WD },
{ "m41t80", M41T80_FEATURE_SQ },
{ "m41t81", M41T80_FEATURE_HT | M41T80_FEATURE_SQ},
{ "m41t81s", M41T80_FEATURE_HT | M41T80_FEATURE_BL | M41T80_FEATURE_SQ },
{ "m41t82", M41T80_FEATURE_HT | M41T80_FEATURE_BL | M41T80_FEATURE_SQ },
{ "m41t83", M41T80_FEATURE_HT | M41T80_FEATURE_BL | M41T80_FEATURE_SQ },
{ "m41st84", M41T80_FEATURE_HT | M41T80_FEATURE_BL | M41T80_FEATURE_SQ },
{ "m41st85", M41T80_FEATURE_HT | M41T80_FEATURE_BL | M41T80_FEATURE_SQ },
{ "m41st87", M41T80_FEATURE_HT | M41T80_FEATURE_BL | M41T80_FEATURE_SQ },
{ "rv4162", M41T80_FEATURE_SQ | M41T80_FEATURE_WD | M41T80_FEATURE_SQ_ALT },
{ }
};
MODULE_DEVICE_TABLE(i2c, m41t80_id);
static const __maybe_unused struct of_device_id m41t80_of_match[] = {
{
.compatible = "st,m41t62",
.data = (void *)(M41T80_FEATURE_SQ | M41T80_FEATURE_SQ_ALT)
},
{
.compatible = "st,m41t65",
.data = (void *)(M41T80_FEATURE_HT | M41T80_FEATURE_WD)
},
{
.compatible = "st,m41t80",
.data = (void *)(M41T80_FEATURE_SQ)
},
{
.compatible = "st,m41t81",
.data = (void *)(M41T80_FEATURE_HT | M41T80_FEATURE_SQ)
},
{
.compatible = "st,m41t81s",
.data = (void *)(M41T80_FEATURE_HT | M41T80_FEATURE_BL | M41T80_FEATURE_SQ)
},
{
.compatible = "st,m41t82",
.data = (void *)(M41T80_FEATURE_HT | M41T80_FEATURE_BL | M41T80_FEATURE_SQ)
},
{
.compatible = "st,m41t83",
.data = (void *)(M41T80_FEATURE_HT | M41T80_FEATURE_BL | M41T80_FEATURE_SQ)
},
{
.compatible = "st,m41t84",
.data = (void *)(M41T80_FEATURE_HT | M41T80_FEATURE_BL | M41T80_FEATURE_SQ)
},
{
.compatible = "st,m41t85",
.data = (void *)(M41T80_FEATURE_HT | M41T80_FEATURE_BL | M41T80_FEATURE_SQ)
},
{
.compatible = "st,m41t87",
.data = (void *)(M41T80_FEATURE_HT | M41T80_FEATURE_BL | M41T80_FEATURE_SQ)
},
{
.compatible = "microcrystal,rv4162",
.data = (void *)(M41T80_FEATURE_SQ | M41T80_FEATURE_WD | M41T80_FEATURE_SQ_ALT)
},
/* DT compatibility only, do not use compatibles below: */
{
.compatible = "st,rv4162",
.data = (void *)(M41T80_FEATURE_SQ | M41T80_FEATURE_WD | M41T80_FEATURE_SQ_ALT)
},
{
.compatible = "rv4162",
.data = (void *)(M41T80_FEATURE_SQ | M41T80_FEATURE_WD | M41T80_FEATURE_SQ_ALT)
},
{ }
};
MODULE_DEVICE_TABLE(of, m41t80_of_match);
struct m41t80_data {
unsigned long features;
struct i2c_client *client;
struct rtc_device *rtc;
#ifdef CONFIG_COMMON_CLK
struct clk_hw sqw;
unsigned long freq;
unsigned int sqwe;
#endif
};
static irqreturn_t m41t80_handle_irq(int irq, void *dev_id)
{
struct i2c_client *client = dev_id;
struct m41t80_data *m41t80 = i2c_get_clientdata(client);
unsigned long events = 0;
int flags, flags_afe;
rtc_lock(m41t80->rtc);
flags_afe = i2c_smbus_read_byte_data(client, M41T80_REG_ALARM_MON);
if (flags_afe < 0) {
rtc_unlock(m41t80->rtc);
return IRQ_NONE;
}
flags = i2c_smbus_read_byte_data(client, M41T80_REG_FLAGS);
if (flags <= 0) {
rtc_unlock(m41t80->rtc);
return IRQ_NONE;
}
if (flags & M41T80_FLAGS_AF) {
flags &= ~M41T80_FLAGS_AF;
flags_afe &= ~M41T80_ALMON_AFE;
events |= RTC_AF;
}
if (events) {
rtc_update_irq(m41t80->rtc, 1, events);
i2c_smbus_write_byte_data(client, M41T80_REG_FLAGS, flags);
i2c_smbus_write_byte_data(client, M41T80_REG_ALARM_MON,
flags_afe);
}
rtc_unlock(m41t80->rtc);
return IRQ_HANDLED;
}
static int m41t80_rtc_read_time(struct device *dev, struct rtc_time *tm)
{
struct i2c_client *client = to_i2c_client(dev);
unsigned char buf[8];
int err, flags;
flags = i2c_smbus_read_byte_data(client, M41T80_REG_FLAGS);
if (flags < 0)
return flags;
if (flags & M41T80_FLAGS_OF) {
dev_err(&client->dev, "Oscillator failure, data is invalid.\n");
return -EINVAL;
}
err = i2c_smbus_read_i2c_block_data(client, M41T80_REG_SSEC,
sizeof(buf), buf);
if (err < 0) {
dev_err(&client->dev, "Unable to read date\n");
return err;
}
tm->tm_sec = bcd2bin(buf[M41T80_REG_SEC] & 0x7f);
tm->tm_min = bcd2bin(buf[M41T80_REG_MIN] & 0x7f);
tm->tm_hour = bcd2bin(buf[M41T80_REG_HOUR] & 0x3f);
tm->tm_mday = bcd2bin(buf[M41T80_REG_DAY] & 0x3f);
tm->tm_wday = buf[M41T80_REG_WDAY] & 0x07;
tm->tm_mon = bcd2bin(buf[M41T80_REG_MON] & 0x1f) - 1;
/* assume 20YY not 19YY, and ignore the Century Bit */
tm->tm_year = bcd2bin(buf[M41T80_REG_YEAR]) + 100;
return 0;
}
static int m41t80_rtc_set_time(struct device *dev, struct rtc_time *tm)
{
struct i2c_client *client = to_i2c_client(dev);
struct m41t80_data *clientdata = i2c_get_clientdata(client);
unsigned char buf[8];
int err, flags;
buf[M41T80_REG_SSEC] = 0;
buf[M41T80_REG_SEC] = bin2bcd(tm->tm_sec);
buf[M41T80_REG_MIN] = bin2bcd(tm->tm_min);
buf[M41T80_REG_HOUR] = bin2bcd(tm->tm_hour);
buf[M41T80_REG_DAY] = bin2bcd(tm->tm_mday);
buf[M41T80_REG_MON] = bin2bcd(tm->tm_mon + 1);
buf[M41T80_REG_YEAR] = bin2bcd(tm->tm_year - 100);
buf[M41T80_REG_WDAY] = tm->tm_wday;
/* If the square wave output is controlled in the weekday register */
if (clientdata->features & M41T80_FEATURE_SQ_ALT) {
int val;
val = i2c_smbus_read_byte_data(client, M41T80_REG_WDAY);
if (val < 0)
return val;
buf[M41T80_REG_WDAY] |= (val & 0xf0);
}
err = i2c_smbus_write_i2c_block_data(client, M41T80_REG_SSEC,
sizeof(buf), buf);
if (err < 0) {
dev_err(&client->dev, "Unable to write to date registers\n");
return err;
}
/* Clear the OF bit of Flags Register */
flags = i2c_smbus_read_byte_data(client, M41T80_REG_FLAGS);
if (flags < 0)
return flags;
err = i2c_smbus_write_byte_data(client, M41T80_REG_FLAGS,
flags & ~M41T80_FLAGS_OF);
if (err < 0) {
dev_err(&client->dev, "Unable to write flags register\n");
return err;
}
return err;
}
static int m41t80_rtc_proc(struct device *dev, struct seq_file *seq)
{
struct i2c_client *client = to_i2c_client(dev);
struct m41t80_data *clientdata = i2c_get_clientdata(client);
int reg;
if (clientdata->features & M41T80_FEATURE_BL) {
reg = i2c_smbus_read_byte_data(client, M41T80_REG_FLAGS);
if (reg < 0)
return reg;
seq_printf(seq, "battery\t\t: %s\n",
(reg & M41T80_FLAGS_BATT_LOW) ? "exhausted" : "ok");
}
return 0;
}
static int m41t80_alarm_irq_enable(struct device *dev, unsigned int enabled)
{
struct i2c_client *client = to_i2c_client(dev);
int flags, retval;
flags = i2c_smbus_read_byte_data(client, M41T80_REG_ALARM_MON);
if (flags < 0)
return flags;
if (enabled)
flags |= M41T80_ALMON_AFE;
else
flags &= ~M41T80_ALMON_AFE;
retval = i2c_smbus_write_byte_data(client, M41T80_REG_ALARM_MON, flags);
if (retval < 0) {
dev_err(dev, "Unable to enable alarm IRQ %d\n", retval);
return retval;
}
return 0;
}
static int m41t80_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
{
struct i2c_client *client = to_i2c_client(dev);
u8 alarmvals[5];
int ret, err;
alarmvals[0] = bin2bcd(alrm->time.tm_mon + 1);
alarmvals[1] = bin2bcd(alrm->time.tm_mday);
alarmvals[2] = bin2bcd(alrm->time.tm_hour);
alarmvals[3] = bin2bcd(alrm->time.tm_min);
alarmvals[4] = bin2bcd(alrm->time.tm_sec);
/* Clear AF and AFE flags */
ret = i2c_smbus_read_byte_data(client, M41T80_REG_ALARM_MON);
if (ret < 0)
return ret;
err = i2c_smbus_write_byte_data(client, M41T80_REG_ALARM_MON,
ret & ~(M41T80_ALMON_AFE));
if (err < 0) {
dev_err(dev, "Unable to clear AFE bit\n");
return err;
}
/* Keep SQWE bit value */
alarmvals[0] |= (ret & M41T80_ALMON_SQWE);
ret = i2c_smbus_read_byte_data(client, M41T80_REG_FLAGS);
if (ret < 0)
return ret;
err = i2c_smbus_write_byte_data(client, M41T80_REG_FLAGS,
ret & ~(M41T80_FLAGS_AF));
if (err < 0) {
dev_err(dev, "Unable to clear AF bit\n");
return err;
}
/* Write the alarm */
err = i2c_smbus_write_i2c_block_data(client, M41T80_REG_ALARM_MON,
5, alarmvals);
if (err)
return err;
/* Enable the alarm interrupt */
if (alrm->enabled) {
alarmvals[0] |= M41T80_ALMON_AFE;
err = i2c_smbus_write_byte_data(client, M41T80_REG_ALARM_MON,
alarmvals[0]);
if (err)
return err;
}
return 0;
}
static int m41t80_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
{
struct i2c_client *client = to_i2c_client(dev);
u8 alarmvals[5];
int flags, ret;
ret = i2c_smbus_read_i2c_block_data(client, M41T80_REG_ALARM_MON,
5, alarmvals);
if (ret != 5)
return ret < 0 ? ret : -EIO;
flags = i2c_smbus_read_byte_data(client, M41T80_REG_FLAGS);
if (flags < 0)
return flags;
alrm->time.tm_sec = bcd2bin(alarmvals[4] & 0x7f);
alrm->time.tm_min = bcd2bin(alarmvals[3] & 0x7f);
alrm->time.tm_hour = bcd2bin(alarmvals[2] & 0x3f);
alrm->time.tm_mday = bcd2bin(alarmvals[1] & 0x3f);
alrm->time.tm_mon = bcd2bin(alarmvals[0] & 0x3f) - 1;
alrm->enabled = !!(alarmvals[0] & M41T80_ALMON_AFE);
alrm->pending = (flags & M41T80_FLAGS_AF) && alrm->enabled;
return 0;
}
static const struct rtc_class_ops m41t80_rtc_ops = {
.read_time = m41t80_rtc_read_time,
.set_time = m41t80_rtc_set_time,
.proc = m41t80_rtc_proc,
.read_alarm = m41t80_read_alarm,
.set_alarm = m41t80_set_alarm,
.alarm_irq_enable = m41t80_alarm_irq_enable,
};
#ifdef CONFIG_PM_SLEEP
static int m41t80_suspend(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
if (client->irq >= 0 && device_may_wakeup(dev))
enable_irq_wake(client->irq);
return 0;
}
static int m41t80_resume(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
if (client->irq >= 0 && device_may_wakeup(dev))
disable_irq_wake(client->irq);
return 0;
}
#endif
static SIMPLE_DEV_PM_OPS(m41t80_pm, m41t80_suspend, m41t80_resume);
#ifdef CONFIG_COMMON_CLK
#define sqw_to_m41t80_data(_hw) container_of(_hw, struct m41t80_data, sqw)
static unsigned long m41t80_decode_freq(int setting)
{
return (setting == 0) ? 0 : (setting == 1) ? M41T80_SQW_MAX_FREQ :
M41T80_SQW_MAX_FREQ >> setting;
}
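/*
 * Resulting map of the 4-bit RS setting: 0 disables the output, 1 gives
 * 32768 Hz, and each higher setting halves the rate (32768 >> setting)
 * down to 1 Hz at 15. Note that 16384 Hz has no encoding.
 */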
static unsigned long m41t80_get_freq(struct m41t80_data *m41t80)
{
struct i2c_client *client = m41t80->client;
int reg_sqw = (m41t80->features & M41T80_FEATURE_SQ_ALT) ?
M41T80_REG_WDAY : M41T80_REG_SQW;
int ret = i2c_smbus_read_byte_data(client, reg_sqw);
if (ret < 0)
return 0;
return m41t80_decode_freq(ret >> 4);
}
static unsigned long m41t80_sqw_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
return sqw_to_m41t80_data(hw)->freq;
}
static long m41t80_sqw_round_rate(struct clk_hw *hw, unsigned long rate,
unsigned long *prate)
{
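/*
 * Round down to the nearest rate the chip can produce: 32768 Hz, then
 * 8192 Hz and every power of two below it (16384 Hz has no encoding).
 * A request for e.g. 5000 Hz thus yields 4096 Hz.
 */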
if (rate >= M41T80_SQW_MAX_FREQ)
return M41T80_SQW_MAX_FREQ;
if (rate >= M41T80_SQW_MAX_FREQ / 4)
return M41T80_SQW_MAX_FREQ / 4;
if (!rate)
return 0;
return 1 << ilog2(rate);
}
static int m41t80_sqw_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate)
{
struct m41t80_data *m41t80 = sqw_to_m41t80_data(hw);
struct i2c_client *client = m41t80->client;
int reg_sqw = (m41t80->features & M41T80_FEATURE_SQ_ALT) ?
M41T80_REG_WDAY : M41T80_REG_SQW;
int reg, ret, val = 0;
if (rate >= M41T80_SQW_MAX_FREQ)
val = 1;
else if (rate >= M41T80_SQW_MAX_FREQ / 4)
val = 2;
else if (rate)
val = 15 - ilog2(rate);
reg = i2c_smbus_read_byte_data(client, reg_sqw);
if (reg < 0)
return reg;
reg = (reg & 0x0f) | (val << 4);
ret = i2c_smbus_write_byte_data(client, reg_sqw, reg);
if (!ret)
m41t80->freq = m41t80_decode_freq(val);
return ret;
}
static int m41t80_sqw_control(struct clk_hw *hw, bool enable)
{
struct m41t80_data *m41t80 = sqw_to_m41t80_data(hw);
struct i2c_client *client = m41t80->client;
int ret = i2c_smbus_read_byte_data(client, M41T80_REG_ALARM_MON);
if (ret < 0)
return ret;
if (enable)
ret |= M41T80_ALMON_SQWE;
else
ret &= ~M41T80_ALMON_SQWE;
ret = i2c_smbus_write_byte_data(client, M41T80_REG_ALARM_MON, ret);
if (!ret)
m41t80->sqwe = enable;
return ret;
}
static int m41t80_sqw_prepare(struct clk_hw *hw)
{
return m41t80_sqw_control(hw, 1);
}
static void m41t80_sqw_unprepare(struct clk_hw *hw)
{
m41t80_sqw_control(hw, 0);
}
static int m41t80_sqw_is_prepared(struct clk_hw *hw)
{
return sqw_to_m41t80_data(hw)->sqwe;
}
static const struct clk_ops m41t80_sqw_ops = {
.prepare = m41t80_sqw_prepare,
.unprepare = m41t80_sqw_unprepare,
.is_prepared = m41t80_sqw_is_prepared,
.recalc_rate = m41t80_sqw_recalc_rate,
.round_rate = m41t80_sqw_round_rate,
.set_rate = m41t80_sqw_set_rate,
};
static struct clk *m41t80_sqw_register_clk(struct m41t80_data *m41t80)
{
struct i2c_client *client = m41t80->client;
struct device_node *node = client->dev.of_node;
struct device_node *fixed_clock;
struct clk *clk;
struct clk_init_data init;
int ret;
fixed_clock = of_get_child_by_name(node, "clock");
if (fixed_clock) {
/*
* skip registering square wave clock when a fixed
* clock has been registered. The fixed clock is
* registered automatically when being referenced.
*/
of_node_put(fixed_clock);
return NULL;
}
/* First disable the clock */
ret = i2c_smbus_read_byte_data(client, M41T80_REG_ALARM_MON);
if (ret < 0)
return ERR_PTR(ret);
ret = i2c_smbus_write_byte_data(client, M41T80_REG_ALARM_MON,
ret & ~(M41T80_ALMON_SQWE));
if (ret < 0)
return ERR_PTR(ret);
init.name = "m41t80-sqw";
init.ops = &m41t80_sqw_ops;
init.flags = 0;
init.parent_names = NULL;
init.num_parents = 0;
m41t80->sqw.init = &init;
m41t80->freq = m41t80_get_freq(m41t80);
/* optional override of the clockname */
of_property_read_string(node, "clock-output-names", &init.name);
/* register the clock */
clk = clk_register(&client->dev, &m41t80->sqw);
if (!IS_ERR(clk))
of_clk_add_provider(node, of_clk_src_simple_get, clk);
return clk;
}
#endif
#ifdef CONFIG_RTC_DRV_M41T80_WDT
/*
*****************************************************************************
*
* Watchdog Driver
*
*****************************************************************************
*/
static DEFINE_MUTEX(m41t80_rtc_mutex);
static struct i2c_client *save_client;
/* Default margin */
#define WD_TIMO 60 /* 1..31 seconds */
static int wdt_margin = WD_TIMO;
module_param(wdt_margin, int, 0);
MODULE_PARM_DESC(wdt_margin, "Watchdog timeout in seconds (default 60s)");
static unsigned long wdt_is_open;
static int boot_flag;
/**
* wdt_ping - Reload counter one with the watchdog timeout.
* We don't bother reloading the cascade counter.
*/
static void wdt_ping(void)
{
unsigned char i2c_data[2];
struct i2c_msg msgs1[1] = {
{
.addr = save_client->addr,
.flags = 0,
.len = 2,
.buf = i2c_data,
},
};
struct m41t80_data *clientdata = i2c_get_clientdata(save_client);
i2c_data[0] = 0x09; /* watchdog register */
if (wdt_margin > 31)
i2c_data[1] = (wdt_margin & 0xFC) | 0x83; /* resolution = 4s */
else
/*
* WDS = 1 (0x80), multiplier = WD_TIMO, resolution = 1s (0x02)
*/
i2c_data[1] = wdt_margin << 2 | 0x82;
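/*
 * Example: the default 60s margin takes the 4s-resolution branch above,
 * giving i2c_data[1] = (60 & 0xFC) | 0x83 = 0xbf.
 */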
/*
* M41T65 has three bits for watchdog resolution. Don't set bit 7, as
* that would be an invalid resolution.
*/
if (clientdata->features & M41T80_FEATURE_WD)
i2c_data[1] &= ~M41T80_WATCHDOG_RB2;
i2c_transfer(save_client->adapter, msgs1, 1);
}
/**
* wdt_disable - disables watchdog.
*/
static void wdt_disable(void)
{
unsigned char i2c_data[2], i2c_buf[0x10];
struct i2c_msg msgs0[2] = {
{
.addr = save_client->addr,
.flags = 0,
.len = 1,
.buf = i2c_data,
},
{
.addr = save_client->addr,
.flags = I2C_M_RD,
.len = 1,
.buf = i2c_buf,
},
};
struct i2c_msg msgs1[1] = {
{
.addr = save_client->addr,
.flags = 0,
.len = 2,
.buf = i2c_data,
},
};
i2c_data[0] = 0x09;
i2c_transfer(save_client->adapter, msgs0, 2);
i2c_data[0] = 0x09;
i2c_data[1] = 0x00;
i2c_transfer(save_client->adapter, msgs1, 1);
}
/**
* wdt_write - write to watchdog.
* @file: file handle to the watchdog
* @buf: buffer to write (unused, as the data does not matter here)
* @count: count of bytes
* @ppos: pointer to the position to write. No seeks allowed
*
* A write to a watchdog device is defined as a keepalive signal. Any
* write of data will do, as we don't define content meaning.
*/
static ssize_t wdt_write(struct file *file, const char __user *buf,
size_t count, loff_t *ppos)
{
if (count) {
wdt_ping();
return 1;
}
return 0;
}
static ssize_t wdt_read(struct file *file, char __user *buf,
size_t count, loff_t *ppos)
{
return 0;
}
/**
* wdt_ioctl - ioctl handler to set watchdog.
* @file: file handle to the device
* @cmd: watchdog command
* @arg: argument pointer
*
* The watchdog API defines a common set of functions for all watchdogs
* according to their available features. We only actually usefully support
* querying capabilities and current status.
*/
static int wdt_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
{
int new_margin, rv;
static struct watchdog_info ident = {
.options = WDIOF_POWERUNDER | WDIOF_KEEPALIVEPING |
WDIOF_SETTIMEOUT,
.firmware_version = 1,
.identity = "M41T80 WTD"
};
switch (cmd) {
case WDIOC_GETSUPPORT:
return copy_to_user((struct watchdog_info __user *)arg, &ident,
sizeof(ident)) ? -EFAULT : 0;
case WDIOC_GETSTATUS:
case WDIOC_GETBOOTSTATUS:
return put_user(boot_flag, (int __user *)arg);
case WDIOC_KEEPALIVE:
wdt_ping();
return 0;
case WDIOC_SETTIMEOUT:
if (get_user(new_margin, (int __user *)arg))
return -EFAULT;
/* Arbitrary, can't find the card's limits */
if (new_margin < 1 || new_margin > 124)
return -EINVAL;
wdt_margin = new_margin;
wdt_ping();
fallthrough;
case WDIOC_GETTIMEOUT:
return put_user(wdt_margin, (int __user *)arg);
case WDIOC_SETOPTIONS:
if (copy_from_user(&rv, (int __user *)arg, sizeof(int)))
return -EFAULT;
if (rv & WDIOS_DISABLECARD) {
pr_info("disable watchdog\n");
wdt_disable();
}
if (rv & WDIOS_ENABLECARD) {
pr_info("enable watchdog\n");
wdt_ping();
}
return -EINVAL;
}
return -ENOTTY;
}
static long wdt_unlocked_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
{
int ret;
mutex_lock(&m41t80_rtc_mutex);
ret = wdt_ioctl(file, cmd, arg);
mutex_unlock(&m41t80_rtc_mutex);
return ret;
}
/**
* wdt_open - open a watchdog.
* @inode: inode of device
* @file: file handle to device
*
*/
static int wdt_open(struct inode *inode, struct file *file)
{
if (iminor(inode) == WATCHDOG_MINOR) {
mutex_lock(&m41t80_rtc_mutex);
if (test_and_set_bit(0, &wdt_is_open)) {
mutex_unlock(&m41t80_rtc_mutex);
return -EBUSY;
}
/*
* Activate
*/
wdt_is_open = 1;
mutex_unlock(&m41t80_rtc_mutex);
return stream_open(inode, file);
}
return -ENODEV;
}
/**
* wdt_release - release a watchdog.
* @inode: inode of the watchdog device
* @file: file handle to the watchdog device
*
*/
static int wdt_release(struct inode *inode, struct file *file)
{
if (iminor(inode) == WATCHDOG_MINOR)
clear_bit(0, &wdt_is_open);
return 0;
}
/**
* wdt_notify_sys - notify to watchdog.
* @this: our notifier block
* @code: the event being reported
* @unused: unused
*
* Our notifier is called on system shutdowns. We want to turn the card
* off at reboot otherwise the machine will reboot again during memory
* test or worse yet during the following fsck. This would suck, in fact
* trust me - if it happens it does suck.
*/
static int wdt_notify_sys(struct notifier_block *this, unsigned long code,
void *unused)
{
if (code == SYS_DOWN || code == SYS_HALT)
/* Disable Watchdog */
wdt_disable();
return NOTIFY_DONE;
}
static const struct file_operations wdt_fops = {
.owner = THIS_MODULE,
.read = wdt_read,
.unlocked_ioctl = wdt_unlocked_ioctl,
.compat_ioctl = compat_ptr_ioctl,
.write = wdt_write,
.open = wdt_open,
.release = wdt_release,
.llseek = no_llseek,
};
static struct miscdevice wdt_dev = {
.minor = WATCHDOG_MINOR,
.name = "watchdog",
.fops = &wdt_fops,
};
/*
* The WDT card needs to learn about soft shutdowns in order to
* turn the timebomb registers off.
*/
static struct notifier_block wdt_notifier = {
.notifier_call = wdt_notify_sys,
};
#endif /* CONFIG_RTC_DRV_M41T80_WDT */
/*
*****************************************************************************
*
* Driver Interface
*
*****************************************************************************
*/
static int m41t80_probe(struct i2c_client *client)
{
struct i2c_adapter *adapter = client->adapter;
int rc = 0;
struct rtc_time tm;
struct m41t80_data *m41t80_data = NULL;
bool wakeup_source = false;
if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_I2C_BLOCK |
I2C_FUNC_SMBUS_BYTE_DATA)) {
dev_err(&adapter->dev, "doesn't support I2C_FUNC_SMBUS_BYTE_DATA | I2C_FUNC_SMBUS_I2C_BLOCK\n");
return -ENODEV;
}
m41t80_data = devm_kzalloc(&client->dev, sizeof(*m41t80_data),
GFP_KERNEL);
if (!m41t80_data)
return -ENOMEM;
m41t80_data->client = client;
if (client->dev.of_node) {
m41t80_data->features = (unsigned long)
of_device_get_match_data(&client->dev);
} else {
const struct i2c_device_id *id = i2c_match_id(m41t80_id, client);
m41t80_data->features = id->driver_data;
}
i2c_set_clientdata(client, m41t80_data);
m41t80_data->rtc = devm_rtc_allocate_device(&client->dev);
if (IS_ERR(m41t80_data->rtc))
return PTR_ERR(m41t80_data->rtc);
#ifdef CONFIG_OF
wakeup_source = of_property_read_bool(client->dev.of_node,
"wakeup-source");
#endif
if (client->irq > 0) {
unsigned long irqflags = IRQF_TRIGGER_LOW;
if (dev_fwnode(&client->dev))
irqflags = 0;
rc = devm_request_threaded_irq(&client->dev, client->irq,
NULL, m41t80_handle_irq,
irqflags | IRQF_ONESHOT,
"m41t80", client);
if (rc) {
dev_warn(&client->dev, "unable to request IRQ, alarms disabled\n");
client->irq = 0;
wakeup_source = false;
}
}
if (client->irq > 0 || wakeup_source)
device_init_wakeup(&client->dev, true);
else
clear_bit(RTC_FEATURE_ALARM, m41t80_data->rtc->features);
m41t80_data->rtc->ops = &m41t80_rtc_ops;
m41t80_data->rtc->range_min = RTC_TIMESTAMP_BEGIN_2000;
m41t80_data->rtc->range_max = RTC_TIMESTAMP_END_2099;
if (client->irq <= 0)
clear_bit(RTC_FEATURE_UPDATE_INTERRUPT, m41t80_data->rtc->features);
/* Make sure HT (Halt Update) bit is cleared */
rc = i2c_smbus_read_byte_data(client, M41T80_REG_ALARM_HOUR);
if (rc >= 0 && rc & M41T80_ALHOUR_HT) {
if (m41t80_data->features & M41T80_FEATURE_HT) {
m41t80_rtc_read_time(&client->dev, &tm);
dev_info(&client->dev, "HT bit was set!\n");
dev_info(&client->dev, "Power Down at %ptR\n", &tm);
}
rc = i2c_smbus_write_byte_data(client, M41T80_REG_ALARM_HOUR,
rc & ~M41T80_ALHOUR_HT);
}
if (rc < 0) {
dev_err(&client->dev, "Can't clear HT bit\n");
return rc;
}
/* Make sure ST (stop) bit is cleared */
rc = i2c_smbus_read_byte_data(client, M41T80_REG_SEC);
if (rc >= 0 && rc & M41T80_SEC_ST)
rc = i2c_smbus_write_byte_data(client, M41T80_REG_SEC,
rc & ~M41T80_SEC_ST);
if (rc < 0) {
dev_err(&client->dev, "Can't clear ST bit\n");
return rc;
}
#ifdef CONFIG_RTC_DRV_M41T80_WDT
if (m41t80_data->features & M41T80_FEATURE_HT) {
save_client = client;
rc = misc_register(&wdt_dev);
if (rc)
return rc;
rc = register_reboot_notifier(&wdt_notifier);
if (rc) {
misc_deregister(&wdt_dev);
return rc;
}
}
#endif
#ifdef CONFIG_COMMON_CLK
if (m41t80_data->features & M41T80_FEATURE_SQ)
m41t80_sqw_register_clk(m41t80_data);
#endif
rc = devm_rtc_register_device(m41t80_data->rtc);
if (rc)
return rc;
return 0;
}
static void m41t80_remove(struct i2c_client *client)
{
#ifdef CONFIG_RTC_DRV_M41T80_WDT
struct m41t80_data *clientdata = i2c_get_clientdata(client);
if (clientdata->features & M41T80_FEATURE_HT) {
misc_deregister(&wdt_dev);
unregister_reboot_notifier(&wdt_notifier);
}
#endif
}
static struct i2c_driver m41t80_driver = {
.driver = {
.name = "rtc-m41t80",
.of_match_table = of_match_ptr(m41t80_of_match),
.pm = &m41t80_pm,
},
.probe = m41t80_probe,
.remove = m41t80_remove,
.id_table = m41t80_id,
};
module_i2c_driver(m41t80_driver);
MODULE_AUTHOR("Alexander Bigga <[email protected]>");
MODULE_DESCRIPTION("ST Microelectronics M41T80 series RTC I2C Client Driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/rtc/rtc-m41t80.c |
// SPDX-License-Identifier: GPL-2.0
/*
* RTC subsystem, dev interface
*
* Copyright (C) 2005 Tower Technologies
* Author: Alessandro Zummo <[email protected]>
*
* based on arch/arm/common/rtctime.c
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/compat.h>
#include <linux/module.h>
#include <linux/rtc.h>
#include <linux/sched/signal.h>
#include "rtc-core.h"
static dev_t rtc_devt;
#define RTC_DEV_MAX 16 /* 16 RTCs should be enough for everyone... */
static int rtc_dev_open(struct inode *inode, struct file *file)
{
struct rtc_device *rtc = container_of(inode->i_cdev,
struct rtc_device, char_dev);
if (test_and_set_bit_lock(RTC_DEV_BUSY, &rtc->flags))
return -EBUSY;
file->private_data = rtc;
spin_lock_irq(&rtc->irq_lock);
rtc->irq_data = 0;
spin_unlock_irq(&rtc->irq_lock);
return 0;
}
#ifdef CONFIG_RTC_INTF_DEV_UIE_EMUL
/*
* Routine to poll RTC seconds field for change as often as possible,
* after first RTC_UIE use timer to reduce polling
*/
static void rtc_uie_task(struct work_struct *work)
{
struct rtc_device *rtc =
container_of(work, struct rtc_device, uie_task);
struct rtc_time tm;
int num = 0;
int err;
err = rtc_read_time(rtc, &tm);
spin_lock_irq(&rtc->irq_lock);
if (rtc->stop_uie_polling || err) {
rtc->uie_task_active = 0;
} else if (rtc->oldsecs != tm.tm_sec) {
num = (tm.tm_sec + 60 - rtc->oldsecs) % 60;
rtc->oldsecs = tm.tm_sec;
rtc->uie_timer.expires = jiffies + HZ - (HZ / 10);
rtc->uie_timer_active = 1;
rtc->uie_task_active = 0;
add_timer(&rtc->uie_timer);
} else if (schedule_work(&rtc->uie_task) == 0) {
rtc->uie_task_active = 0;
}
spin_unlock_irq(&rtc->irq_lock);
if (num)
rtc_handle_legacy_irq(rtc, num, RTC_UF);
}
static void rtc_uie_timer(struct timer_list *t)
{
struct rtc_device *rtc = from_timer(rtc, t, uie_timer);
unsigned long flags;
spin_lock_irqsave(&rtc->irq_lock, flags);
rtc->uie_timer_active = 0;
rtc->uie_task_active = 1;
if ((schedule_work(&rtc->uie_task) == 0))
rtc->uie_task_active = 0;
spin_unlock_irqrestore(&rtc->irq_lock, flags);
}
static int clear_uie(struct rtc_device *rtc)
{
spin_lock_irq(&rtc->irq_lock);
if (rtc->uie_irq_active) {
rtc->stop_uie_polling = 1;
if (rtc->uie_timer_active) {
spin_unlock_irq(&rtc->irq_lock);
del_timer_sync(&rtc->uie_timer);
spin_lock_irq(&rtc->irq_lock);
rtc->uie_timer_active = 0;
}
if (rtc->uie_task_active) {
spin_unlock_irq(&rtc->irq_lock);
flush_work(&rtc->uie_task);
spin_lock_irq(&rtc->irq_lock);
}
rtc->uie_irq_active = 0;
}
spin_unlock_irq(&rtc->irq_lock);
return 0;
}
static int set_uie(struct rtc_device *rtc)
{
struct rtc_time tm;
int err;
err = rtc_read_time(rtc, &tm);
if (err)
return err;
spin_lock_irq(&rtc->irq_lock);
if (!rtc->uie_irq_active) {
rtc->uie_irq_active = 1;
rtc->stop_uie_polling = 0;
rtc->oldsecs = tm.tm_sec;
rtc->uie_task_active = 1;
if (schedule_work(&rtc->uie_task) == 0)
rtc->uie_task_active = 0;
}
rtc->irq_data = 0;
spin_unlock_irq(&rtc->irq_lock);
return 0;
}
int rtc_dev_update_irq_enable_emul(struct rtc_device *rtc, unsigned int enabled)
{
if (enabled)
return set_uie(rtc);
else
return clear_uie(rtc);
}
EXPORT_SYMBOL(rtc_dev_update_irq_enable_emul);
#endif /* CONFIG_RTC_INTF_DEV_UIE_EMUL */
static ssize_t
rtc_dev_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
{
struct rtc_device *rtc = file->private_data;
DECLARE_WAITQUEUE(wait, current);
unsigned long data;
ssize_t ret;
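/* userspace must read either exactly an unsigned int or at least an unsigned long */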
if (count != sizeof(unsigned int) && count < sizeof(unsigned long))
return -EINVAL;
add_wait_queue(&rtc->irq_queue, &wait);
do {
__set_current_state(TASK_INTERRUPTIBLE);
spin_lock_irq(&rtc->irq_lock);
data = rtc->irq_data;
rtc->irq_data = 0;
spin_unlock_irq(&rtc->irq_lock);
if (data != 0) {
ret = 0;
break;
}
if (file->f_flags & O_NONBLOCK) {
ret = -EAGAIN;
break;
}
if (signal_pending(current)) {
ret = -ERESTARTSYS;
break;
}
schedule();
} while (1);
set_current_state(TASK_RUNNING);
remove_wait_queue(&rtc->irq_queue, &wait);
if (ret == 0) {
if (sizeof(int) != sizeof(long) &&
count == sizeof(unsigned int))
ret = put_user(data, (unsigned int __user *)buf) ?:
sizeof(unsigned int);
else
ret = put_user(data, (unsigned long __user *)buf) ?:
sizeof(unsigned long);
}
return ret;
}
static __poll_t rtc_dev_poll(struct file *file, poll_table *wait)
{
struct rtc_device *rtc = file->private_data;
unsigned long data;
poll_wait(file, &rtc->irq_queue, wait);
data = rtc->irq_data;
return (data != 0) ? (EPOLLIN | EPOLLRDNORM) : 0;
}
static long rtc_dev_ioctl(struct file *file,
unsigned int cmd, unsigned long arg)
{
int err = 0;
struct rtc_device *rtc = file->private_data;
const struct rtc_class_ops *ops = rtc->ops;
struct rtc_time tm;
struct rtc_wkalrm alarm;
struct rtc_param param;
void __user *uarg = (void __user *)arg;
err = mutex_lock_interruptible(&rtc->ops_lock);
if (err)
return err;
/* check that the calling task has appropriate permissions
* for certain ioctls. doing this check here is useful
* to avoid duplicate code in each driver.
*/
switch (cmd) {
case RTC_EPOCH_SET:
case RTC_SET_TIME:
case RTC_PARAM_SET:
if (!capable(CAP_SYS_TIME))
err = -EACCES;
break;
case RTC_IRQP_SET:
if (arg > rtc->max_user_freq && !capable(CAP_SYS_RESOURCE))
err = -EACCES;
break;
case RTC_PIE_ON:
if (rtc->irq_freq > rtc->max_user_freq &&
!capable(CAP_SYS_RESOURCE))
err = -EACCES;
break;
}
if (err)
goto done;
/*
* Drivers *SHOULD NOT* provide ioctl implementations
* for these requests. Instead, provide methods to
* support the following code, so that the RTC's main
* features are accessible without using ioctls.
*
* RTC and alarm times will be in UTC, by preference,
* but dual-booting with MS-Windows implies RTCs must
* use the local wall clock time.
*/
switch (cmd) {
case RTC_ALM_READ:
mutex_unlock(&rtc->ops_lock);
err = rtc_read_alarm(rtc, &alarm);
if (err < 0)
return err;
if (copy_to_user(uarg, &alarm.time, sizeof(tm)))
err = -EFAULT;
return err;
case RTC_ALM_SET:
mutex_unlock(&rtc->ops_lock);
if (copy_from_user(&alarm.time, uarg, sizeof(tm)))
return -EFAULT;
alarm.enabled = 0;
alarm.pending = 0;
alarm.time.tm_wday = -1;
alarm.time.tm_yday = -1;
alarm.time.tm_isdst = -1;
/* RTC_ALM_SET alarms may be up to 24 hours in the future.
* Rather than expecting every RTC to implement "don't care"
* for day/month/year fields, just force the alarm to have
* the right values for those fields.
*
* RTC_WKALM_SET should be used instead. Not only does it
* eliminate the need for a separate RTC_AIE_ON call, it
* doesn't have the "alarm 23:59:59 in the future" race.
*
* NOTE: some legacy code may have used invalid fields as
* wildcards, exposing hardware "periodic alarm" capabilities.
* Not supported here.
*/
{
time64_t now, then;
err = rtc_read_time(rtc, &tm);
if (err < 0)
return err;
now = rtc_tm_to_time64(&tm);
alarm.time.tm_mday = tm.tm_mday;
alarm.time.tm_mon = tm.tm_mon;
alarm.time.tm_year = tm.tm_year;
err = rtc_valid_tm(&alarm.time);
if (err < 0)
return err;
then = rtc_tm_to_time64(&alarm.time);
/* alarm may need to wrap into tomorrow */
if (then < now) {
rtc_time64_to_tm(now + 24 * 60 * 60, &tm);
alarm.time.tm_mday = tm.tm_mday;
alarm.time.tm_mon = tm.tm_mon;
alarm.time.tm_year = tm.tm_year;
}
}
return rtc_set_alarm(rtc, &alarm);
case RTC_RD_TIME:
mutex_unlock(&rtc->ops_lock);
err = rtc_read_time(rtc, &tm);
if (err < 0)
return err;
if (copy_to_user(uarg, &tm, sizeof(tm)))
err = -EFAULT;
return err;
case RTC_SET_TIME:
mutex_unlock(&rtc->ops_lock);
if (copy_from_user(&tm, uarg, sizeof(tm)))
return -EFAULT;
return rtc_set_time(rtc, &tm);
case RTC_PIE_ON:
err = rtc_irq_set_state(rtc, 1);
break;
case RTC_PIE_OFF:
err = rtc_irq_set_state(rtc, 0);
break;
case RTC_AIE_ON:
mutex_unlock(&rtc->ops_lock);
return rtc_alarm_irq_enable(rtc, 1);
case RTC_AIE_OFF:
mutex_unlock(&rtc->ops_lock);
return rtc_alarm_irq_enable(rtc, 0);
case RTC_UIE_ON:
mutex_unlock(&rtc->ops_lock);
return rtc_update_irq_enable(rtc, 1);
case RTC_UIE_OFF:
mutex_unlock(&rtc->ops_lock);
return rtc_update_irq_enable(rtc, 0);
case RTC_IRQP_SET:
err = rtc_irq_set_freq(rtc, arg);
break;
case RTC_IRQP_READ:
err = put_user(rtc->irq_freq, (unsigned long __user *)uarg);
break;
case RTC_WKALM_SET:
mutex_unlock(&rtc->ops_lock);
if (copy_from_user(&alarm, uarg, sizeof(alarm)))
return -EFAULT;
return rtc_set_alarm(rtc, &alarm);
case RTC_WKALM_RD:
mutex_unlock(&rtc->ops_lock);
err = rtc_read_alarm(rtc, &alarm);
if (err < 0)
return err;
if (copy_to_user(uarg, &alarm, sizeof(alarm)))
err = -EFAULT;
return err;
case RTC_PARAM_GET:
if (copy_from_user(&param, uarg, sizeof(param))) {
mutex_unlock(&rtc->ops_lock);
return -EFAULT;
}
switch (param.param) {
case RTC_PARAM_FEATURES:
if (param.index != 0)
err = -EINVAL;
param.uvalue = rtc->features[0];
break;
case RTC_PARAM_CORRECTION: {
long offset;
mutex_unlock(&rtc->ops_lock);
if (param.index != 0)
return -EINVAL;
err = rtc_read_offset(rtc, &offset);
mutex_lock(&rtc->ops_lock);
if (err == 0)
param.svalue = offset;
break;
}
default:
if (rtc->ops->param_get)
err = rtc->ops->param_get(rtc->dev.parent, &param);
else
err = -EINVAL;
}
if (!err)
if (copy_to_user(uarg, &param, sizeof(param)))
err = -EFAULT;
break;
case RTC_PARAM_SET:
if (copy_from_user(&param, uarg, sizeof(param))) {
mutex_unlock(&rtc->ops_lock);
return -EFAULT;
}
switch (param.param) {
case RTC_PARAM_FEATURES:
err = -EINVAL;
break;
case RTC_PARAM_CORRECTION:
mutex_unlock(&rtc->ops_lock);
if (param.index != 0)
return -EINVAL;
return rtc_set_offset(rtc, param.svalue);
default:
if (rtc->ops->param_set)
err = rtc->ops->param_set(rtc->dev.parent, &param);
else
err = -EINVAL;
}
break;
default:
/* Finally try the driver's ioctl interface */
if (ops->ioctl) {
err = ops->ioctl(rtc->dev.parent, cmd, arg);
if (err == -ENOIOCTLCMD)
err = -ENOTTY;
} else {
err = -ENOTTY;
}
break;
}
done:
mutex_unlock(&rtc->ops_lock);
return err;
}
#ifdef CONFIG_COMPAT
#define RTC_IRQP_SET32 _IOW('p', 0x0c, __u32)
#define RTC_IRQP_READ32 _IOR('p', 0x0b, __u32)
#define RTC_EPOCH_SET32 _IOW('p', 0x0e, __u32)
static long rtc_dev_compat_ioctl(struct file *file,
unsigned int cmd, unsigned long arg)
{
struct rtc_device *rtc = file->private_data;
void __user *uarg = compat_ptr(arg);
switch (cmd) {
case RTC_IRQP_READ32:
return put_user(rtc->irq_freq, (__u32 __user *)uarg);
case RTC_IRQP_SET32:
/* arg is a plain integer, not pointer */
return rtc_dev_ioctl(file, RTC_IRQP_SET, arg);
case RTC_EPOCH_SET32:
/* arg is a plain integer, not pointer */
return rtc_dev_ioctl(file, RTC_EPOCH_SET, arg);
}
return rtc_dev_ioctl(file, cmd, (unsigned long)uarg);
}
#endif
static int rtc_dev_fasync(int fd, struct file *file, int on)
{
struct rtc_device *rtc = file->private_data;
return fasync_helper(fd, file, on, &rtc->async_queue);
}
static int rtc_dev_release(struct inode *inode, struct file *file)
{
struct rtc_device *rtc = file->private_data;
/* We shut down the repeating IRQs that userspace enabled,
* since nothing is listening to them.
* - Update (UIE) ... currently only managed through ioctls
* - Periodic (PIE) ... also used through rtc_*() interface calls
*
* Leave the alarm alone; it may be set to trigger a system wakeup
* later, or be used by kernel code, and is a one-shot event anyway.
*/
/* Keep ioctl until all drivers are converted */
rtc_dev_ioctl(file, RTC_UIE_OFF, 0);
rtc_update_irq_enable(rtc, 0);
rtc_irq_set_state(rtc, 0);
clear_bit_unlock(RTC_DEV_BUSY, &rtc->flags);
return 0;
}
static const struct file_operations rtc_dev_fops = {
.owner = THIS_MODULE,
.llseek = no_llseek,
.read = rtc_dev_read,
.poll = rtc_dev_poll,
.unlocked_ioctl = rtc_dev_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = rtc_dev_compat_ioctl,
#endif
.open = rtc_dev_open,
.release = rtc_dev_release,
.fasync = rtc_dev_fasync,
};
/* insertion/removal hooks */
void rtc_dev_prepare(struct rtc_device *rtc)
{
if (!rtc_devt)
return;
if (rtc->id >= RTC_DEV_MAX) {
dev_dbg(&rtc->dev, "too many RTC devices\n");
return;
}
rtc->dev.devt = MKDEV(MAJOR(rtc_devt), rtc->id);
#ifdef CONFIG_RTC_INTF_DEV_UIE_EMUL
INIT_WORK(&rtc->uie_task, rtc_uie_task);
timer_setup(&rtc->uie_timer, rtc_uie_timer, 0);
#endif
cdev_init(&rtc->char_dev, &rtc_dev_fops);
rtc->char_dev.owner = rtc->owner;
}
void __init rtc_dev_init(void)
{
int err;
err = alloc_chrdev_region(&rtc_devt, 0, RTC_DEV_MAX, "rtc");
if (err < 0)
pr_err("failed to allocate char dev region\n");
}
| linux-master | drivers/rtc/dev.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Loongson RTC driver
*
* Maintained out-of-tree by Huacai Chen <[email protected]>.
* Rewritten for mainline by WANG Xuerui <[email protected]>.
* Binbin Zhou <[email protected]>
*/
#include <linux/bitfield.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/rtc.h>
#include <linux/acpi.h>
/* Time Of Year(TOY) counters registers */
#define TOY_TRIM_REG 0x20 /* Must be initialized to 0 */
#define TOY_WRITE0_REG 0x24 /* TOY low 32-bits value (write-only) */
#define TOY_WRITE1_REG 0x28 /* TOY high 32-bits value (write-only) */
#define TOY_READ0_REG 0x2c /* TOY low 32-bits value (read-only) */
#define TOY_READ1_REG 0x30 /* TOY high 32-bits value (read-only) */
#define TOY_MATCH0_REG 0x34 /* TOY timing interrupt 0 */
#define TOY_MATCH1_REG 0x38 /* TOY timing interrupt 1 */
#define TOY_MATCH2_REG 0x3c /* TOY timing interrupt 2 */
/* RTC counters registers */
#define RTC_CTRL_REG 0x40 /* TOY and RTC control register */
#define RTC_TRIM_REG 0x60 /* Must be initialized to 0 */
#define RTC_WRITE0_REG 0x64 /* RTC counters value (write-only) */
#define RTC_READ0_REG 0x68 /* RTC counters value (read-only) */
#define RTC_MATCH0_REG 0x6c /* RTC timing interrupt 0 */
#define RTC_MATCH1_REG 0x70 /* RTC timing interrupt 1 */
#define RTC_MATCH2_REG 0x74 /* RTC timing interrupt 2 */
/* bitmask of TOY_WRITE0_REG */
#define TOY_MON GENMASK(31, 26)
#define TOY_DAY GENMASK(25, 21)
#define TOY_HOUR GENMASK(20, 16)
#define TOY_MIN GENMASK(15, 10)
#define TOY_SEC GENMASK(9, 4)
#define TOY_MSEC GENMASK(3, 0)
/* bitmask of TOY_MATCH0/1/2_REG */
#define TOY_MATCH_YEAR GENMASK(31, 26)
#define TOY_MATCH_MON GENMASK(25, 22)
#define TOY_MATCH_DAY GENMASK(21, 17)
#define TOY_MATCH_HOUR GENMASK(16, 12)
#define TOY_MATCH_MIN GENMASK(11, 6)
#define TOY_MATCH_SEC GENMASK(5, 0)
/* bitmask of RTC_CTRL_REG */
#define RTC_ENABLE BIT(13) /* 1: RTC counters enable */
#define TOY_ENABLE BIT(11) /* 1: TOY counters enable */
#define OSC_ENABLE BIT(8) /* 1: 32.768k crystal enable */
#define TOY_ENABLE_MASK (TOY_ENABLE | OSC_ENABLE)
/* PM domain registers */
#define PM1_STS_REG 0x0c /* Power management 1 status register */
#define RTC_STS BIT(10) /* RTC status */
#define PM1_EN_REG 0x10 /* Power management 1 enable register */
#define RTC_EN BIT(10) /* RTC event enable */
/*
* According to the LS1C manual, RTC_CTRL and alarm-related registers are not defined.
* Accessing the relevant registers will cause the system to hang.
*/
#define LS1C_RTC_CTRL_WORKAROUND BIT(0)
struct loongson_rtc_config {
u32 pm_offset; /* Offset of PM domain, for RTC alarm wakeup */
u32 flags; /* Workaround bits */
};
struct loongson_rtc_priv {
spinlock_t lock; /* protects PM registers access */
u32 fix_year; /* RTC alarm year compensation value */
struct rtc_device *rtcdev;
struct regmap *regmap;
void __iomem *pm_base; /* PM domain base, for RTC alarm wakeup */
const struct loongson_rtc_config *config;
};
static const struct loongson_rtc_config ls1b_rtc_config = {
.pm_offset = 0,
.flags = 0,
};
static const struct loongson_rtc_config ls1c_rtc_config = {
.pm_offset = 0,
.flags = LS1C_RTC_CTRL_WORKAROUND,
};
static const struct loongson_rtc_config generic_rtc_config = {
.pm_offset = 0x100,
.flags = 0,
};
static const struct loongson_rtc_config ls2k1000_rtc_config = {
.pm_offset = 0x800,
.flags = 0,
};
static const struct regmap_config loongson_rtc_regmap_config = {
.reg_bits = 32,
.val_bits = 32,
.reg_stride = 4,
};
/* RTC alarm irq handler */
static irqreturn_t loongson_rtc_isr(int irq, void *id)
{
struct loongson_rtc_priv *priv = (struct loongson_rtc_priv *)id;
rtc_update_irq(priv->rtcdev, 1, RTC_AF | RTC_IRQF);
return IRQ_HANDLED;
}
/* For ACPI fixed event handler */
static u32 loongson_rtc_handler(void *id)
{
struct loongson_rtc_priv *priv = (struct loongson_rtc_priv *)id;
spin_lock(&priv->lock);
/* Disable RTC alarm wakeup and interrupt */
writel(readl(priv->pm_base + PM1_EN_REG) & ~RTC_EN,
priv->pm_base + PM1_EN_REG);
/* Clear RTC interrupt status */
writel(RTC_STS, priv->pm_base + PM1_STS_REG);
spin_unlock(&priv->lock);
/*
* The TOY_MATCH0_REG should be cleared 0 here,
* otherwise the interrupt cannot be cleared.
*/
return regmap_write(priv->regmap, TOY_MATCH0_REG, 0);
}
static int loongson_rtc_set_enabled(struct device *dev)
{
struct loongson_rtc_priv *priv = dev_get_drvdata(dev);
if (priv->config->flags & LS1C_RTC_CTRL_WORKAROUND)
return 0;
/* Enable RTC TOY counters and crystal */
return regmap_update_bits(priv->regmap, RTC_CTRL_REG, TOY_ENABLE_MASK,
TOY_ENABLE_MASK);
}
static bool loongson_rtc_get_enabled(struct device *dev)
{
int ret;
u32 ctrl_data;
struct loongson_rtc_priv *priv = dev_get_drvdata(dev);
if (priv->config->flags & LS1C_RTC_CTRL_WORKAROUND)
return true;
ret = regmap_read(priv->regmap, RTC_CTRL_REG, &ctrl_data);
if (ret < 0)
return false;
return ctrl_data & TOY_ENABLE_MASK;
}
static int loongson_rtc_read_time(struct device *dev, struct rtc_time *tm)
{
int ret;
u32 rtc_data[2];
struct loongson_rtc_priv *priv = dev_get_drvdata(dev);
if (!loongson_rtc_get_enabled(dev))
return -EINVAL;
ret = regmap_bulk_read(priv->regmap, TOY_READ0_REG, rtc_data,
ARRAY_SIZE(rtc_data));
if (ret < 0)
return ret;
tm->tm_sec = FIELD_GET(TOY_SEC, rtc_data[0]);
tm->tm_min = FIELD_GET(TOY_MIN, rtc_data[0]);
tm->tm_hour = FIELD_GET(TOY_HOUR, rtc_data[0]);
tm->tm_mday = FIELD_GET(TOY_DAY, rtc_data[0]);
tm->tm_mon = FIELD_GET(TOY_MON, rtc_data[0]) - 1;
tm->tm_year = rtc_data[1];
/* Prepare for RTC alarm year compensation value. */
priv->fix_year = tm->tm_year / 64 * 64;
return 0;
}
static int loongson_rtc_set_time(struct device *dev, struct rtc_time *tm)
{
int ret;
u32 rtc_data[2];
struct loongson_rtc_priv *priv = dev_get_drvdata(dev);
rtc_data[0] = FIELD_PREP(TOY_SEC, tm->tm_sec)
| FIELD_PREP(TOY_MIN, tm->tm_min)
| FIELD_PREP(TOY_HOUR, tm->tm_hour)
| FIELD_PREP(TOY_DAY, tm->tm_mday)
| FIELD_PREP(TOY_MON, tm->tm_mon + 1);
rtc_data[1] = tm->tm_year;
ret = regmap_bulk_write(priv->regmap, TOY_WRITE0_REG, rtc_data,
ARRAY_SIZE(rtc_data));
if (ret < 0)
return ret;
return loongson_rtc_set_enabled(dev);
}
static int loongson_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
{
int ret;
u32 alarm_data;
struct loongson_rtc_priv *priv = dev_get_drvdata(dev);
ret = regmap_read(priv->regmap, TOY_MATCH0_REG, &alarm_data);
if (ret < 0)
return ret;
alrm->time.tm_sec = FIELD_GET(TOY_MATCH_SEC, alarm_data);
alrm->time.tm_min = FIELD_GET(TOY_MATCH_MIN, alarm_data);
alrm->time.tm_hour = FIELD_GET(TOY_MATCH_HOUR, alarm_data);
alrm->time.tm_mday = FIELD_GET(TOY_MATCH_DAY, alarm_data);
alrm->time.tm_mon = FIELD_GET(TOY_MATCH_MON, alarm_data) - 1;
/*
* This is a hardware bug: the year field of SYS_TOYMATCH is only 6 bits,
* making it impossible to store year values larger than 63.
*
* SYS_TOYMATCH is used to match the alarm time value and determine if
* an alarm is triggered, so we must keep the lower 6 bits of the year
* value constant during the value conversion.
*
* In summary, we need to manually add 64 (or a multiple of 64) to the
* year value to avoid the invalid alarm prompt at startup.
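*
* For example, tm_year = 123 (year 2023) gives fix_year = 123 / 64 * 64
* = 64; the 6-bit field then stores 123 - 64 = 59, and read_alarm
* reconstructs 59 + 64 = 123.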
*/
alrm->time.tm_year = FIELD_GET(TOY_MATCH_YEAR, alarm_data) + priv->fix_year;
alrm->enabled = !!(readl(priv->pm_base + PM1_EN_REG) & RTC_EN);
return 0;
}
static int loongson_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
{
u32 val;
struct loongson_rtc_priv *priv = dev_get_drvdata(dev);
spin_lock(&priv->lock);
val = readl(priv->pm_base + PM1_EN_REG);
/* Enable RTC alarm wakeup */
writel(enabled ? val | RTC_EN : val & ~RTC_EN,
priv->pm_base + PM1_EN_REG);
spin_unlock(&priv->lock);
return 0;
}
static int loongson_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
{
int ret;
u32 alarm_data;
struct loongson_rtc_priv *priv = dev_get_drvdata(dev);
alarm_data = FIELD_PREP(TOY_MATCH_SEC, alrm->time.tm_sec)
| FIELD_PREP(TOY_MATCH_MIN, alrm->time.tm_min)
| FIELD_PREP(TOY_MATCH_HOUR, alrm->time.tm_hour)
| FIELD_PREP(TOY_MATCH_DAY, alrm->time.tm_mday)
| FIELD_PREP(TOY_MATCH_MON, alrm->time.tm_mon + 1)
| FIELD_PREP(TOY_MATCH_YEAR, alrm->time.tm_year - priv->fix_year);
ret = regmap_write(priv->regmap, TOY_MATCH0_REG, alarm_data);
if (ret < 0)
return ret;
return loongson_rtc_alarm_irq_enable(dev, alrm->enabled);
}
static const struct rtc_class_ops loongson_rtc_ops = {
.read_time = loongson_rtc_read_time,
.set_time = loongson_rtc_set_time,
.read_alarm = loongson_rtc_read_alarm,
.set_alarm = loongson_rtc_set_alarm,
.alarm_irq_enable = loongson_rtc_alarm_irq_enable,
};
static int loongson_rtc_probe(struct platform_device *pdev)
{
int ret, alarm_irq;
void __iomem *regs;
struct loongson_rtc_priv *priv;
struct device *dev = &pdev->dev;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(regs))
return dev_err_probe(dev, PTR_ERR(regs),
"devm_platform_ioremap_resource failed\n");
priv->regmap = devm_regmap_init_mmio(dev, regs,
&loongson_rtc_regmap_config);
if (IS_ERR(priv->regmap))
return dev_err_probe(dev, PTR_ERR(priv->regmap),
"devm_regmap_init_mmio failed\n");
priv->config = device_get_match_data(dev);
spin_lock_init(&priv->lock);
platform_set_drvdata(pdev, priv);
priv->rtcdev = devm_rtc_allocate_device(dev);
if (IS_ERR(priv->rtcdev))
return dev_err_probe(dev, PTR_ERR(priv->rtcdev),
"devm_rtc_allocate_device failed\n");
/* Get RTC alarm irq */
alarm_irq = platform_get_irq(pdev, 0);
if (alarm_irq > 0) {
ret = devm_request_irq(dev, alarm_irq, loongson_rtc_isr,
0, "loongson-alarm", priv);
if (ret < 0)
return dev_err_probe(dev, ret, "Unable to request irq %d\n",
alarm_irq);
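/* The PM1 event/control registers sit at a fixed negative offset
 * from the TOY block; derive their base for the RTC_EN accesses.
 */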
priv->pm_base = regs - priv->config->pm_offset;
device_init_wakeup(dev, 1);
if (has_acpi_companion(dev))
acpi_install_fixed_event_handler(ACPI_EVENT_RTC,
loongson_rtc_handler, priv);
} else {
/* Loongson-1C RTC does not support alarm */
clear_bit(RTC_FEATURE_ALARM, priv->rtcdev->features);
}
/* Loongson RTC does not support UIE */
clear_bit(RTC_FEATURE_UPDATE_INTERRUPT, priv->rtcdev->features);
priv->rtcdev->ops = &loongson_rtc_ops;
priv->rtcdev->range_min = RTC_TIMESTAMP_BEGIN_2000;
priv->rtcdev->range_max = RTC_TIMESTAMP_END_2099;
return devm_rtc_register_device(priv->rtcdev);
}
static void loongson_rtc_remove(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct loongson_rtc_priv *priv = dev_get_drvdata(dev);
if (!test_bit(RTC_FEATURE_ALARM, priv->rtcdev->features))
return;
if (has_acpi_companion(dev))
acpi_remove_fixed_event_handler(ACPI_EVENT_RTC,
loongson_rtc_handler);
device_init_wakeup(dev, 0);
loongson_rtc_alarm_irq_enable(dev, 0);
}
static const struct of_device_id loongson_rtc_of_match[] = {
{ .compatible = "loongson,ls1b-rtc", .data = &ls1b_rtc_config },
{ .compatible = "loongson,ls1c-rtc", .data = &ls1c_rtc_config },
{ .compatible = "loongson,ls7a-rtc", .data = &generic_rtc_config },
{ .compatible = "loongson,ls2k1000-rtc", .data = &ls2k1000_rtc_config },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, loongson_rtc_of_match);
static const struct acpi_device_id loongson_rtc_acpi_match[] = {
{ "LOON0001", .driver_data = (kernel_ulong_t)&generic_rtc_config },
{ }
};
MODULE_DEVICE_TABLE(acpi, loongson_rtc_acpi_match);
static struct platform_driver loongson_rtc_driver = {
.probe = loongson_rtc_probe,
.remove_new = loongson_rtc_remove,
.driver = {
.name = "loongson-rtc",
.of_match_table = loongson_rtc_of_match,
.acpi_match_table = loongson_rtc_acpi_match,
},
};
module_platform_driver(loongson_rtc_driver);
MODULE_DESCRIPTION("Loongson RTC driver");
MODULE_AUTHOR("Binbin Zhou <[email protected]>");
MODULE_AUTHOR("WANG Xuerui <[email protected]>");
MODULE_AUTHOR("Huacai Chen <[email protected]>");
MODULE_LICENSE("GPL");
| linux-master | drivers/rtc/rtc-loongson.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* DS1286 Real Time Clock interface for Linux
*
* Copyright (C) 1998, 1999, 2000 Ralf Baechle
* Copyright (C) 2008 Thomas Bogendoerfer
*
* Based on code written by Paul Gortmaker.
*/
#include <linux/module.h>
#include <linux/rtc.h>
#include <linux/platform_device.h>
#include <linux/bcd.h>
#include <linux/rtc/ds1286.h>
#include <linux/io.h>
#include <linux/slab.h>
struct ds1286_priv {
struct rtc_device *rtc;
u32 __iomem *rtcregs;
spinlock_t lock;
};
static inline u8 ds1286_rtc_read(struct ds1286_priv *priv, int reg)
{
return __raw_readl(&priv->rtcregs[reg]) & 0xff;
}
static inline void ds1286_rtc_write(struct ds1286_priv *priv, u8 data, int reg)
{
__raw_writel(data, &priv->rtcregs[reg]);
}
static int ds1286_alarm_irq_enable(struct device *dev, unsigned int enabled)
{
struct ds1286_priv *priv = dev_get_drvdata(dev);
unsigned long flags;
unsigned char val;
/* Allow or mask alarm interrupts */
spin_lock_irqsave(&priv->lock, flags);
val = ds1286_rtc_read(priv, RTC_CMD);
if (enabled)
val &= ~RTC_TDM;
else
val |= RTC_TDM;
ds1286_rtc_write(priv, val, RTC_CMD);
spin_unlock_irqrestore(&priv->lock, flags);
return 0;
}
#ifdef CONFIG_RTC_INTF_DEV
static int ds1286_ioctl(struct device *dev, unsigned int cmd, unsigned long arg)
{
struct ds1286_priv *priv = dev_get_drvdata(dev);
unsigned long flags;
unsigned char val;
switch (cmd) {
case RTC_WIE_OFF:
/* Mask watchdog int. enab. bit */
spin_lock_irqsave(&priv->lock, flags);
val = ds1286_rtc_read(priv, RTC_CMD);
val |= RTC_WAM;
ds1286_rtc_write(priv, val, RTC_CMD);
spin_unlock_irqrestore(&priv->lock, flags);
break;
case RTC_WIE_ON:
/* Allow watchdog interrupts. */
spin_lock_irqsave(&priv->lock, flags);
val = ds1286_rtc_read(priv, RTC_CMD);
val &= ~RTC_WAM;
ds1286_rtc_write(priv, val, RTC_CMD);
spin_unlock_irqrestore(&priv->lock, flags);
break;
default:
return -ENOIOCTLCMD;
}
return 0;
}
#else
#define ds1286_ioctl NULL
#endif
#ifdef CONFIG_PROC_FS
static int ds1286_proc(struct device *dev, struct seq_file *seq)
{
struct ds1286_priv *priv = dev_get_drvdata(dev);
unsigned char month, cmd, amode;
const char *s;
month = ds1286_rtc_read(priv, RTC_MONTH);
seq_printf(seq,
"oscillator\t: %s\n"
"square_wave\t: %s\n",
(month & RTC_EOSC) ? "disabled" : "enabled",
(month & RTC_ESQW) ? "disabled" : "enabled");
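/* Bit 7 of each alarm register is its "don't care" mask; gather the
 * three mask bits into amode to decode the alarm mode below.
 */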
amode = ((ds1286_rtc_read(priv, RTC_MINUTES_ALARM) & 0x80) >> 5) |
((ds1286_rtc_read(priv, RTC_HOURS_ALARM) & 0x80) >> 6) |
((ds1286_rtc_read(priv, RTC_DAY_ALARM) & 0x80) >> 7);
switch (amode) {
case 7:
s = "each minute";
break;
case 3:
s = "minutes match";
break;
case 1:
s = "hours and minutes match";
break;
case 0:
s = "days, hours and minutes match";
break;
default:
s = "invalid";
break;
}
seq_printf(seq, "alarm_mode\t: %s\n", s);
cmd = ds1286_rtc_read(priv, RTC_CMD);
seq_printf(seq,
"alarm_enable\t: %s\n"
"wdog_alarm\t: %s\n"
"alarm_mask\t: %s\n"
"wdog_alarm_mask\t: %s\n"
"interrupt_mode\t: %s\n"
"INTB_mode\t: %s_active\n"
"interrupt_pins\t: %s\n",
(cmd & RTC_TDF) ? "yes" : "no",
(cmd & RTC_WAF) ? "yes" : "no",
(cmd & RTC_TDM) ? "disabled" : "enabled",
(cmd & RTC_WAM) ? "disabled" : "enabled",
(cmd & RTC_PU_LVL) ? "pulse" : "level",
(cmd & RTC_IBH_LO) ? "low" : "high",
(cmd & RTC_IPSW) ? "unswapped" : "swapped");
return 0;
}
#else
#define ds1286_proc NULL
#endif
static int ds1286_read_time(struct device *dev, struct rtc_time *tm)
{
struct ds1286_priv *priv = dev_get_drvdata(dev);
unsigned char save_control;
unsigned long flags;
unsigned long uip_watchdog = jiffies;
/*
 * Read the RTC once any update in progress is done. The update
 * can take just over 2ms. We wait 10 to 20ms; there is no need
 * to poll-wait (up to 1s - eeccch) for the falling edge of RTC_UIP.
 * If you need to know *exactly* when a second has started, enable
 * periodic update complete interrupts (via ioctl) and then
 * immediately read /dev/rtc, which will block until you get the IRQ.
 * Once the read clears, read the RTC time (again via ioctl). Easy.
 */
if (ds1286_rtc_read(priv, RTC_CMD) & RTC_TE)
while (time_before(jiffies, uip_watchdog + 2*HZ/100))
barrier();
/*
* Only the values that we read from the RTC are set. We leave
* tm_wday, tm_yday and tm_isdst untouched. Even though the
* RTC has RTC_DAY_OF_WEEK, we ignore it, as it is only updated
* by the RTC when initially set to a non-zero value.
*/
spin_lock_irqsave(&priv->lock, flags);
save_control = ds1286_rtc_read(priv, RTC_CMD);
ds1286_rtc_write(priv, (save_control|RTC_TE), RTC_CMD);
tm->tm_sec = ds1286_rtc_read(priv, RTC_SECONDS);
tm->tm_min = ds1286_rtc_read(priv, RTC_MINUTES);
tm->tm_hour = ds1286_rtc_read(priv, RTC_HOURS) & 0x3f;
tm->tm_mday = ds1286_rtc_read(priv, RTC_DATE);
tm->tm_mon = ds1286_rtc_read(priv, RTC_MONTH) & 0x1f;
tm->tm_year = ds1286_rtc_read(priv, RTC_YEAR);
ds1286_rtc_write(priv, save_control, RTC_CMD);
spin_unlock_irqrestore(&priv->lock, flags);
tm->tm_sec = bcd2bin(tm->tm_sec);
tm->tm_min = bcd2bin(tm->tm_min);
tm->tm_hour = bcd2bin(tm->tm_hour);
tm->tm_mday = bcd2bin(tm->tm_mday);
tm->tm_mon = bcd2bin(tm->tm_mon);
tm->tm_year = bcd2bin(tm->tm_year);
/*
 * Account for differences between how the RTC uses the values
 * and how they are defined in a struct rtc_time.
 */
if (tm->tm_year < 45)
tm->tm_year += 30;
tm->tm_year += 40;
if (tm->tm_year < 70)
tm->tm_year += 100;
tm->tm_mon--;
return 0;
}
static int ds1286_set_time(struct device *dev, struct rtc_time *tm)
{
struct ds1286_priv *priv = dev_get_drvdata(dev);
unsigned char mon, day, hrs, min, sec;
unsigned char save_control;
unsigned int yrs;
unsigned long flags;
yrs = tm->tm_year + 1900;
mon = tm->tm_mon + 1; /* tm_mon starts at zero */
day = tm->tm_mday;
hrs = tm->tm_hour;
min = tm->tm_min;
sec = tm->tm_sec;
if (yrs < 1970)
return -EINVAL;
yrs -= 1940;
if (yrs > 255) /* They are unsigned */
return -EINVAL;
if (yrs >= 100)
yrs -= 100;
sec = bin2bcd(sec);
min = bin2bcd(min);
hrs = bin2bcd(hrs);
day = bin2bcd(day);
mon = bin2bcd(mon);
yrs = bin2bcd(yrs);
spin_lock_irqsave(&priv->lock, flags);
save_control = ds1286_rtc_read(priv, RTC_CMD);
ds1286_rtc_write(priv, (save_control|RTC_TE), RTC_CMD);
ds1286_rtc_write(priv, yrs, RTC_YEAR);
ds1286_rtc_write(priv, mon, RTC_MONTH);
ds1286_rtc_write(priv, day, RTC_DATE);
ds1286_rtc_write(priv, hrs, RTC_HOURS);
ds1286_rtc_write(priv, min, RTC_MINUTES);
ds1286_rtc_write(priv, sec, RTC_SECONDS);
ds1286_rtc_write(priv, 0, RTC_HUNDREDTH_SECOND);
ds1286_rtc_write(priv, save_control, RTC_CMD);
spin_unlock_irqrestore(&priv->lock, flags);
return 0;
}
static int ds1286_read_alarm(struct device *dev, struct rtc_wkalrm *alm)
{
struct ds1286_priv *priv = dev_get_drvdata(dev);
unsigned long flags;
/*
* Only the values that we read from the RTC are set. That
* means only tm_wday, tm_hour, tm_min.
*/
spin_lock_irqsave(&priv->lock, flags);
alm->time.tm_min = ds1286_rtc_read(priv, RTC_MINUTES_ALARM) & 0x7f;
alm->time.tm_hour = ds1286_rtc_read(priv, RTC_HOURS_ALARM) & 0x1f;
alm->time.tm_wday = ds1286_rtc_read(priv, RTC_DAY_ALARM) & 0x07;
ds1286_rtc_read(priv, RTC_CMD);
spin_unlock_irqrestore(&priv->lock, flags);
alm->time.tm_min = bcd2bin(alm->time.tm_min);
alm->time.tm_hour = bcd2bin(alm->time.tm_hour);
alm->time.tm_sec = 0;
return 0;
}
static int ds1286_set_alarm(struct device *dev, struct rtc_wkalrm *alm)
{
struct ds1286_priv *priv = dev_get_drvdata(dev);
unsigned char hrs, min, sec;
hrs = alm->time.tm_hour;
min = alm->time.tm_min;
sec = alm->time.tm_sec;
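/* Out-of-range fields are forced to 0xff so that bit 7, the
 * per-field "don't care" mask, ends up set in the alarm register.
 */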
if (hrs >= 24)
hrs = 0xff;
if (min >= 60)
min = 0xff;
if (sec != 0)
return -EINVAL;
min = bin2bcd(min);
hrs = bin2bcd(hrs);
spin_lock(&priv->lock);
ds1286_rtc_write(priv, hrs, RTC_HOURS_ALARM);
ds1286_rtc_write(priv, min, RTC_MINUTES_ALARM);
spin_unlock(&priv->lock);
return 0;
}
static const struct rtc_class_ops ds1286_ops = {
.ioctl = ds1286_ioctl,
.proc = ds1286_proc,
.read_time = ds1286_read_time,
.set_time = ds1286_set_time,
.read_alarm = ds1286_read_alarm,
.set_alarm = ds1286_set_alarm,
.alarm_irq_enable = ds1286_alarm_irq_enable,
};
static int ds1286_probe(struct platform_device *pdev)
{
struct rtc_device *rtc;
struct ds1286_priv *priv;
priv = devm_kzalloc(&pdev->dev, sizeof(struct ds1286_priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
priv->rtcregs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(priv->rtcregs))
return PTR_ERR(priv->rtcregs);
spin_lock_init(&priv->lock);
platform_set_drvdata(pdev, priv);
rtc = devm_rtc_device_register(&pdev->dev, "ds1286", &ds1286_ops,
THIS_MODULE);
if (IS_ERR(rtc))
return PTR_ERR(rtc);
priv->rtc = rtc;
return 0;
}
static struct platform_driver ds1286_platform_driver = {
.driver = {
.name = "rtc-ds1286",
},
.probe = ds1286_probe,
};
module_platform_driver(ds1286_platform_driver);
MODULE_AUTHOR("Thomas Bogendoerfer <[email protected]>");
MODULE_DESCRIPTION("DS1286 RTC driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:rtc-ds1286");
| linux-master | drivers/rtc/rtc-ds1286.c |
// SPDX-License-Identifier: GPL-2.0-only
/* rtc-generic: RTC driver using the generic RTC abstraction
*
* Copyright (C) 2008 Kyle McMartin <[email protected]>
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/time.h>
#include <linux/platform_device.h>
#include <linux/rtc.h>
static int __init generic_rtc_probe(struct platform_device *dev)
{
struct rtc_device *rtc;
const struct rtc_class_ops *ops = dev_get_platdata(&dev->dev);
rtc = devm_rtc_device_register(&dev->dev, "rtc-generic",
ops, THIS_MODULE);
if (IS_ERR(rtc))
return PTR_ERR(rtc);
platform_set_drvdata(dev, rtc);
return 0;
}
static struct platform_driver generic_rtc_driver = {
.driver = {
.name = "rtc-generic",
},
};
module_platform_driver_probe(generic_rtc_driver, generic_rtc_probe);
MODULE_AUTHOR("Kyle McMartin <[email protected]>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Generic RTC driver");
MODULE_ALIAS("platform:rtc-generic");
| linux-master | drivers/rtc/rtc-generic.c |
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2022 Nuvoton Technology Corporation
#include <linux/bcd.h>
#include <linux/clk-provider.h>
#include <linux/err.h>
#include <linux/i2c.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/rtc.h>
#include <linux/slab.h>
#define NCT3018Y_REG_SC 0x00 /* seconds */
#define NCT3018Y_REG_SCA 0x01 /* alarm */
#define NCT3018Y_REG_MN 0x02
#define NCT3018Y_REG_MNA 0x03 /* alarm */
#define NCT3018Y_REG_HR 0x04
#define NCT3018Y_REG_HRA 0x05 /* alarm */
#define NCT3018Y_REG_DW 0x06
#define NCT3018Y_REG_DM 0x07
#define NCT3018Y_REG_MO 0x08
#define NCT3018Y_REG_YR 0x09
#define NCT3018Y_REG_CTRL 0x0A /* timer control */
#define NCT3018Y_REG_ST 0x0B /* status */
#define NCT3018Y_REG_CLKO 0x0C /* clock out */
#define NCT3018Y_BIT_AF BIT(7)
#define NCT3018Y_BIT_ST BIT(7)
#define NCT3018Y_BIT_DM BIT(6)
#define NCT3018Y_BIT_HF BIT(5)
#define NCT3018Y_BIT_DSM BIT(4)
#define NCT3018Y_BIT_AIE BIT(3)
#define NCT3018Y_BIT_OFIE BIT(2)
#define NCT3018Y_BIT_CIE BIT(1)
#define NCT3018Y_BIT_TWO BIT(0)
#define NCT3018Y_REG_BAT_MASK 0x07
#define NCT3018Y_REG_CLKO_F_MASK 0x03 /* frequency mask */
#define NCT3018Y_REG_CLKO_CKE 0x80 /* clock out enabled */
struct nct3018y {
struct rtc_device *rtc;
struct i2c_client *client;
#ifdef CONFIG_COMMON_CLK
struct clk_hw clkout_hw;
#endif
};
static int nct3018y_set_alarm_mode(struct i2c_client *client, bool on)
{
int err, flags;
dev_dbg(&client->dev, "%s:on:%d\n", __func__, on);
flags = i2c_smbus_read_byte_data(client, NCT3018Y_REG_CTRL);
if (flags < 0) {
dev_dbg(&client->dev,
"Failed to read NCT3018Y_REG_CTRL\n");
return flags;
}
if (on)
flags |= NCT3018Y_BIT_AIE;
else
flags &= ~NCT3018Y_BIT_AIE;
flags |= NCT3018Y_BIT_CIE;
err = i2c_smbus_write_byte_data(client, NCT3018Y_REG_CTRL, flags);
if (err < 0) {
dev_dbg(&client->dev, "Unable to write NCT3018Y_REG_CTRL\n");
return err;
}
flags = i2c_smbus_read_byte_data(client, NCT3018Y_REG_ST);
if (flags < 0) {
dev_dbg(&client->dev,
"Failed to read NCT3018Y_REG_ST\n");
return flags;
}
flags &= ~(NCT3018Y_BIT_AF);
err = i2c_smbus_write_byte_data(client, NCT3018Y_REG_ST, flags);
if (err < 0) {
dev_dbg(&client->dev, "Unable to write NCT3018Y_REG_ST\n");
return err;
}
return 0;
}
static int nct3018y_get_alarm_mode(struct i2c_client *client, unsigned char *alarm_enable,
unsigned char *alarm_flag)
{
int flags;
if (alarm_enable) {
dev_dbg(&client->dev, "%s:NCT3018Y_REG_CTRL\n", __func__);
flags = i2c_smbus_read_byte_data(client, NCT3018Y_REG_CTRL);
if (flags < 0)
return flags;
*alarm_enable = flags & NCT3018Y_BIT_AIE;
}
if (alarm_flag) {
dev_dbg(&client->dev, "%s:NCT3018Y_REG_ST\n", __func__);
flags = i2c_smbus_read_byte_data(client, NCT3018Y_REG_ST);
if (flags < 0)
return flags;
*alarm_flag = flags & NCT3018Y_BIT_AF;
}
dev_dbg(&client->dev, "%s:alarm_enable:%x alarm_flag:%x\n",
__func__, *alarm_enable, *alarm_flag);
return 0;
}
static irqreturn_t nct3018y_irq(int irq, void *dev_id)
{
struct nct3018y *nct3018y = i2c_get_clientdata(dev_id);
struct i2c_client *client = nct3018y->client;
int err;
unsigned char alarm_flag;
unsigned char alarm_enable;
dev_dbg(&client->dev, "%s:irq:%d\n", __func__, irq);
err = nct3018y_get_alarm_mode(nct3018y->client, &alarm_enable, &alarm_flag);
if (err)
return IRQ_NONE;
if (alarm_flag) {
dev_dbg(&client->dev, "%s:alarm flag:%x\n",
__func__, alarm_flag);
rtc_update_irq(nct3018y->rtc, 1, RTC_IRQF | RTC_AF);
nct3018y_set_alarm_mode(nct3018y->client, 0);
dev_dbg(&client->dev, "%s:IRQ_HANDLED\n", __func__);
return IRQ_HANDLED;
}
return IRQ_NONE;
}
/*
* In the routines that deal directly with the nct3018y hardware, we use
* rtc_time -- month 0-11, hour 0-23, yr = calendar year-epoch.
*/
static int nct3018y_rtc_read_time(struct device *dev, struct rtc_time *tm)
{
struct i2c_client *client = to_i2c_client(dev);
unsigned char buf[10];
int err;
err = i2c_smbus_read_i2c_block_data(client, NCT3018Y_REG_ST, 1, buf);
if (err < 0)
return err;
if (!buf[0]) {
dev_dbg(&client->dev, "voltage <= 1.7V, date/time is not reliable.\n");
return -EINVAL;
}
err = i2c_smbus_read_i2c_block_data(client, NCT3018Y_REG_SC, sizeof(buf), buf);
if (err < 0)
return err;
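/* The 10-byte block read spans SC..YR, so the alarm registers show
 * up at the odd offsets buf[1]/buf[3]/buf[5] and are skipped below.
 */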
tm->tm_sec = bcd2bin(buf[0] & 0x7F);
tm->tm_min = bcd2bin(buf[2] & 0x7F);
tm->tm_hour = bcd2bin(buf[4] & 0x3F);
tm->tm_wday = buf[6] & 0x07;
tm->tm_mday = bcd2bin(buf[7] & 0x3F);
tm->tm_mon = bcd2bin(buf[8] & 0x1F) - 1;
tm->tm_year = bcd2bin(buf[9]) + 100;
return 0;
}
static int nct3018y_rtc_set_time(struct device *dev, struct rtc_time *tm)
{
struct i2c_client *client = to_i2c_client(dev);
unsigned char buf[4] = {0};
int err;
buf[0] = bin2bcd(tm->tm_sec);
err = i2c_smbus_write_byte_data(client, NCT3018Y_REG_SC, buf[0]);
if (err < 0) {
dev_dbg(&client->dev, "Unable to write NCT3018Y_REG_SC\n");
return err;
}
buf[0] = bin2bcd(tm->tm_min);
err = i2c_smbus_write_byte_data(client, NCT3018Y_REG_MN, buf[0]);
if (err < 0) {
dev_dbg(&client->dev, "Unable to write NCT3018Y_REG_MN\n");
return err;
}
buf[0] = bin2bcd(tm->tm_hour);
err = i2c_smbus_write_byte_data(client, NCT3018Y_REG_HR, buf[0]);
if (err < 0) {
dev_dbg(&client->dev, "Unable to write NCT3018Y_REG_HR\n");
return err;
}
buf[0] = tm->tm_wday & 0x07;
buf[1] = bin2bcd(tm->tm_mday);
buf[2] = bin2bcd(tm->tm_mon + 1);
buf[3] = bin2bcd(tm->tm_year - 100);
err = i2c_smbus_write_i2c_block_data(client, NCT3018Y_REG_DW,
sizeof(buf), buf);
if (err < 0) {
dev_dbg(&client->dev, "Unable to write for day and mon and year\n");
return -EIO;
}
return err;
}
static int nct3018y_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *tm)
{
struct i2c_client *client = to_i2c_client(dev);
unsigned char buf[5];
int err;
err = i2c_smbus_read_i2c_block_data(client, NCT3018Y_REG_SCA,
sizeof(buf), buf);
if (err < 0) {
dev_dbg(&client->dev, "Unable to read alarm\n");
return -EIO;
}
dev_dbg(&client->dev, "%s: raw data is sec=%02x, min=%02x hr=%02x\n",
__func__, buf[0], buf[2], buf[4]);
tm->time.tm_sec = bcd2bin(buf[0] & 0x7F);
tm->time.tm_min = bcd2bin(buf[2] & 0x7F);
tm->time.tm_hour = bcd2bin(buf[4] & 0x3F);
err = nct3018y_get_alarm_mode(client, &tm->enabled, &tm->pending);
if (err < 0)
return err;
dev_dbg(&client->dev, "%s:s=%d m=%d, hr=%d, enabled=%d, pending=%d\n",
__func__, tm->time.tm_sec, tm->time.tm_min,
tm->time.tm_hour, tm->enabled, tm->pending);
return 0;
}
static int nct3018y_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *tm)
{
struct i2c_client *client = to_i2c_client(dev);
int err;
dev_dbg(dev, "%s, sec=%d, min=%d hour=%d tm->enabled:%d\n",
__func__, tm->time.tm_sec, tm->time.tm_min, tm->time.tm_hour,
tm->enabled);
err = i2c_smbus_write_byte_data(client, NCT3018Y_REG_SCA, bin2bcd(tm->time.tm_sec));
if (err < 0) {
dev_dbg(&client->dev, "Unable to write NCT3018Y_REG_SCA\n");
return err;
}
err = i2c_smbus_write_byte_data(client, NCT3018Y_REG_MNA, bin2bcd(tm->time.tm_min));
if (err < 0) {
dev_dbg(&client->dev, "Unable to write NCT3018Y_REG_MNA\n");
return err;
}
err = i2c_smbus_write_byte_data(client, NCT3018Y_REG_HRA, bin2bcd(tm->time.tm_hour));
if (err < 0) {
dev_dbg(&client->dev, "Unable to write NCT3018Y_REG_HRA\n");
return err;
}
return nct3018y_set_alarm_mode(client, tm->enabled);
}
static int nct3018y_irq_enable(struct device *dev, unsigned int enabled)
{
dev_dbg(dev, "%s: alarm enable=%d\n", __func__, enabled);
return nct3018y_set_alarm_mode(to_i2c_client(dev), enabled);
}
static int nct3018y_ioctl(struct device *dev, unsigned int cmd, unsigned long arg)
{
struct i2c_client *client = to_i2c_client(dev);
int status, flags = 0;
switch (cmd) {
case RTC_VL_READ:
status = i2c_smbus_read_byte_data(client, NCT3018Y_REG_ST);
if (status < 0)
return status;
if (!(status & NCT3018Y_REG_BAT_MASK))
flags |= RTC_VL_DATA_INVALID;
return put_user(flags, (unsigned int __user *)arg);
default:
return -ENOIOCTLCMD;
}
}
#ifdef CONFIG_COMMON_CLK
/*
* Handling of the clkout
*/
#define clkout_hw_to_nct3018y(_hw) container_of(_hw, struct nct3018y, clkout_hw)
static const int clkout_rates[] = {
32768,
1024,
32,
1,
};
static unsigned long nct3018y_clkout_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
struct nct3018y *nct3018y = clkout_hw_to_nct3018y(hw);
struct i2c_client *client = nct3018y->client;
int flags;
flags = i2c_smbus_read_byte_data(client, NCT3018Y_REG_CLKO);
if (flags < 0)
return 0;
flags &= NCT3018Y_REG_CLKO_F_MASK;
return clkout_rates[flags];
}
static long nct3018y_clkout_round_rate(struct clk_hw *hw, unsigned long rate,
unsigned long *prate)
{
int i;
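/* clkout_rates[] is sorted in descending order; pick the highest
 * supported rate that does not exceed the requested one.
 */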
for (i = 0; i < ARRAY_SIZE(clkout_rates); i++)
if (clkout_rates[i] <= rate)
return clkout_rates[i];
return 0;
}
static int nct3018y_clkout_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate)
{
struct nct3018y *nct3018y = clkout_hw_to_nct3018y(hw);
struct i2c_client *client = nct3018y->client;
int i, flags;
flags = i2c_smbus_read_byte_data(client, NCT3018Y_REG_CLKO);
if (flags < 0)
return flags;
for (i = 0; i < ARRAY_SIZE(clkout_rates); i++)
if (clkout_rates[i] == rate) {
flags &= ~NCT3018Y_REG_CLKO_F_MASK;
flags |= i;
return i2c_smbus_write_byte_data(client, NCT3018Y_REG_CLKO, flags);
}
return -EINVAL;
}
static int nct3018y_clkout_control(struct clk_hw *hw, bool enable)
{
struct nct3018y *nct3018y = clkout_hw_to_nct3018y(hw);
struct i2c_client *client = nct3018y->client;
int flags;
flags = i2c_smbus_read_byte_data(client, NCT3018Y_REG_CLKO);
if (flags < 0)
return flags;
if (enable)
flags |= NCT3018Y_REG_CLKO_CKE;
else
flags &= ~NCT3018Y_REG_CLKO_CKE;
return i2c_smbus_write_byte_data(client, NCT3018Y_REG_CLKO, flags);
}
static int nct3018y_clkout_prepare(struct clk_hw *hw)
{
return nct3018y_clkout_control(hw, 1);
}
static void nct3018y_clkout_unprepare(struct clk_hw *hw)
{
nct3018y_clkout_control(hw, 0);
}
static int nct3018y_clkout_is_prepared(struct clk_hw *hw)
{
struct nct3018y *nct3018y = clkout_hw_to_nct3018y(hw);
struct i2c_client *client = nct3018y->client;
int flags;
flags = i2c_smbus_read_byte_data(client, NCT3018Y_REG_CLKO);
if (flags < 0)
return flags;
return flags & NCT3018Y_REG_CLKO_CKE;
}
static const struct clk_ops nct3018y_clkout_ops = {
.prepare = nct3018y_clkout_prepare,
.unprepare = nct3018y_clkout_unprepare,
.is_prepared = nct3018y_clkout_is_prepared,
.recalc_rate = nct3018y_clkout_recalc_rate,
.round_rate = nct3018y_clkout_round_rate,
.set_rate = nct3018y_clkout_set_rate,
};
static struct clk *nct3018y_clkout_register_clk(struct nct3018y *nct3018y)
{
struct i2c_client *client = nct3018y->client;
struct device_node *node = client->dev.of_node;
struct clk *clk;
struct clk_init_data init;
init.name = "nct3018y-clkout";
init.ops = &nct3018y_clkout_ops;
init.flags = 0;
init.parent_names = NULL;
init.num_parents = 0;
nct3018y->clkout_hw.init = &init;
/* optional override of the clock name */
of_property_read_string(node, "clock-output-names", &init.name);
/* register the clock */
clk = devm_clk_register(&client->dev, &nct3018y->clkout_hw);
if (!IS_ERR(clk))
of_clk_add_provider(node, of_clk_src_simple_get, clk);
return clk;
}
#endif
static const struct rtc_class_ops nct3018y_rtc_ops = {
.read_time = nct3018y_rtc_read_time,
.set_time = nct3018y_rtc_set_time,
.read_alarm = nct3018y_rtc_read_alarm,
.set_alarm = nct3018y_rtc_set_alarm,
.alarm_irq_enable = nct3018y_irq_enable,
.ioctl = nct3018y_ioctl,
};
static int nct3018y_probe(struct i2c_client *client)
{
struct nct3018y *nct3018y;
int err, flags;
if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C |
I2C_FUNC_SMBUS_BYTE |
I2C_FUNC_SMBUS_BLOCK_DATA))
return -ENODEV;
nct3018y = devm_kzalloc(&client->dev, sizeof(struct nct3018y),
GFP_KERNEL);
if (!nct3018y)
return -ENOMEM;
i2c_set_clientdata(client, nct3018y);
nct3018y->client = client;
device_set_wakeup_capable(&client->dev, 1);
flags = i2c_smbus_read_byte_data(client, NCT3018Y_REG_CTRL);
if (flags < 0) {
dev_dbg(&client->dev, "%s: read error\n", __func__);
return flags;
} else if (flags & NCT3018Y_BIT_TWO) {
dev_dbg(&client->dev, "%s: NCT3018Y_BIT_TWO is set\n", __func__);
}
flags = NCT3018Y_BIT_TWO;
err = i2c_smbus_write_byte_data(client, NCT3018Y_REG_CTRL, flags);
if (err < 0) {
dev_dbg(&client->dev, "Unable to write NCT3018Y_REG_CTRL\n");
return err;
}
flags = 0;
err = i2c_smbus_write_byte_data(client, NCT3018Y_REG_ST, flags);
if (err < 0) {
dev_dbg(&client->dev, "%s: write error\n", __func__);
return err;
}
nct3018y->rtc = devm_rtc_allocate_device(&client->dev);
if (IS_ERR(nct3018y->rtc))
return PTR_ERR(nct3018y->rtc);
nct3018y->rtc->ops = &nct3018y_rtc_ops;
nct3018y->rtc->range_min = RTC_TIMESTAMP_BEGIN_2000;
nct3018y->rtc->range_max = RTC_TIMESTAMP_END_2099;
if (client->irq > 0) {
err = devm_request_threaded_irq(&client->dev, client->irq,
NULL, nct3018y_irq,
IRQF_ONESHOT | IRQF_TRIGGER_FALLING,
"nct3018y", client);
if (err) {
dev_dbg(&client->dev, "unable to request IRQ %d\n", client->irq);
return err;
}
} else {
clear_bit(RTC_FEATURE_UPDATE_INTERRUPT, nct3018y->rtc->features);
clear_bit(RTC_FEATURE_ALARM, nct3018y->rtc->features);
}
#ifdef CONFIG_COMMON_CLK
/* register clk in common clk framework */
nct3018y_clkout_register_clk(nct3018y);
#endif
return devm_rtc_register_device(nct3018y->rtc);
}
static const struct i2c_device_id nct3018y_id[] = {
{ "nct3018y", 0 },
{ }
};
MODULE_DEVICE_TABLE(i2c, nct3018y_id);
static const struct of_device_id nct3018y_of_match[] = {
{ .compatible = "nuvoton,nct3018y" },
{}
};
MODULE_DEVICE_TABLE(of, nct3018y_of_match);
static struct i2c_driver nct3018y_driver = {
.driver = {
.name = "rtc-nct3018y",
.of_match_table = nct3018y_of_match,
},
.probe = nct3018y_probe,
.id_table = nct3018y_id,
};
module_i2c_driver(nct3018y_driver);
MODULE_AUTHOR("Medad CChien <[email protected]>");
MODULE_AUTHOR("Mia Lin <[email protected]>");
MODULE_DESCRIPTION("Nuvoton NCT3018Y RTC driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/rtc/rtc-nct3018y.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Real Time Clock driver for Marvell 88PM80x PMIC
*
* Copyright (c) 2012 Marvell International Ltd.
* Wenzeng Chen<[email protected]>
* Qiao Zhou <[email protected]>
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/regmap.h>
#include <linux/mfd/core.h>
#include <linux/mfd/88pm80x.h>
#include <linux/rtc.h>
#define PM800_RTC_COUNTER1 (0xD1)
#define PM800_RTC_COUNTER2 (0xD2)
#define PM800_RTC_COUNTER3 (0xD3)
#define PM800_RTC_COUNTER4 (0xD4)
#define PM800_RTC_EXPIRE1_1 (0xD5)
#define PM800_RTC_EXPIRE1_2 (0xD6)
#define PM800_RTC_EXPIRE1_3 (0xD7)
#define PM800_RTC_EXPIRE1_4 (0xD8)
#define PM800_RTC_TRIM1 (0xD9)
#define PM800_RTC_TRIM2 (0xDA)
#define PM800_RTC_TRIM3 (0xDB)
#define PM800_RTC_TRIM4 (0xDC)
#define PM800_RTC_EXPIRE2_1 (0xDD)
#define PM800_RTC_EXPIRE2_2 (0xDE)
#define PM800_RTC_EXPIRE2_3 (0xDF)
#define PM800_RTC_EXPIRE2_4 (0xE0)
#define PM800_POWER_DOWN_LOG1 (0xE5)
#define PM800_POWER_DOWN_LOG2 (0xE6)
struct pm80x_rtc_info {
struct pm80x_chip *chip;
struct regmap *map;
struct rtc_device *rtc_dev;
struct device *dev;
int irq;
};
static irqreturn_t rtc_update_handler(int irq, void *data)
{
struct pm80x_rtc_info *info = (struct pm80x_rtc_info *)data;
int mask;
mask = PM800_ALARM | PM800_ALARM_WAKEUP;
regmap_update_bits(info->map, PM800_RTC_CONTROL, mask | PM800_ALARM1_EN,
mask);
rtc_update_irq(info->rtc_dev, 1, RTC_AF);
return IRQ_HANDLED;
}
static int pm80x_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
{
struct pm80x_rtc_info *info = dev_get_drvdata(dev);
if (enabled)
regmap_update_bits(info->map, PM800_RTC_CONTROL,
PM800_ALARM1_EN, PM800_ALARM1_EN);
else
regmap_update_bits(info->map, PM800_RTC_CONTROL,
PM800_ALARM1_EN, 0);
return 0;
}
/*
* Calculate the next alarm time given the requested alarm time mask
* and the current time.
*/
static void rtc_next_alarm_time(struct rtc_time *next, struct rtc_time *now,
struct rtc_time *alrm)
{
unsigned long next_time;
unsigned long now_time;
next->tm_year = now->tm_year;
next->tm_mon = now->tm_mon;
next->tm_mday = now->tm_mday;
next->tm_hour = alrm->tm_hour;
next->tm_min = alrm->tm_min;
next->tm_sec = alrm->tm_sec;
now_time = rtc_tm_to_time64(now);
next_time = rtc_tm_to_time64(next);
if (next_time < now_time) {
/* Advance one day */
next_time += 60 * 60 * 24;
rtc_time64_to_tm(next_time, next);
}
}
static int pm80x_rtc_read_time(struct device *dev, struct rtc_time *tm)
{
struct pm80x_rtc_info *info = dev_get_drvdata(dev);
unsigned char buf[4];
unsigned long ticks, base, data;
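/*
 * Wall-clock time is <32-bit free-running counter> + <base>; the base
 * is stashed in the EXPIRE2 scratch registers, and only the base is
 * rewritten when the time is set.
 */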
regmap_raw_read(info->map, PM800_RTC_EXPIRE2_1, buf, 4);
base = ((unsigned long)buf[3] << 24) | (buf[2] << 16) |
(buf[1] << 8) | buf[0];
dev_dbg(info->dev, "%x-%x-%x-%x\n", buf[0], buf[1], buf[2], buf[3]);
/* load 32-bit read-only counter */
regmap_raw_read(info->map, PM800_RTC_COUNTER1, buf, 4);
data = ((unsigned long)buf[3] << 24) | (buf[2] << 16) |
(buf[1] << 8) | buf[0];
ticks = base + data;
dev_dbg(info->dev, "get base:0x%lx, RO count:0x%lx, ticks:0x%lx\n",
base, data, ticks);
rtc_time64_to_tm(ticks, tm);
return 0;
}
static int pm80x_rtc_set_time(struct device *dev, struct rtc_time *tm)
{
struct pm80x_rtc_info *info = dev_get_drvdata(dev);
unsigned char buf[4];
unsigned long ticks, base, data;
ticks = rtc_tm_to_time64(tm);
/* load 32-bit read-only counter */
regmap_raw_read(info->map, PM800_RTC_COUNTER1, buf, 4);
data = ((unsigned long)buf[3] << 24) | (buf[2] << 16) |
(buf[1] << 8) | buf[0];
base = ticks - data;
dev_dbg(info->dev, "set base:0x%lx, RO count:0x%lx, ticks:0x%lx\n",
base, data, ticks);
buf[0] = base & 0xFF;
buf[1] = (base >> 8) & 0xFF;
buf[2] = (base >> 16) & 0xFF;
buf[3] = (base >> 24) & 0xFF;
regmap_raw_write(info->map, PM800_RTC_EXPIRE2_1, buf, 4);
return 0;
}
static int pm80x_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
{
struct pm80x_rtc_info *info = dev_get_drvdata(dev);
unsigned char buf[4];
unsigned long ticks, base, data;
int ret;
regmap_raw_read(info->map, PM800_RTC_EXPIRE2_1, buf, 4);
base = ((unsigned long)buf[3] << 24) | (buf[2] << 16) |
(buf[1] << 8) | buf[0];
dev_dbg(info->dev, "%x-%x-%x-%x\n", buf[0], buf[1], buf[2], buf[3]);
regmap_raw_read(info->map, PM800_RTC_EXPIRE1_1, buf, 4);
data = ((unsigned long)buf[3] << 24) | (buf[2] << 16) |
(buf[1] << 8) | buf[0];
ticks = base + data;
dev_dbg(info->dev, "get base:0x%lx, RO count:0x%lx, ticks:0x%lx\n",
base, data, ticks);
rtc_time64_to_tm(ticks, &alrm->time);
regmap_read(info->map, PM800_RTC_CONTROL, &ret);
alrm->enabled = (ret & PM800_ALARM1_EN) ? 1 : 0;
alrm->pending = (ret & (PM800_ALARM | PM800_ALARM_WAKEUP)) ? 1 : 0;
return 0;
}
static int pm80x_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
{
struct pm80x_rtc_info *info = dev_get_drvdata(dev);
struct rtc_time now_tm, alarm_tm;
unsigned long ticks, base, data;
unsigned char buf[4];
int mask;
regmap_update_bits(info->map, PM800_RTC_CONTROL, PM800_ALARM1_EN, 0);
regmap_raw_read(info->map, PM800_RTC_EXPIRE2_1, buf, 4);
base = ((unsigned long)buf[3] << 24) | (buf[2] << 16) |
(buf[1] << 8) | buf[0];
dev_dbg(info->dev, "%x-%x-%x-%x\n", buf[0], buf[1], buf[2], buf[3]);
/* load 32-bit read-only counter */
regmap_raw_read(info->map, PM800_RTC_COUNTER1, buf, 4);
data = ((unsigned long)buf[3] << 24) | (buf[2] << 16) |
(buf[1] << 8) | buf[0];
ticks = base + data;
dev_dbg(info->dev, "get base:0x%lx, RO count:0x%lx, ticks:0x%lx\n",
base, data, ticks);
rtc_time64_to_tm(ticks, &now_tm);
dev_dbg(info->dev, "%s, now time : %lu\n", __func__, ticks);
rtc_next_alarm_time(&alarm_tm, &now_tm, &alrm->time);
/* get new ticks for alarm in 24 hours */
ticks = rtc_tm_to_time64(&alarm_tm);
dev_dbg(info->dev, "%s, alarm time: %lu\n", __func__, ticks);
data = ticks - base;
buf[0] = data & 0xff;
buf[1] = (data >> 8) & 0xff;
buf[2] = (data >> 16) & 0xff;
buf[3] = (data >> 24) & 0xff;
regmap_raw_write(info->map, PM800_RTC_EXPIRE1_1, buf, 4);
if (alrm->enabled) {
mask = PM800_ALARM | PM800_ALARM_WAKEUP | PM800_ALARM1_EN;
regmap_update_bits(info->map, PM800_RTC_CONTROL, mask, mask);
} else {
mask = PM800_ALARM | PM800_ALARM_WAKEUP | PM800_ALARM1_EN;
regmap_update_bits(info->map, PM800_RTC_CONTROL, mask,
PM800_ALARM | PM800_ALARM_WAKEUP);
}
return 0;
}
static const struct rtc_class_ops pm80x_rtc_ops = {
.read_time = pm80x_rtc_read_time,
.set_time = pm80x_rtc_set_time,
.read_alarm = pm80x_rtc_read_alarm,
.set_alarm = pm80x_rtc_set_alarm,
.alarm_irq_enable = pm80x_rtc_alarm_irq_enable,
};
#ifdef CONFIG_PM_SLEEP
static int pm80x_rtc_suspend(struct device *dev)
{
return pm80x_dev_suspend(dev);
}
static int pm80x_rtc_resume(struct device *dev)
{
return pm80x_dev_resume(dev);
}
#endif
static SIMPLE_DEV_PM_OPS(pm80x_rtc_pm_ops, pm80x_rtc_suspend, pm80x_rtc_resume);
static int pm80x_rtc_probe(struct platform_device *pdev)
{
struct pm80x_chip *chip = dev_get_drvdata(pdev->dev.parent);
struct pm80x_rtc_pdata *pdata = dev_get_platdata(&pdev->dev);
struct pm80x_rtc_info *info;
struct device_node *node = pdev->dev.of_node;
int ret;
if (!pdata && !node) {
dev_err(&pdev->dev,
"pm80x-rtc requires platform data or of_node\n");
return -EINVAL;
}
if (!pdata) {
pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
if (!pdata) {
dev_err(&pdev->dev, "failed to allocate memory\n");
return -ENOMEM;
}
}
info =
devm_kzalloc(&pdev->dev, sizeof(struct pm80x_rtc_info), GFP_KERNEL);
if (!info)
return -ENOMEM;
info->irq = platform_get_irq(pdev, 0);
if (info->irq < 0) {
ret = -EINVAL;
goto out;
}
info->chip = chip;
info->map = chip->regmap;
if (!info->map) {
dev_err(&pdev->dev, "no regmap!\n");
ret = -EINVAL;
goto out;
}
info->dev = &pdev->dev;
dev_set_drvdata(&pdev->dev, info);
info->rtc_dev = devm_rtc_allocate_device(&pdev->dev);
if (IS_ERR(info->rtc_dev))
return PTR_ERR(info->rtc_dev);
ret = pm80x_request_irq(chip, info->irq, rtc_update_handler,
IRQF_ONESHOT, "rtc", info);
if (ret < 0) {
dev_err(chip->dev, "Failed to request IRQ: #%d: %d\n",
info->irq, ret);
goto out;
}
info->rtc_dev->ops = &pm80x_rtc_ops;
info->rtc_dev->range_max = U32_MAX;
ret = devm_rtc_register_device(info->rtc_dev);
if (ret)
goto out_rtc;
/*
 * Enable the internal XO instead of the internal 3.25MHz clock, since
 * the XO keeps free-running in the PMIC power-down state.
 */
regmap_update_bits(info->map, PM800_RTC_CONTROL, PM800_RTC1_USE_XO,
PM800_RTC1_USE_XO);
/* remember whether this power up is caused by PMIC RTC or not */
info->rtc_dev->dev.platform_data = &pdata->rtc_wakeup;
device_init_wakeup(&pdev->dev, 1);
return 0;
out_rtc:
pm80x_free_irq(chip, info->irq, info);
out:
return ret;
}
static void pm80x_rtc_remove(struct platform_device *pdev)
{
struct pm80x_rtc_info *info = platform_get_drvdata(pdev);
pm80x_free_irq(info->chip, info->irq, info);
}
static struct platform_driver pm80x_rtc_driver = {
.driver = {
.name = "88pm80x-rtc",
.pm = &pm80x_rtc_pm_ops,
},
.probe = pm80x_rtc_probe,
.remove_new = pm80x_rtc_remove,
};
module_platform_driver(pm80x_rtc_driver);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Marvell 88PM80x RTC driver");
MODULE_AUTHOR("Qiao Zhou <[email protected]>");
MODULE_ALIAS("platform:88pm80x-rtc");
| linux-master | drivers/rtc/rtc-88pm80x.c |
// SPDX-License-Identifier: GPL-2.0+
//
// Copyright 2004-2008 Freescale Semiconductor, Inc. All Rights Reserved.
#include <linux/io.h>
#include <linux/rtc.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/pm_wakeirq.h>
#include <linux/clk.h>
#include <linux/of.h>
#define RTC_INPUT_CLK_32768HZ (0x00 << 5)
#define RTC_INPUT_CLK_32000HZ (0x01 << 5)
#define RTC_INPUT_CLK_38400HZ (0x02 << 5)
#define RTC_SW_BIT (1 << 0)
#define RTC_ALM_BIT (1 << 2)
#define RTC_1HZ_BIT (1 << 4)
#define RTC_2HZ_BIT (1 << 7)
#define RTC_SAM0_BIT (1 << 8)
#define RTC_SAM1_BIT (1 << 9)
#define RTC_SAM2_BIT (1 << 10)
#define RTC_SAM3_BIT (1 << 11)
#define RTC_SAM4_BIT (1 << 12)
#define RTC_SAM5_BIT (1 << 13)
#define RTC_SAM6_BIT (1 << 14)
#define RTC_SAM7_BIT (1 << 15)
#define PIT_ALL_ON (RTC_2HZ_BIT | RTC_SAM0_BIT | RTC_SAM1_BIT | \
RTC_SAM2_BIT | RTC_SAM3_BIT | RTC_SAM4_BIT | \
RTC_SAM5_BIT | RTC_SAM6_BIT | RTC_SAM7_BIT)
#define RTC_ENABLE_BIT (1 << 7)
#define MAX_PIE_NUM 9
#define MAX_PIE_FREQ 512
#define MXC_RTC_TIME 0
#define MXC_RTC_ALARM 1
#define RTC_HOURMIN 0x00 /* 32bit rtc hour/min counter reg */
#define RTC_SECOND 0x04 /* 32bit rtc seconds counter reg */
#define RTC_ALRM_HM 0x08 /* 32bit rtc alarm hour/min reg */
#define RTC_ALRM_SEC 0x0C /* 32bit rtc alarm seconds reg */
#define RTC_RTCCTL 0x10 /* 32bit rtc control reg */
#define RTC_RTCISR 0x14 /* 32bit rtc interrupt status reg */
#define RTC_RTCIENR 0x18 /* 32bit rtc interrupt enable reg */
#define RTC_STPWCH 0x1C /* 32bit rtc stopwatch min reg */
#define RTC_DAYR 0x20 /* 32bit rtc days counter reg */
#define RTC_DAYALARM 0x24 /* 32bit rtc day alarm reg */
#define RTC_TEST1 0x28 /* 32bit rtc test reg 1 */
#define RTC_TEST2 0x2C /* 32bit rtc test reg 2 */
#define RTC_TEST3 0x30 /* 32bit rtc test reg 3 */
enum imx_rtc_type {
IMX1_RTC,
IMX21_RTC,
};
struct rtc_plat_data {
struct rtc_device *rtc;
void __iomem *ioaddr;
int irq;
struct clk *clk_ref;
struct clk *clk_ipg;
struct rtc_time g_rtc_alarm;
enum imx_rtc_type devtype;
};
static const struct of_device_id imx_rtc_dt_ids[] = {
{ .compatible = "fsl,imx1-rtc", .data = (const void *)IMX1_RTC },
{ .compatible = "fsl,imx21-rtc", .data = (const void *)IMX21_RTC },
{}
};
MODULE_DEVICE_TABLE(of, imx_rtc_dt_ids);
static inline int is_imx1_rtc(struct rtc_plat_data *data)
{
return data->devtype == IMX1_RTC;
}
/*
 * This function is used to obtain the RTC time or the alarm value in
 * seconds.
 */
static time64_t get_alarm_or_time(struct device *dev, int time_alarm)
{
struct rtc_plat_data *pdata = dev_get_drvdata(dev);
void __iomem *ioaddr = pdata->ioaddr;
u32 day = 0, hr = 0, min = 0, sec = 0, hr_min = 0;
switch (time_alarm) {
case MXC_RTC_TIME:
day = readw(ioaddr + RTC_DAYR);
hr_min = readw(ioaddr + RTC_HOURMIN);
sec = readw(ioaddr + RTC_SECOND);
break;
case MXC_RTC_ALARM:
day = readw(ioaddr + RTC_DAYALARM);
hr_min = readw(ioaddr + RTC_ALRM_HM) & 0xffff;
sec = readw(ioaddr + RTC_ALRM_SEC);
break;
}
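/* HOURMIN packs the hour in bits 15:8 and the minute in bits 7:0 */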
hr = hr_min >> 8;
min = hr_min & 0xff;
return ((((time64_t)day * 24 + hr) * 60) + min) * 60 + sec;
}
/*
* This function sets the RTC alarm value or the time value.
*/
static void set_alarm_or_time(struct device *dev, int time_alarm, time64_t time)
{
u32 tod, day, hr, min, sec, temp;
struct rtc_plat_data *pdata = dev_get_drvdata(dev);
void __iomem *ioaddr = pdata->ioaddr;
day = div_s64_rem(time, 86400, &tod);
/* time is within a day now */
hr = tod / 3600;
tod -= hr * 3600;
/* time is within an hour now */
min = tod / 60;
sec = tod - min * 60;
temp = (hr << 8) + min;
switch (time_alarm) {
case MXC_RTC_TIME:
writew(day, ioaddr + RTC_DAYR);
writew(sec, ioaddr + RTC_SECOND);
writew(temp, ioaddr + RTC_HOURMIN);
break;
case MXC_RTC_ALARM:
writew(day, ioaddr + RTC_DAYALARM);
writew(sec, ioaddr + RTC_ALRM_SEC);
writew(temp, ioaddr + RTC_ALRM_HM);
break;
}
}
/*
* This function updates the RTC alarm registers and then clears all the
* interrupt status bits.
*/
static void rtc_update_alarm(struct device *dev, struct rtc_time *alrm)
{
time64_t time;
struct rtc_plat_data *pdata = dev_get_drvdata(dev);
void __iomem *ioaddr = pdata->ioaddr;
time = rtc_tm_to_time64(alrm);
/* clear all the interrupt status bits */
writew(readw(ioaddr + RTC_RTCISR), ioaddr + RTC_RTCISR);
set_alarm_or_time(dev, MXC_RTC_ALARM, time);
}
static void mxc_rtc_irq_enable(struct device *dev, unsigned int bit,
unsigned int enabled)
{
struct rtc_plat_data *pdata = dev_get_drvdata(dev);
void __iomem *ioaddr = pdata->ioaddr;
u32 reg;
unsigned long flags;
spin_lock_irqsave(&pdata->rtc->irq_lock, flags);
reg = readw(ioaddr + RTC_RTCIENR);
if (enabled)
reg |= bit;
else
reg &= ~bit;
writew(reg, ioaddr + RTC_RTCIENR);
spin_unlock_irqrestore(&pdata->rtc->irq_lock, flags);
}
/* This function is the RTC interrupt service routine. */
static irqreturn_t mxc_rtc_interrupt(int irq, void *dev_id)
{
struct platform_device *pdev = dev_id;
struct rtc_plat_data *pdata = platform_get_drvdata(pdev);
void __iomem *ioaddr = pdata->ioaddr;
u32 status;
u32 events = 0;
spin_lock(&pdata->rtc->irq_lock);
status = readw(ioaddr + RTC_RTCISR) & readw(ioaddr + RTC_RTCIENR);
/* clear interrupt sources */
writew(status, ioaddr + RTC_RTCISR);
/* update irq data & counter */
if (status & RTC_ALM_BIT) {
events |= (RTC_AF | RTC_IRQF);
/* RTC alarm should be one-shot */
mxc_rtc_irq_enable(&pdev->dev, RTC_ALM_BIT, 0);
}
if (status & PIT_ALL_ON)
events |= (RTC_PF | RTC_IRQF);
rtc_update_irq(pdata->rtc, 1, events);
spin_unlock(&pdata->rtc->irq_lock);
return IRQ_HANDLED;
}
static int mxc_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
{
mxc_rtc_irq_enable(dev, RTC_ALM_BIT, enabled);
return 0;
}
/*
* This function reads the current RTC time into tm in Gregorian date.
*/
static int mxc_rtc_read_time(struct device *dev, struct rtc_time *tm)
{
time64_t val;
/* Avoid roll-over from reading the different registers */
do {
val = get_alarm_or_time(dev, MXC_RTC_TIME);
} while (val != get_alarm_or_time(dev, MXC_RTC_TIME));
rtc_time64_to_tm(val, tm);
return 0;
}
/*
* This function sets the internal RTC time based on tm in Gregorian date.
*/
static int mxc_rtc_set_time(struct device *dev, struct rtc_time *tm)
{
time64_t time = rtc_tm_to_time64(tm);
/* Avoid roll-over from reading the different registers */
do {
set_alarm_or_time(dev, MXC_RTC_TIME, time);
} while (time != get_alarm_or_time(dev, MXC_RTC_TIME));
return 0;
}
/*
* This function reads the current alarm value into the passed in 'alrm'
* argument. It updates the alrm's pending field value based on the whether
* an alarm interrupt occurs or not.
*/
static int mxc_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
{
struct rtc_plat_data *pdata = dev_get_drvdata(dev);
void __iomem *ioaddr = pdata->ioaddr;
rtc_time64_to_tm(get_alarm_or_time(dev, MXC_RTC_ALARM), &alrm->time);
alrm->pending = ((readw(ioaddr + RTC_RTCISR) & RTC_ALM_BIT)) ? 1 : 0;
return 0;
}
/*
* This function sets the RTC alarm based on passed in alrm.
*/
static int mxc_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
{
struct rtc_plat_data *pdata = dev_get_drvdata(dev);
rtc_update_alarm(dev, &alrm->time);
memcpy(&pdata->g_rtc_alarm, &alrm->time, sizeof(struct rtc_time));
mxc_rtc_irq_enable(dev, RTC_ALM_BIT, alrm->enabled);
return 0;
}
/* RTC layer */
static const struct rtc_class_ops mxc_rtc_ops = {
.read_time = mxc_rtc_read_time,
.set_time = mxc_rtc_set_time,
.read_alarm = mxc_rtc_read_alarm,
.set_alarm = mxc_rtc_set_alarm,
.alarm_irq_enable = mxc_rtc_alarm_irq_enable,
};
static int mxc_rtc_probe(struct platform_device *pdev)
{
struct rtc_device *rtc;
struct rtc_plat_data *pdata = NULL;
u32 reg;
unsigned long rate;
int ret;
pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
if (!pdata)
return -ENOMEM;
pdata->devtype = (uintptr_t)of_device_get_match_data(&pdev->dev);
pdata->ioaddr = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(pdata->ioaddr))
return PTR_ERR(pdata->ioaddr);
rtc = devm_rtc_allocate_device(&pdev->dev);
if (IS_ERR(rtc))
return PTR_ERR(rtc);
pdata->rtc = rtc;
rtc->ops = &mxc_rtc_ops;
if (is_imx1_rtc(pdata)) {
struct rtc_time tm;
/* 9bit days + hours minutes seconds */
rtc->range_max = (1 << 9) * 86400 - 1;
/*
* Set the start date as beginning of the current year. This can
* be overridden using device tree.
*/
rtc_time64_to_tm(ktime_get_real_seconds(), &tm);
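/* mktime64() expects a full Gregorian year, not a tm_year offset */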
rtc->start_secs = mktime64(tm.tm_year + 1900, 1, 1, 0, 0, 0);
rtc->set_start_time = true;
} else {
/* 16bit days + hours minutes seconds */
rtc->range_max = (1 << 16) * 86400ULL - 1;
}
pdata->clk_ipg = devm_clk_get_enabled(&pdev->dev, "ipg");
if (IS_ERR(pdata->clk_ipg)) {
dev_err(&pdev->dev, "unable to get ipg clock!\n");
return PTR_ERR(pdata->clk_ipg);
}
pdata->clk_ref = devm_clk_get_enabled(&pdev->dev, "ref");
if (IS_ERR(pdata->clk_ref)) {
dev_err(&pdev->dev, "unable to get ref clock!\n");
return PTR_ERR(pdata->clk_ref);
}
rate = clk_get_rate(pdata->clk_ref);
if (rate == 32768)
reg = RTC_INPUT_CLK_32768HZ;
else if (rate == 32000)
reg = RTC_INPUT_CLK_32000HZ;
else if (rate == 38400)
reg = RTC_INPUT_CLK_38400HZ;
else {
dev_err(&pdev->dev, "rtc clock is not valid (%lu)\n", rate);
return -EINVAL;
}
reg |= RTC_ENABLE_BIT;
writew(reg, (pdata->ioaddr + RTC_RTCCTL));
if (((readw(pdata->ioaddr + RTC_RTCCTL)) & RTC_ENABLE_BIT) == 0) {
dev_err(&pdev->dev, "hardware module can't be enabled!\n");
return -EIO;
}
platform_set_drvdata(pdev, pdata);
/* Configure and enable the RTC */
pdata->irq = platform_get_irq(pdev, 0);
if (pdata->irq >= 0 &&
devm_request_irq(&pdev->dev, pdata->irq, mxc_rtc_interrupt,
IRQF_SHARED, pdev->name, pdev) < 0) {
dev_warn(&pdev->dev, "interrupt not available.\n");
pdata->irq = -1;
}
if (pdata->irq >= 0) {
device_init_wakeup(&pdev->dev, 1);
ret = dev_pm_set_wake_irq(&pdev->dev, pdata->irq);
if (ret)
dev_err(&pdev->dev, "failed to enable irq wake\n");
}
ret = devm_rtc_register_device(rtc);
return ret;
}
static struct platform_driver mxc_rtc_driver = {
.driver = {
.name = "mxc_rtc",
.of_match_table = imx_rtc_dt_ids,
},
.probe = mxc_rtc_probe,
};
module_platform_driver(mxc_rtc_driver);
MODULE_AUTHOR("Daniel Mack <[email protected]>");
MODULE_DESCRIPTION("RTC driver for Freescale MXC");
MODULE_LICENSE("GPL");
| linux-master | drivers/rtc/rtc-mxc.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Motorola CPCAP PMIC RTC driver
*
* Based on cpcap-regulator.c from Motorola Linux kernel tree
* Copyright (C) 2009 Motorola, Inc.
*
* Rewritten for mainline kernel
* - use DT
* - use regmap
* - use standard interrupt framework
* - use managed device resources
* - remove custom "secure clock daemon" helpers
*
* Copyright (C) 2017 Sebastian Reichel <[email protected]>
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/rtc.h>
#include <linux/err.h>
#include <linux/regmap.h>
#include <linux/mfd/motorola-cpcap.h>
#include <linux/slab.h>
#include <linux/sched.h>
#define SECS_PER_DAY 86400
#define DAY_MASK 0x7FFF
#define TOD1_MASK 0x00FF
#define TOD2_MASK 0x01FF
struct cpcap_time {
int day;
int tod1;
int tod2;
};
struct cpcap_rtc {
struct regmap *regmap;
struct rtc_device *rtc_dev;
u16 vendor;
int alarm_irq;
bool alarm_enabled;
int update_irq;
bool update_enabled;
};
static void cpcap2rtc_time(struct rtc_time *rtc, struct cpcap_time *cpcap)
{
unsigned long tod;
unsigned long time;
tod = (cpcap->tod1 & TOD1_MASK) | ((cpcap->tod2 & TOD2_MASK) << 8);
time = tod + ((cpcap->day & DAY_MASK) * SECS_PER_DAY);
rtc_time64_to_tm(time, rtc);
}
static void rtc2cpcap_time(struct cpcap_time *cpcap, struct rtc_time *rtc)
{
unsigned long time;
time = rtc_tm_to_time64(rtc);
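/* Split into a 15-bit day count plus a 17-bit time-of-day held in
 * TOD1 (low 8 bits) and TOD2 (high 9 bits).
 */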
cpcap->day = time / SECS_PER_DAY;
time %= SECS_PER_DAY;
cpcap->tod2 = (time >> 8) & TOD2_MASK;
cpcap->tod1 = time & TOD1_MASK;
}
static int cpcap_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
{
struct cpcap_rtc *rtc = dev_get_drvdata(dev);
if (rtc->alarm_enabled == enabled)
return 0;
if (enabled)
enable_irq(rtc->alarm_irq);
else
disable_irq(rtc->alarm_irq);
rtc->alarm_enabled = !!enabled;
return 0;
}
static int cpcap_rtc_read_time(struct device *dev, struct rtc_time *tm)
{
struct cpcap_rtc *rtc;
struct cpcap_time cpcap_tm;
int temp_tod2;
int ret;
rtc = dev_get_drvdata(dev);
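/* TOD2 is sampled before and after DAY/TOD1: if the second sample is
 * lower, the time of day wrapped into a new day mid-sequence, so DAY
 * is read again.
 */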
ret = regmap_read(rtc->regmap, CPCAP_REG_TOD2, &temp_tod2);
ret |= regmap_read(rtc->regmap, CPCAP_REG_DAY, &cpcap_tm.day);
ret |= regmap_read(rtc->regmap, CPCAP_REG_TOD1, &cpcap_tm.tod1);
ret |= regmap_read(rtc->regmap, CPCAP_REG_TOD2, &cpcap_tm.tod2);
if (temp_tod2 > cpcap_tm.tod2)
ret |= regmap_read(rtc->regmap, CPCAP_REG_DAY, &cpcap_tm.day);
if (ret) {
dev_err(dev, "Failed to read time\n");
return -EIO;
}
cpcap2rtc_time(tm, &cpcap_tm);
return 0;
}
static int cpcap_rtc_set_time(struct device *dev, struct rtc_time *tm)
{
struct cpcap_rtc *rtc;
struct cpcap_time cpcap_tm;
int ret = 0;
rtc = dev_get_drvdata(dev);
rtc2cpcap_time(&cpcap_tm, tm);
if (rtc->alarm_enabled)
disable_irq(rtc->alarm_irq);
if (rtc->update_enabled)
disable_irq(rtc->update_irq);
if (rtc->vendor == CPCAP_VENDOR_ST) {
/* The TOD1 and TOD2 registers MUST be written in this order
* for the change to properly set.
*/
ret |= regmap_update_bits(rtc->regmap, CPCAP_REG_TOD1,
TOD1_MASK, cpcap_tm.tod1);
ret |= regmap_update_bits(rtc->regmap, CPCAP_REG_TOD2,
TOD2_MASK, cpcap_tm.tod2);
ret |= regmap_update_bits(rtc->regmap, CPCAP_REG_DAY,
DAY_MASK, cpcap_tm.day);
} else {
/* Clearing the lower 8 bits of the TOD guarantees that
* the upper half of TOD (TOD2) will not increment for 0xFF RTC
* ticks (255 seconds). During this time we can safely write
* to DAY, TOD2, then TOD1 (in that order) and expect RTC to be
* synchronized to the exact time requested upon the final write
* to TOD1.
*/
ret |= regmap_update_bits(rtc->regmap, CPCAP_REG_TOD1,
TOD1_MASK, 0);
ret |= regmap_update_bits(rtc->regmap, CPCAP_REG_DAY,
DAY_MASK, cpcap_tm.day);
ret |= regmap_update_bits(rtc->regmap, CPCAP_REG_TOD2,
TOD2_MASK, cpcap_tm.tod2);
ret |= regmap_update_bits(rtc->regmap, CPCAP_REG_TOD1,
TOD1_MASK, cpcap_tm.tod1);
}
if (rtc->update_enabled)
enable_irq(rtc->update_irq);
if (rtc->alarm_enabled)
enable_irq(rtc->alarm_irq);
return ret;
}
static int cpcap_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
{
struct cpcap_rtc *rtc;
struct cpcap_time cpcap_tm;
int ret;
rtc = dev_get_drvdata(dev);
alrm->enabled = rtc->alarm_enabled;
ret = regmap_read(rtc->regmap, CPCAP_REG_DAYA, &cpcap_tm.day);
ret |= regmap_read(rtc->regmap, CPCAP_REG_TODA2, &cpcap_tm.tod2);
ret |= regmap_read(rtc->regmap, CPCAP_REG_TODA1, &cpcap_tm.tod1);
if (ret) {
dev_err(dev, "Failed to read time\n");
return -EIO;
}
cpcap2rtc_time(&alrm->time, &cpcap_tm);
return rtc_valid_tm(&alrm->time);
}
static int cpcap_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
{
struct cpcap_rtc *rtc;
struct cpcap_time cpcap_tm;
int ret;
rtc = dev_get_drvdata(dev);
rtc2cpcap_time(&cpcap_tm, &alrm->time);
if (rtc->alarm_enabled)
disable_irq(rtc->alarm_irq);
ret = regmap_update_bits(rtc->regmap, CPCAP_REG_DAYA, DAY_MASK,
cpcap_tm.day);
ret |= regmap_update_bits(rtc->regmap, CPCAP_REG_TODA2, TOD2_MASK,
cpcap_tm.tod2);
ret |= regmap_update_bits(rtc->regmap, CPCAP_REG_TODA1, TOD1_MASK,
cpcap_tm.tod1);
if (!ret) {
enable_irq(rtc->alarm_irq);
rtc->alarm_enabled = true;
}
return ret;
}
static const struct rtc_class_ops cpcap_rtc_ops = {
.read_time = cpcap_rtc_read_time,
.set_time = cpcap_rtc_set_time,
.read_alarm = cpcap_rtc_read_alarm,
.set_alarm = cpcap_rtc_set_alarm,
.alarm_irq_enable = cpcap_rtc_alarm_irq_enable,
};
static irqreturn_t cpcap_rtc_alarm_irq(int irq, void *data)
{
struct cpcap_rtc *rtc = data;
rtc_update_irq(rtc->rtc_dev, 1, RTC_AF | RTC_IRQF);
return IRQ_HANDLED;
}
static irqreturn_t cpcap_rtc_update_irq(int irq, void *data)
{
struct cpcap_rtc *rtc = data;
rtc_update_irq(rtc->rtc_dev, 1, RTC_UF | RTC_IRQF);
return IRQ_HANDLED;
}
static int cpcap_rtc_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct cpcap_rtc *rtc;
int err;
rtc = devm_kzalloc(dev, sizeof(*rtc), GFP_KERNEL);
if (!rtc)
return -ENOMEM;
rtc->regmap = dev_get_regmap(dev->parent, NULL);
if (!rtc->regmap)
return -ENODEV;
platform_set_drvdata(pdev, rtc);
rtc->rtc_dev = devm_rtc_allocate_device(dev);
if (IS_ERR(rtc->rtc_dev))
return PTR_ERR(rtc->rtc_dev);
rtc->rtc_dev->ops = &cpcap_rtc_ops;
rtc->rtc_dev->range_max = (timeu64_t) (DAY_MASK + 1) * SECS_PER_DAY - 1;
err = cpcap_get_vendor(dev, rtc->regmap, &rtc->vendor);
if (err)
return err;
rtc->alarm_irq = platform_get_irq(pdev, 0);
err = devm_request_threaded_irq(dev, rtc->alarm_irq, NULL,
cpcap_rtc_alarm_irq,
IRQF_TRIGGER_NONE | IRQF_ONESHOT,
"rtc_alarm", rtc);
if (err) {
dev_err(dev, "Could not request alarm irq: %d\n", err);
return err;
}
disable_irq(rtc->alarm_irq);
/* Stock Android uses the 1 Hz interrupt for "secure clock daemon",
* which is not supported by the mainline kernel. The mainline kernel
* does not use the irq at the moment, but we explicitly request and
* disable it, so that its masked and does not wake up the processor
* every second.
*/
rtc->update_irq = platform_get_irq(pdev, 1);
err = devm_request_threaded_irq(dev, rtc->update_irq, NULL,
cpcap_rtc_update_irq,
IRQF_TRIGGER_NONE | IRQF_ONESHOT,
"rtc_1hz", rtc);
if (err) {
dev_err(dev, "Could not request update irq: %d\n", err);
return err;
}
disable_irq(rtc->update_irq);
err = device_init_wakeup(dev, 1);
if (err) {
dev_err(dev, "wakeup initialization failed (%d)\n", err);
/* ignore error and continue without wakeup support */
}
return devm_rtc_register_device(rtc->rtc_dev);
}
static const struct of_device_id cpcap_rtc_of_match[] = {
{ .compatible = "motorola,cpcap-rtc", },
{},
};
MODULE_DEVICE_TABLE(of, cpcap_rtc_of_match);
static struct platform_driver cpcap_rtc_driver = {
.probe = cpcap_rtc_probe,
.driver = {
.name = "cpcap-rtc",
.of_match_table = cpcap_rtc_of_match,
},
};
module_platform_driver(cpcap_rtc_driver);
MODULE_ALIAS("platform:cpcap-rtc");
MODULE_DESCRIPTION("CPCAP RTC driver");
MODULE_AUTHOR("Sebastian Reichel <[email protected]>");
MODULE_LICENSE("GPL");
| linux-master | drivers/rtc/rtc-cpcap.c |
/*
* Ricoh RS5C313 RTC device/driver
* Copyright (C) 2007 Nobuhiro Iwamatsu
*
* 2005-09-19 modified by kogiidena
*
* Based on the old drivers/char/rs5c313_rtc.c by:
* Copyright (C) 2000 Philipp Rumpf <[email protected]>
* Copyright (C) 1999 Tetsuya Okada & Niibe Yutaka
*
* Based on code written by Paul Gortmaker.
* Copyright (C) 1996 Paul Gortmaker
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Based on other minimal char device drivers, like Alan's
* watchdog, Ted's random, etc. etc.
*
* 1.07 Paul Gortmaker.
* 1.08 Miquel van Smoorenburg: disallow certain things on the
* DEC Alpha as the CMOS clock is also used for other things.
* 1.09 Nikita Schmidt: epoch support and some Alpha cleanup.
* 1.09a Pete Zaitcev: Sun SPARC
* 1.09b Jeff Garzik: Modularize, init cleanup
* 1.09c Jeff Garzik: SMP cleanup
* 1.10 Paul Barton-Davis: add support for async I/O
* 1.10a Andrea Arcangeli: Alpha updates
* 1.10b Andrew Morton: SMP lock fix
* 1.10c Cesar Barros: SMP locking fixes and cleanup
* 1.10d Paul Gortmaker: delete paranoia check in rtc_exit
* 1.10e Maciej W. Rozycki: Handle DECstation's year weirdness.
* 1.11 Takashi Iwai: Kernel access functions
* rtc_register/rtc_unregister/rtc_control
* 1.11a Daniele Bellucci: Audit create_proc_read_entry in rtc_init
* 1.12 Venkatesh Pallipadi: Hooks for emulating rtc on HPET base-timer
* CONFIG_HPET_EMULATE_RTC
* 1.13 Nobuhiro Iwamatsu: Update driver.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/err.h>
#include <linux/rtc.h>
#include <linux/platform_device.h>
#include <linux/bcd.h>
#include <linux/delay.h>
#include <linux/io.h>
#define DRV_NAME "rs5c313"
#ifdef CONFIG_SH_LANDISK
/*****************************************************/
/* LANDISK dependence part of RS5C313 */
/*****************************************************/
#define SCSMR1 0xFFE00000
#define SCSCR1 0xFFE00008
#define SCSMR1_CA 0x80
#define SCSCR1_CKE 0x03
#define SCSPTR1 0xFFE0001C
#define SCSPTR1_EIO 0x80
#define SCSPTR1_SPB1IO 0x08
#define SCSPTR1_SPB1DT 0x04
#define SCSPTR1_SPB0IO 0x02
#define SCSPTR1_SPB0DT 0x01
#define SDA_OEN SCSPTR1_SPB1IO
#define SDA SCSPTR1_SPB1DT
#define SCL_OEN SCSPTR1_SPB0IO
#define SCL SCSPTR1_SPB0DT
/* RICOH RS5C313 CE port */
#define RS5C313_CE 0xB0000003
/* RICOH RS5C313 CE port bit */
#define RS5C313_CE_RTCCE 0x02
/* SCSPTR1 data */
unsigned char scsptr1_data;
#define RS5C313_CEENABLE __raw_writeb(RS5C313_CE_RTCCE, RS5C313_CE);
#define RS5C313_CEDISABLE __raw_writeb(0x00, RS5C313_CE)
#define RS5C313_MISCOP __raw_writeb(0x02, 0xB0000008)
static void rs5c313_init_port(void)
{
/* Set SCK as I/O port and Initialize SCSPTR1 data & I/O port. */
__raw_writeb(__raw_readb(SCSMR1) & ~SCSMR1_CA, SCSMR1);
__raw_writeb(__raw_readb(SCSCR1) & ~SCSCR1_CKE, SCSCR1);
/* And Initialize SCL for RS5C313 clock */
scsptr1_data = __raw_readb(SCSPTR1) | SCL; /* SCL:H */
__raw_writeb(scsptr1_data, SCSPTR1);
scsptr1_data = __raw_readb(SCSPTR1) | SCL_OEN; /* SCL output enable */
__raw_writeb(scsptr1_data, SCSPTR1);
RS5C313_CEDISABLE; /* CE:L */
}
static void rs5c313_write_data(unsigned char data)
{
int i;
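/* Shift the byte out MSB first: put each bit on SDA (SCSPTR1 bit 2)
 * and clock it with an H->L->H pulse on SCL.
 */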
for (i = 0; i < 8; i++) {
/* SDA:Write Data */
scsptr1_data = (scsptr1_data & ~SDA) |
((((0x80 >> i) & data) >> (7 - i)) << 2);
__raw_writeb(scsptr1_data, SCSPTR1);
if (i == 0) {
scsptr1_data |= SDA_OEN; /* SDA:output enable */
__raw_writeb(scsptr1_data, SCSPTR1);
}
ndelay(700);
scsptr1_data &= ~SCL; /* SCL:L */
__raw_writeb(scsptr1_data, SCSPTR1);
ndelay(700);
scsptr1_data |= SCL; /* SCL:H */
__raw_writeb(scsptr1_data, SCSPTR1);
}
scsptr1_data &= ~SDA_OEN; /* SDA:output disable */
__raw_writeb(scsptr1_data, SCSPTR1);
}
static unsigned char rs5c313_read_data(void)
{
int i;
unsigned char data = 0;
for (i = 0; i < 8; i++) {
ndelay(700);
/* SDA:Read Data */
data |= ((__raw_readb(SCSPTR1) & SDA) >> 2) << (7 - i);
scsptr1_data &= ~SCL; /* SCL:L */
__raw_writeb(scsptr1_data, SCSPTR1);
ndelay(700);
scsptr1_data |= SCL; /* SCL:H */
__raw_writeb(scsptr1_data, SCSPTR1);
}
return data & 0x0F;
}
#endif /* CONFIG_SH_LANDISK */
/*****************************************************/
/* machine independence part of RS5C313 */
/*****************************************************/
/* RICOH RS5C313 address */
#define RS5C313_ADDR_SEC 0x00
#define RS5C313_ADDR_SEC10 0x01
#define RS5C313_ADDR_MIN 0x02
#define RS5C313_ADDR_MIN10 0x03
#define RS5C313_ADDR_HOUR 0x04
#define RS5C313_ADDR_HOUR10 0x05
#define RS5C313_ADDR_WEEK 0x06
#define RS5C313_ADDR_INTINTVREG 0x07
#define RS5C313_ADDR_DAY 0x08
#define RS5C313_ADDR_DAY10 0x09
#define RS5C313_ADDR_MON 0x0A
#define RS5C313_ADDR_MON10 0x0B
#define RS5C313_ADDR_YEAR 0x0C
#define RS5C313_ADDR_YEAR10 0x0D
#define RS5C313_ADDR_CNTREG 0x0E
#define RS5C313_ADDR_TESTREG 0x0F
/* RICOH RS5C313 control register */
#define RS5C313_CNTREG_ADJ_BSY 0x01
#define RS5C313_CNTREG_WTEN_XSTP 0x02
#define RS5C313_CNTREG_12_24 0x04
#define RS5C313_CNTREG_CTFG 0x08
/* RICOH RS5C313 test register */
#define RS5C313_TESTREG_TEST 0x01
/* RICOH RS5C313 control bit */
#define RS5C313_CNTBIT_READ 0x40
#define RS5C313_CNTBIT_AD 0x20
#define RS5C313_CNTBIT_DT 0x10
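/*
 * A transfer starts with a 4-bit register address in the low nibble of
 * the command byte, qualified by the AD/DT/READ control bits above.
 * For example, reading CNTREG (address 0x0E) shifts out
 * 0x0E | RS5C313_CNTBIT_READ | RS5C313_CNTBIT_AD == 0x6E, and the chip
 * answers with the register's 4-bit value.
 */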
static unsigned char rs5c313_read_reg(unsigned char addr)
{
rs5c313_write_data(addr | RS5C313_CNTBIT_READ | RS5C313_CNTBIT_AD);
return rs5c313_read_data();
}
static void rs5c313_write_reg(unsigned char addr, unsigned char data)
{
data &= 0x0f;
rs5c313_write_data(addr | RS5C313_CNTBIT_AD);
rs5c313_write_data(data | RS5C313_CNTBIT_DT);
}
static inline unsigned char rs5c313_read_cntreg(void)
{
return rs5c313_read_reg(RS5C313_ADDR_CNTREG);
}
static inline void rs5c313_write_cntreg(unsigned char data)
{
rs5c313_write_reg(RS5C313_ADDR_CNTREG, data);
}
static inline void rs5c313_write_intintvreg(unsigned char data)
{
rs5c313_write_reg(RS5C313_ADDR_INTINTVREG, data);
}
static int rs5c313_rtc_read_time(struct device *dev, struct rtc_time *tm)
{
int data;
int cnt;
cnt = 0;
while (1) {
RS5C313_CEENABLE; /* CE:H */
/* Initialize control reg. 24 hour */
rs5c313_write_cntreg(0x04);
if (!(rs5c313_read_cntreg() & RS5C313_CNTREG_ADJ_BSY))
break;
RS5C313_CEDISABLE;
ndelay(700); /* CE:L */
if (cnt++ > 100) {
dev_err(dev, "%s: timeout error\n", __func__);
return -EIO;
}
}
data = rs5c313_read_reg(RS5C313_ADDR_SEC);
data |= (rs5c313_read_reg(RS5C313_ADDR_SEC10) << 4);
tm->tm_sec = bcd2bin(data);
data = rs5c313_read_reg(RS5C313_ADDR_MIN);
data |= (rs5c313_read_reg(RS5C313_ADDR_MIN10) << 4);
tm->tm_min = bcd2bin(data);
data = rs5c313_read_reg(RS5C313_ADDR_HOUR);
data |= (rs5c313_read_reg(RS5C313_ADDR_HOUR10) << 4);
tm->tm_hour = bcd2bin(data);
data = rs5c313_read_reg(RS5C313_ADDR_DAY);
data |= (rs5c313_read_reg(RS5C313_ADDR_DAY10) << 4);
tm->tm_mday = bcd2bin(data);
data = rs5c313_read_reg(RS5C313_ADDR_MON);
data |= (rs5c313_read_reg(RS5C313_ADDR_MON10) << 4);
tm->tm_mon = bcd2bin(data) - 1;
data = rs5c313_read_reg(RS5C313_ADDR_YEAR);
data |= (rs5c313_read_reg(RS5C313_ADDR_YEAR10) << 4);
tm->tm_year = bcd2bin(data);
if (tm->tm_year < 70)
tm->tm_year += 100;
data = rs5c313_read_reg(RS5C313_ADDR_WEEK);
tm->tm_wday = bcd2bin(data);
RS5C313_CEDISABLE;
ndelay(700); /* CE:L */
return 0;
}
static int rs5c313_rtc_set_time(struct device *dev, struct rtc_time *tm)
{
int data;
int cnt;
cnt = 0;
/* busy check. */
while (1) {
RS5C313_CEENABLE; /* CE:H */
/* Initialize control reg. 24 hour */
rs5c313_write_cntreg(0x04);
if (!(rs5c313_read_cntreg() & RS5C313_CNTREG_ADJ_BSY))
break;
RS5C313_MISCOP;
RS5C313_CEDISABLE;
ndelay(700); /* CE:L */
if (cnt++ > 100) {
dev_err(dev, "%s: timeout error\n", __func__);
return -EIO;
}
}
data = bin2bcd(tm->tm_sec);
rs5c313_write_reg(RS5C313_ADDR_SEC, data);
rs5c313_write_reg(RS5C313_ADDR_SEC10, (data >> 4));
data = bin2bcd(tm->tm_min);
rs5c313_write_reg(RS5C313_ADDR_MIN, data);
rs5c313_write_reg(RS5C313_ADDR_MIN10, (data >> 4));
data = bin2bcd(tm->tm_hour);
rs5c313_write_reg(RS5C313_ADDR_HOUR, data);
rs5c313_write_reg(RS5C313_ADDR_HOUR10, (data >> 4));
data = bin2bcd(tm->tm_mday);
rs5c313_write_reg(RS5C313_ADDR_DAY, data);
rs5c313_write_reg(RS5C313_ADDR_DAY10, (data >> 4));
data = bin2bcd(tm->tm_mon + 1);
rs5c313_write_reg(RS5C313_ADDR_MON, data);
rs5c313_write_reg(RS5C313_ADDR_MON10, (data >> 4));
data = bin2bcd(tm->tm_year % 100);
rs5c313_write_reg(RS5C313_ADDR_YEAR, data);
rs5c313_write_reg(RS5C313_ADDR_YEAR10, (data >> 4));
data = bin2bcd(tm->tm_wday);
rs5c313_write_reg(RS5C313_ADDR_WEEK, data);
RS5C313_CEDISABLE; /* CE:L */
ndelay(700);
return 0;
}
static void rs5c313_check_xstp_bit(void)
{
struct rtc_time tm;
int cnt;
RS5C313_CEENABLE; /* CE:H */
if (rs5c313_read_cntreg() & RS5C313_CNTREG_WTEN_XSTP) {
/* INT interval reg. OFF */
rs5c313_write_intintvreg(0x00);
/* Initialize control reg. 24 hour & adjust */
rs5c313_write_cntreg(0x07);
/* busy check. */
for (cnt = 0; cnt < 100; cnt++) {
if (!(rs5c313_read_cntreg() & RS5C313_CNTREG_ADJ_BSY))
break;
RS5C313_MISCOP;
}
memset(&tm, 0, sizeof(struct rtc_time));
tm.tm_mday = 1;
tm.tm_mon = 1 - 1;
tm.tm_year = 2000 - 1900;
rs5c313_rtc_set_time(NULL, &tm);
pr_err("invalid value, resetting to 1 Jan 2000\n");
}
RS5C313_CEDISABLE;
ndelay(700); /* CE:L */
}
static const struct rtc_class_ops rs5c313_rtc_ops = {
.read_time = rs5c313_rtc_read_time,
.set_time = rs5c313_rtc_set_time,
};
static int rs5c313_rtc_probe(struct platform_device *pdev)
{
struct rtc_device *rtc;
rs5c313_init_port();
rs5c313_check_xstp_bit();
rtc = devm_rtc_device_register(&pdev->dev, "rs5c313", &rs5c313_rtc_ops,
THIS_MODULE);
return PTR_ERR_OR_ZERO(rtc);
}
static struct platform_driver rs5c313_rtc_platform_driver = {
.driver = {
.name = DRV_NAME,
},
.probe = rs5c313_rtc_probe,
};
module_platform_driver(rs5c313_rtc_platform_driver);
MODULE_AUTHOR("kogiidena , Nobuhiro Iwamatsu <[email protected]>");
MODULE_DESCRIPTION("Ricoh RS5C313 RTC device driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" DRV_NAME);
| linux-master | drivers/rtc/rtc-rs5c313.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Driver for the SGS-Thomson M48T35 Timekeeper RAM chip
*
* Copyright (C) 2000 Silicon Graphics, Inc.
* Written by Ulf Carlsson ([email protected])
*
* Copyright (C) 2008 Thomas Bogendoerfer
*
* Based on code written by Paul Gortmaker.
*/
#include <linux/module.h>
#include <linux/rtc.h>
#include <linux/slab.h>
#include <linux/platform_device.h>
#include <linux/bcd.h>
#include <linux/io.h>
#include <linux/err.h>
struct m48t35_rtc {
u8 pad[0x7ff8]; /* clock registers start at offset 0x7ff8 */
#ifdef CONFIG_SGI_IP27
u8 hour;
u8 min;
u8 sec;
u8 control;
u8 year;
u8 month;
u8 date;
u8 day;
#else
u8 control;
u8 sec;
u8 min;
u8 hour;
u8 day;
u8 date;
u8 month;
u8 year;
#endif
};
#define M48T35_RTC_SET 0x80
#define M48T35_RTC_READ 0x40
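/*
 * The SET and READ control bits gate access to the clock bytes: READ is
 * expected to freeze a snapshot of the running time so the registers
 * can be read coherently, while SET halts updates so a new time can be
 * written. Both are cleared again afterwards so the clock ticks on.
 */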
struct m48t35_priv {
struct rtc_device *rtc;
struct m48t35_rtc __iomem *reg;
size_t size;
unsigned long baseaddr;
spinlock_t lock;
};
static int m48t35_read_time(struct device *dev, struct rtc_time *tm)
{
struct m48t35_priv *priv = dev_get_drvdata(dev);
u8 control;
/*
* Only the values that we read from the RTC are set. We leave
* tm_wday, tm_yday and tm_isdst untouched. Even though the
* RTC has RTC_DAY_OF_WEEK, we ignore it, as it is only updated
* by the RTC when initially set to a non-zero value.
*/
spin_lock_irq(&priv->lock);
control = readb(&priv->reg->control);
writeb(control | M48T35_RTC_READ, &priv->reg->control);
tm->tm_sec = readb(&priv->reg->sec);
tm->tm_min = readb(&priv->reg->min);
tm->tm_hour = readb(&priv->reg->hour);
tm->tm_mday = readb(&priv->reg->date);
tm->tm_mon = readb(&priv->reg->month);
tm->tm_year = readb(&priv->reg->year);
writeb(control, &priv->reg->control);
spin_unlock_irq(&priv->lock);
tm->tm_sec = bcd2bin(tm->tm_sec);
tm->tm_min = bcd2bin(tm->tm_min);
tm->tm_hour = bcd2bin(tm->tm_hour);
tm->tm_mday = bcd2bin(tm->tm_mday);
tm->tm_mon = bcd2bin(tm->tm_mon);
tm->tm_year = bcd2bin(tm->tm_year);
/*
* Account for differences between how the RTC uses the values
* and how they are defined in a struct rtc_time;
*/
tm->tm_year += 70;
if (tm->tm_year <= 69)
tm->tm_year += 100;
tm->tm_mon--;
return 0;
}
static int m48t35_set_time(struct device *dev, struct rtc_time *tm)
{
struct m48t35_priv *priv = dev_get_drvdata(dev);
unsigned char mon, day, hrs, min, sec;
unsigned int yrs;
u8 control;
yrs = tm->tm_year + 1900;
mon = tm->tm_mon + 1; /* tm_mon starts at zero */
day = tm->tm_mday;
hrs = tm->tm_hour;
min = tm->tm_min;
sec = tm->tm_sec;
if (yrs < 1970)
return -EINVAL;
yrs -= 1970;
if (yrs > 255) /* They are unsigned */
return -EINVAL;
if (yrs > 169)
return -EINVAL;
if (yrs >= 100)
yrs -= 100;
sec = bin2bcd(sec);
min = bin2bcd(min);
hrs = bin2bcd(hrs);
day = bin2bcd(day);
mon = bin2bcd(mon);
yrs = bin2bcd(yrs);
spin_lock_irq(&priv->lock);
control = readb(&priv->reg->control);
writeb(control | M48T35_RTC_SET, &priv->reg->control);
writeb(yrs, &priv->reg->year);
writeb(mon, &priv->reg->month);
writeb(day, &priv->reg->date);
writeb(hrs, &priv->reg->hour);
writeb(min, &priv->reg->min);
writeb(sec, &priv->reg->sec);
writeb(control, &priv->reg->control);
spin_unlock_irq(&priv->lock);
return 0;
}
static const struct rtc_class_ops m48t35_ops = {
.read_time = m48t35_read_time,
.set_time = m48t35_set_time,
};
static int m48t35_probe(struct platform_device *pdev)
{
struct resource *res;
struct m48t35_priv *priv;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res)
return -ENODEV;
priv = devm_kzalloc(&pdev->dev, sizeof(struct m48t35_priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
priv->size = resource_size(res);
if (!devm_request_mem_region(&pdev->dev, res->start, priv->size,
pdev->name))
return -EBUSY;
priv->baseaddr = res->start;
priv->reg = devm_ioremap(&pdev->dev, priv->baseaddr, priv->size);
if (!priv->reg)
return -ENOMEM;
spin_lock_init(&priv->lock);
platform_set_drvdata(pdev, priv);
priv->rtc = devm_rtc_device_register(&pdev->dev, "m48t35",
&m48t35_ops, THIS_MODULE);
return PTR_ERR_OR_ZERO(priv->rtc);
}
static struct platform_driver m48t35_platform_driver = {
.driver = {
.name = "rtc-m48t35",
},
.probe = m48t35_probe,
};
module_platform_driver(m48t35_platform_driver);
MODULE_AUTHOR("Thomas Bogendoerfer <[email protected]>");
MODULE_DESCRIPTION("M48T35 RTC driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:rtc-m48t35");
| linux-master | drivers/rtc/rtc-m48t35.c |
// SPDX-License-Identifier: GPL-2.0+
//
// RTC driver for Maxim MAX8998
//
// Copyright (C) 2010 Samsung Electronics Co.Ltd
// Author: Minkyu Kang <[email protected]>
// Author: Joonyoung Shim <[email protected]>
#include <linux/module.h>
#include <linux/i2c.h>
#include <linux/slab.h>
#include <linux/bcd.h>
#include <linux/irqdomain.h>
#include <linux/rtc.h>
#include <linux/platform_device.h>
#include <linux/mfd/max8998.h>
#include <linux/mfd/max8998-private.h>
#include <linux/delay.h>
#define MAX8998_RTC_SEC 0x00
#define MAX8998_RTC_MIN 0x01
#define MAX8998_RTC_HOUR 0x02
#define MAX8998_RTC_WEEKDAY 0x03
#define MAX8998_RTC_DATE 0x04
#define MAX8998_RTC_MONTH 0x05
#define MAX8998_RTC_YEAR1 0x06
#define MAX8998_RTC_YEAR2 0x07
#define MAX8998_ALARM0_SEC 0x08
#define MAX8998_ALARM0_MIN 0x09
#define MAX8998_ALARM0_HOUR 0x0a
#define MAX8998_ALARM0_WEEKDAY 0x0b
#define MAX8998_ALARM0_DATE 0x0c
#define MAX8998_ALARM0_MONTH 0x0d
#define MAX8998_ALARM0_YEAR1 0x0e
#define MAX8998_ALARM0_YEAR2 0x0f
#define MAX8998_ALARM1_SEC 0x10
#define MAX8998_ALARM1_MIN 0x11
#define MAX8998_ALARM1_HOUR 0x12
#define MAX8998_ALARM1_WEEKDAY 0x13
#define MAX8998_ALARM1_DATE 0x14
#define MAX8998_ALARM1_MONTH 0x15
#define MAX8998_ALARM1_YEAR1 0x16
#define MAX8998_ALARM1_YEAR2 0x17
#define MAX8998_ALARM0_CONF 0x18
#define MAX8998_ALARM1_CONF 0x19
#define MAX8998_RTC_STATUS 0x1a
#define MAX8998_WTSR_SMPL_CNTL 0x1b
#define MAX8998_TEST 0x1f
#define HOUR_12 (1 << 7)
#define HOUR_PM (1 << 5)
#define ALARM0_STATUS (1 << 1)
#define ALARM1_STATUS (1 << 2)
enum {
RTC_SEC = 0,
RTC_MIN,
RTC_HOUR,
RTC_WEEKDAY,
RTC_DATE,
RTC_MONTH,
RTC_YEAR1,
RTC_YEAR2,
};
struct max8998_rtc_info {
struct device *dev;
struct max8998_dev *max8998;
struct i2c_client *rtc;
struct rtc_device *rtc_dev;
int irq;
bool lp3974_bug_workaround;
};
static void max8998_data_to_tm(u8 *data, struct rtc_time *tm)
{
tm->tm_sec = bcd2bin(data[RTC_SEC]);
tm->tm_min = bcd2bin(data[RTC_MIN]);
if (data[RTC_HOUR] & HOUR_12) {
tm->tm_hour = bcd2bin(data[RTC_HOUR] & 0x1f);
if (data[RTC_HOUR] & HOUR_PM)
tm->tm_hour += 12;
} else
tm->tm_hour = bcd2bin(data[RTC_HOUR] & 0x3f);
tm->tm_wday = data[RTC_WEEKDAY] & 0x07;
tm->tm_mday = bcd2bin(data[RTC_DATE]);
tm->tm_mon = bcd2bin(data[RTC_MONTH]);
tm->tm_year = bcd2bin(data[RTC_YEAR1]) + bcd2bin(data[RTC_YEAR2]) * 100;
tm->tm_year -= 1900;
}
static void max8998_tm_to_data(struct rtc_time *tm, u8 *data)
{
data[RTC_SEC] = bin2bcd(tm->tm_sec);
data[RTC_MIN] = bin2bcd(tm->tm_min);
data[RTC_HOUR] = bin2bcd(tm->tm_hour);
data[RTC_WEEKDAY] = tm->tm_wday;
data[RTC_DATE] = bin2bcd(tm->tm_mday);
data[RTC_MONTH] = bin2bcd(tm->tm_mon);
data[RTC_YEAR1] = bin2bcd(tm->tm_year % 100);
data[RTC_YEAR2] = bin2bcd((tm->tm_year + 1900) / 100);
}
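/*
 * Worked example of the year split: for 2023, tm->tm_year is 123, so
 * YEAR1 = bin2bcd(123 % 100) = 0x23 and YEAR2 = bin2bcd(2023 / 100) =
 * 0x20; max8998_data_to_tm() then recovers 23 + 20 * 100 - 1900 = 123.
 */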
static int max8998_rtc_read_time(struct device *dev, struct rtc_time *tm)
{
struct max8998_rtc_info *info = dev_get_drvdata(dev);
u8 data[8];
int ret;
ret = max8998_bulk_read(info->rtc, MAX8998_RTC_SEC, 8, data);
if (ret < 0)
return ret;
max8998_data_to_tm(data, tm);
return 0;
}
static int max8998_rtc_set_time(struct device *dev, struct rtc_time *tm)
{
struct max8998_rtc_info *info = dev_get_drvdata(dev);
u8 data[8];
int ret;
max8998_tm_to_data(tm, data);
ret = max8998_bulk_write(info->rtc, MAX8998_RTC_SEC, 8, data);
if (info->lp3974_bug_workaround)
msleep(2000);
return ret;
}
static int max8998_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
{
struct max8998_rtc_info *info = dev_get_drvdata(dev);
u8 data[8];
u8 val;
int ret;
ret = max8998_bulk_read(info->rtc, MAX8998_ALARM0_SEC, 8, data);
if (ret < 0)
return ret;
max8998_data_to_tm(data, &alrm->time);
ret = max8998_read_reg(info->rtc, MAX8998_ALARM0_CONF, &val);
if (ret < 0)
return ret;
alrm->enabled = !!val;
ret = max8998_read_reg(info->rtc, MAX8998_RTC_STATUS, &val);
if (ret < 0)
return ret;
if (val & ALARM0_STATUS)
alrm->pending = 1;
else
alrm->pending = 0;
return 0;
}
static int max8998_rtc_stop_alarm(struct max8998_rtc_info *info)
{
int ret = max8998_write_reg(info->rtc, MAX8998_ALARM0_CONF, 0);
if (info->lp3974_bug_workaround)
msleep(2000);
return ret;
}
static int max8998_rtc_start_alarm(struct max8998_rtc_info *info)
{
int ret;
u8 alarm0_conf = 0x77;
/* LP3974 chips with the delay bug also have an RTC alarm bug in the "MONTH" field */
if (info->lp3974_bug_workaround)
alarm0_conf = 0x57;
ret = max8998_write_reg(info->rtc, MAX8998_ALARM0_CONF, alarm0_conf);
if (info->lp3974_bug_workaround)
msleep(2000);
return ret;
}
static int max8998_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
{
struct max8998_rtc_info *info = dev_get_drvdata(dev);
u8 data[8];
int ret;
max8998_tm_to_data(&alrm->time, data);
ret = max8998_rtc_stop_alarm(info);
if (ret < 0)
return ret;
ret = max8998_bulk_write(info->rtc, MAX8998_ALARM0_SEC, 8, data);
if (ret < 0)
return ret;
if (info->lp3974_bug_workaround)
msleep(2000);
if (alrm->enabled)
ret = max8998_rtc_start_alarm(info);
return ret;
}
static int max8998_rtc_alarm_irq_enable(struct device *dev,
unsigned int enabled)
{
struct max8998_rtc_info *info = dev_get_drvdata(dev);
if (enabled)
return max8998_rtc_start_alarm(info);
else
return max8998_rtc_stop_alarm(info);
}
static irqreturn_t max8998_rtc_alarm_irq(int irq, void *data)
{
struct max8998_rtc_info *info = data;
rtc_update_irq(info->rtc_dev, 1, RTC_IRQF | RTC_AF);
return IRQ_HANDLED;
}
static const struct rtc_class_ops max8998_rtc_ops = {
.read_time = max8998_rtc_read_time,
.set_time = max8998_rtc_set_time,
.read_alarm = max8998_rtc_read_alarm,
.set_alarm = max8998_rtc_set_alarm,
.alarm_irq_enable = max8998_rtc_alarm_irq_enable,
};
static int max8998_rtc_probe(struct platform_device *pdev)
{
struct max8998_dev *max8998 = dev_get_drvdata(pdev->dev.parent);
struct max8998_platform_data *pdata = max8998->pdata;
struct max8998_rtc_info *info;
int ret;
info = devm_kzalloc(&pdev->dev, sizeof(struct max8998_rtc_info),
GFP_KERNEL);
if (!info)
return -ENOMEM;
info->dev = &pdev->dev;
info->max8998 = max8998;
info->rtc = max8998->rtc;
platform_set_drvdata(pdev, info);
info->rtc_dev = devm_rtc_device_register(&pdev->dev, "max8998-rtc",
&max8998_rtc_ops, THIS_MODULE);
if (IS_ERR(info->rtc_dev)) {
ret = PTR_ERR(info->rtc_dev);
dev_err(&pdev->dev, "Failed to register RTC device: %d\n", ret);
return ret;
}
if (!max8998->irq_domain)
goto no_irq;
info->irq = irq_create_mapping(max8998->irq_domain, MAX8998_IRQ_ALARM0);
if (!info->irq) {
dev_warn(&pdev->dev, "Failed to map alarm IRQ\n");
goto no_irq;
}
ret = devm_request_threaded_irq(&pdev->dev, info->irq, NULL,
max8998_rtc_alarm_irq, 0, "rtc-alarm0", info);
if (ret < 0)
dev_err(&pdev->dev, "Failed to request alarm IRQ: %d: %d\n",
info->irq, ret);
no_irq:
dev_info(&pdev->dev, "RTC CHIP NAME: %s\n", pdev->id_entry->name);
if (pdata && pdata->rtc_delay) {
info->lp3974_bug_workaround = true;
dev_warn(&pdev->dev, "LP3974 with RTC REGERR option."
" RTC updates will be extremely slow.\n");
}
return 0;
}
static const struct platform_device_id max8998_rtc_id[] = {
{ "max8998-rtc", TYPE_MAX8998 },
{ "lp3974-rtc", TYPE_LP3974 },
{ }
};
MODULE_DEVICE_TABLE(platform, max8998_rtc_id);
static struct platform_driver max8998_rtc_driver = {
.driver = {
.name = "max8998-rtc",
},
.probe = max8998_rtc_probe,
.id_table = max8998_rtc_id,
};
module_platform_driver(max8998_rtc_driver);
MODULE_AUTHOR("Minkyu Kang <[email protected]>");
MODULE_AUTHOR("Joonyoung Shim <[email protected]>");
MODULE_DESCRIPTION("Maxim MAX8998 RTC driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/rtc/rtc-max8998.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Oki MSM6242 RTC Driver
*
* Copyright 2009 Geert Uytterhoeven
*
* Based on the A2000 TOD code in arch/m68k/amiga/config.c
* Copyright (C) 1993 Hamish Macdonald
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/rtc.h>
#include <linux/slab.h>
enum {
MSM6242_SECOND1 = 0x0, /* 1-second digit register */
MSM6242_SECOND10 = 0x1, /* 10-second digit register */
MSM6242_MINUTE1 = 0x2, /* 1-minute digit register */
MSM6242_MINUTE10 = 0x3, /* 10-minute digit register */
MSM6242_HOUR1 = 0x4, /* 1-hour digit register */
MSM6242_HOUR10 = 0x5, /* PM/AM, 10-hour digit register */
MSM6242_DAY1 = 0x6, /* 1-day digit register */
MSM6242_DAY10 = 0x7, /* 10-day digit register */
MSM6242_MONTH1 = 0x8, /* 1-month digit register */
MSM6242_MONTH10 = 0x9, /* 10-month digit register */
MSM6242_YEAR1 = 0xa, /* 1-year digit register */
MSM6242_YEAR10 = 0xb, /* 10-year digit register */
MSM6242_WEEK = 0xc, /* Week register */
MSM6242_CD = 0xd, /* Control Register D */
MSM6242_CE = 0xe, /* Control Register E */
MSM6242_CF = 0xf, /* Control Register F */
};
#define MSM6242_HOUR10_AM (0 << 2)
#define MSM6242_HOUR10_PM (1 << 2)
#define MSM6242_HOUR10_HR_MASK (3 << 0)
#define MSM6242_WEEK_SUNDAY 0
#define MSM6242_WEEK_MONDAY 1
#define MSM6242_WEEK_TUESDAY 2
#define MSM6242_WEEK_WEDNESDAY 3
#define MSM6242_WEEK_THURSDAY 4
#define MSM6242_WEEK_FRIDAY 5
#define MSM6242_WEEK_SATURDAY 6
#define MSM6242_CD_30_S_ADJ (1 << 3) /* 30-second adjustment */
#define MSM6242_CD_IRQ_FLAG (1 << 2)
#define MSM6242_CD_BUSY (1 << 1)
#define MSM6242_CD_HOLD (1 << 0)
#define MSM6242_CE_T_MASK (3 << 2)
#define MSM6242_CE_T_64HZ (0 << 2) /* period 1/64 second */
#define MSM6242_CE_T_1HZ (1 << 2) /* period 1 second */
#define MSM6242_CE_T_1MINUTE (2 << 2) /* period 1 minute */
#define MSM6242_CE_T_1HOUR (3 << 2) /* period 1 hour */
#define MSM6242_CE_ITRPT_STND (1 << 1)
#define MSM6242_CE_MASK (1 << 0) /* STD.P output control */
#define MSM6242_CF_TEST (1 << 3)
#define MSM6242_CF_12H (0 << 2)
#define MSM6242_CF_24H (1 << 2)
#define MSM6242_CF_STOP (1 << 1)
#define MSM6242_CF_REST (1 << 0) /* reset */
struct msm6242_priv {
u32 __iomem *regs;
struct rtc_device *rtc;
};
static inline unsigned int msm6242_read(struct msm6242_priv *priv,
unsigned int reg)
{
return __raw_readl(&priv->regs[reg]) & 0xf;
}
static inline void msm6242_write(struct msm6242_priv *priv, unsigned int val,
unsigned int reg)
{
__raw_writel(val, &priv->regs[reg]);
}
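/*
 * The MSM6242 counters keep ticking while we access them, so
 * msm6242_lock() sets HOLD and, if the chip reports BUSY (a carry is
 * being propagated internally), drops HOLD, waits, and retries until
 * the snapshot it finally takes is consistent. The 70us delay and five
 * retries are the margins this driver assumes are sufficient.
 */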
static void msm6242_lock(struct msm6242_priv *priv)
{
int cnt = 5;
msm6242_write(priv, MSM6242_CD_HOLD|MSM6242_CD_IRQ_FLAG, MSM6242_CD);
while ((msm6242_read(priv, MSM6242_CD) & MSM6242_CD_BUSY) && cnt) {
msm6242_write(priv, MSM6242_CD_IRQ_FLAG, MSM6242_CD);
udelay(70);
msm6242_write(priv, MSM6242_CD_HOLD|MSM6242_CD_IRQ_FLAG, MSM6242_CD);
cnt--;
}
if (!cnt)
pr_warn("timed out waiting for RTC (0x%x)\n",
msm6242_read(priv, MSM6242_CD));
}
static void msm6242_unlock(struct msm6242_priv *priv)
{
msm6242_write(priv, MSM6242_CD_IRQ_FLAG, MSM6242_CD);
}
static int msm6242_read_time(struct device *dev, struct rtc_time *tm)
{
struct msm6242_priv *priv = dev_get_drvdata(dev);
msm6242_lock(priv);
tm->tm_sec = msm6242_read(priv, MSM6242_SECOND10) * 10 +
msm6242_read(priv, MSM6242_SECOND1);
tm->tm_min = msm6242_read(priv, MSM6242_MINUTE10) * 10 +
msm6242_read(priv, MSM6242_MINUTE1);
tm->tm_hour = (msm6242_read(priv, MSM6242_HOUR10) &
MSM6242_HOUR10_HR_MASK) * 10 +
msm6242_read(priv, MSM6242_HOUR1);
tm->tm_mday = msm6242_read(priv, MSM6242_DAY10) * 10 +
msm6242_read(priv, MSM6242_DAY1);
tm->tm_wday = msm6242_read(priv, MSM6242_WEEK);
tm->tm_mon = msm6242_read(priv, MSM6242_MONTH10) * 10 +
msm6242_read(priv, MSM6242_MONTH1) - 1;
tm->tm_year = msm6242_read(priv, MSM6242_YEAR10) * 10 +
msm6242_read(priv, MSM6242_YEAR1);
if (tm->tm_year <= 69)
tm->tm_year += 100;
if (!(msm6242_read(priv, MSM6242_CF) & MSM6242_CF_24H)) {
unsigned int pm = msm6242_read(priv, MSM6242_HOUR10) &
MSM6242_HOUR10_PM;
if (!pm && tm->tm_hour == 12)
tm->tm_hour = 0;
else if (pm && tm->tm_hour != 12)
tm->tm_hour += 12;
}
msm6242_unlock(priv);
return 0;
}
static int msm6242_set_time(struct device *dev, struct rtc_time *tm)
{
struct msm6242_priv *priv = dev_get_drvdata(dev);
msm6242_lock(priv);
msm6242_write(priv, tm->tm_sec / 10, MSM6242_SECOND10);
msm6242_write(priv, tm->tm_sec % 10, MSM6242_SECOND1);
msm6242_write(priv, tm->tm_min / 10, MSM6242_MINUTE10);
msm6242_write(priv, tm->tm_min % 10, MSM6242_MINUTE1);
if (msm6242_read(priv, MSM6242_CF) & MSM6242_CF_24H)
msm6242_write(priv, tm->tm_hour / 10, MSM6242_HOUR10);
else if (tm->tm_hour >= 12)
msm6242_write(priv, MSM6242_HOUR10_PM + (tm->tm_hour - 12) / 10,
MSM6242_HOUR10);
else
msm6242_write(priv, tm->tm_hour / 10, MSM6242_HOUR10);
msm6242_write(priv, tm->tm_hour % 10, MSM6242_HOUR1);
msm6242_write(priv, tm->tm_mday / 10, MSM6242_DAY10);
msm6242_write(priv, tm->tm_mday % 10, MSM6242_DAY1);
if (tm->tm_wday != -1)
msm6242_write(priv, tm->tm_wday, MSM6242_WEEK);
msm6242_write(priv, (tm->tm_mon + 1) / 10, MSM6242_MONTH10);
msm6242_write(priv, (tm->tm_mon + 1) % 10, MSM6242_MONTH1);
if (tm->tm_year >= 100)
tm->tm_year -= 100;
msm6242_write(priv, tm->tm_year / 10, MSM6242_YEAR10);
msm6242_write(priv, tm->tm_year % 10, MSM6242_YEAR1);
msm6242_unlock(priv);
return 0;
}
static const struct rtc_class_ops msm6242_rtc_ops = {
.read_time = msm6242_read_time,
.set_time = msm6242_set_time,
};
static int __init msm6242_rtc_probe(struct platform_device *pdev)
{
struct resource *res;
struct msm6242_priv *priv;
struct rtc_device *rtc;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res)
return -ENODEV;
priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
priv->regs = devm_ioremap(&pdev->dev, res->start, resource_size(res));
if (!priv->regs)
return -ENOMEM;
platform_set_drvdata(pdev, priv);
rtc = devm_rtc_device_register(&pdev->dev, "rtc-msm6242",
&msm6242_rtc_ops, THIS_MODULE);
if (IS_ERR(rtc))
return PTR_ERR(rtc);
priv->rtc = rtc;
return 0;
}
static struct platform_driver msm6242_rtc_driver = {
.driver = {
.name = "rtc-msm6242",
},
};
module_platform_driver_probe(msm6242_rtc_driver, msm6242_rtc_probe);
MODULE_AUTHOR("Geert Uytterhoeven <[email protected]>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Oki MSM6242 RTC driver");
MODULE_ALIAS("platform:rtc-msm6242");
| linux-master | drivers/rtc/rtc-msm6242.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* An I2C driver for Ricoh RS5C372, R2025S/D and RV5C38[67] RTCs
*
* Copyright (C) 2005 Pavel Mironchik <[email protected]>
* Copyright (C) 2006 Tower Technologies
* Copyright (C) 2008 Paul Mundt
*/
#include <linux/i2c.h>
#include <linux/rtc.h>
#include <linux/bcd.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/of.h>
/*
* Ricoh has a family of I2C based RTCs, which differ only slightly from
* each other. Differences center on pinout (e.g. how many interrupts,
* output clock, etc) and how the control registers are used. The '372
* is significant only because that's the one this driver first supported.
*/
#define RS5C372_REG_SECS 0
#define RS5C372_REG_MINS 1
#define RS5C372_REG_HOURS 2
#define RS5C372_REG_WDAY 3
#define RS5C372_REG_DAY 4
#define RS5C372_REG_MONTH 5
#define RS5C372_REG_YEAR 6
#define RS5C372_REG_TRIM 7
# define RS5C372_TRIM_XSL 0x80 /* only if RS5C372[a|b] */
# define RS5C372_TRIM_MASK 0x7F
# define R2221TL_TRIM_DEV (1 << 7) /* only if R2221TL */
# define RS5C372_TRIM_DECR (1 << 6)
#define RS5C_REG_ALARM_A_MIN 8 /* or ALARM_W */
#define RS5C_REG_ALARM_A_HOURS 9
#define RS5C_REG_ALARM_A_WDAY 10
#define RS5C_REG_ALARM_B_MIN 11 /* or ALARM_D */
#define RS5C_REG_ALARM_B_HOURS 12
#define RS5C_REG_ALARM_B_WDAY 13 /* (ALARM_B only) */
#define RS5C_REG_CTRL1 14
# define RS5C_CTRL1_AALE (1 << 7) /* or WALE */
# define RS5C_CTRL1_BALE (1 << 6) /* or DALE */
# define RV5C387_CTRL1_24 (1 << 5)
# define RS5C372A_CTRL1_SL1 (1 << 5)
# define RS5C_CTRL1_CT_MASK (7 << 0)
# define RS5C_CTRL1_CT0 (0 << 0) /* no periodic irq */
# define RS5C_CTRL1_CT4 (4 << 0) /* 1 Hz level irq */
#define RS5C_REG_CTRL2 15
# define RS5C372_CTRL2_24 (1 << 5)
# define RS5C_CTRL2_XSTP (1 << 4) /* only if !R2x2x */
# define R2x2x_CTRL2_VDET (1 << 6) /* only if R2x2x */
# define R2x2x_CTRL2_XSTP (1 << 5) /* only if R2x2x */
# define R2x2x_CTRL2_PON (1 << 4) /* only if R2x2x */
# define RS5C_CTRL2_CTFG (1 << 2)
# define RS5C_CTRL2_AAFG (1 << 1) /* or WAFG */
# define RS5C_CTRL2_BAFG (1 << 0) /* or DAFG */
/* to read (style 1) or write registers starting at R */
#define RS5C_ADDR(R) (((R) << 4) | 0)
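/*
 * Example: RS5C_ADDR(RS5C_REG_CTRL1) yields 0xe0, i.e. the register
 * index in the high nibble with the low "transfer mode" nibble zeroed,
 * which is the byte the chip expects as its internal address pointer.
 */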
enum rtc_type {
rtc_undef = 0,
rtc_r2025sd,
rtc_r2221tl,
rtc_rs5c372a,
rtc_rs5c372b,
rtc_rv5c386,
rtc_rv5c387a,
};
static const struct i2c_device_id rs5c372_id[] = {
{ "r2025sd", rtc_r2025sd },
{ "r2221tl", rtc_r2221tl },
{ "rs5c372a", rtc_rs5c372a },
{ "rs5c372b", rtc_rs5c372b },
{ "rv5c386", rtc_rv5c386 },
{ "rv5c387a", rtc_rv5c387a },
{ }
};
MODULE_DEVICE_TABLE(i2c, rs5c372_id);
static const __maybe_unused struct of_device_id rs5c372_of_match[] = {
{
.compatible = "ricoh,r2025sd",
.data = (void *)rtc_r2025sd
},
{
.compatible = "ricoh,r2221tl",
.data = (void *)rtc_r2221tl
},
{
.compatible = "ricoh,rs5c372a",
.data = (void *)rtc_rs5c372a
},
{
.compatible = "ricoh,rs5c372b",
.data = (void *)rtc_rs5c372b
},
{
.compatible = "ricoh,rv5c386",
.data = (void *)rtc_rv5c386
},
{
.compatible = "ricoh,rv5c387a",
.data = (void *)rtc_rv5c387a
},
{ }
};
MODULE_DEVICE_TABLE(of, rs5c372_of_match);
/* REVISIT: this assumes that:
* - we're in the 21st century, so it's safe to ignore the century
* bit for rv5c38[67] (REG_MONTH bit 7);
* - we should use ALARM_A not ALARM_B (may be wrong on some boards)
*/
struct rs5c372 {
struct i2c_client *client;
struct rtc_device *rtc;
enum rtc_type type;
unsigned time24:1;
unsigned has_irq:1;
unsigned smbus:1;
char buf[17];
char *regs;
};
static int rs5c_get_regs(struct rs5c372 *rs5c)
{
struct i2c_client *client = rs5c->client;
struct i2c_msg msgs[] = {
{
.addr = client->addr,
.flags = I2C_M_RD,
.len = sizeof(rs5c->buf),
.buf = rs5c->buf
},
};
/* This implements the third reading method from the datasheet, using
* an internal address that's reset after each transaction (by STOP)
* to 0x0f ... so we read extra registers, and skip the first one.
*
* The first method doesn't work with the iop3xx adapter driver, on at
* least 80219 chips; this works around that bug.
*
* The third method on the other hand doesn't work for the SMBus-only
* configurations, so we use the first method there, stripping off
* the extra register in the process.
*/
if (rs5c->smbus) {
int addr = RS5C_ADDR(RS5C372_REG_SECS);
int size = sizeof(rs5c->buf) - 1;
if (i2c_smbus_read_i2c_block_data(client, addr, size,
rs5c->buf + 1) != size) {
dev_warn(&client->dev, "can't read registers\n");
return -EIO;
}
} else {
if ((i2c_transfer(client->adapter, msgs, 1)) != 1) {
dev_warn(&client->dev, "can't read registers\n");
return -EIO;
}
}
dev_dbg(&client->dev,
"%3ph (%02x) %3ph (%02x), %3ph, %3ph; %02x %02x\n",
rs5c->regs + 0, rs5c->regs[3],
rs5c->regs + 4, rs5c->regs[7],
rs5c->regs + 8, rs5c->regs + 11,
rs5c->regs[14], rs5c->regs[15]);
return 0;
}
static unsigned rs5c_reg2hr(struct rs5c372 *rs5c, unsigned reg)
{
unsigned hour;
if (rs5c->time24)
return bcd2bin(reg & 0x3f);
hour = bcd2bin(reg & 0x1f);
if (hour == 12)
hour = 0;
if (reg & 0x20)
hour += 12;
return hour;
}
static unsigned rs5c_hr2reg(struct rs5c372 *rs5c, unsigned hour)
{
if (rs5c->time24)
return bin2bcd(hour);
if (hour > 12)
return 0x20 | bin2bcd(hour - 12);
if (hour == 12)
return 0x20 | bin2bcd(12);
if (hour == 0)
return bin2bcd(12);
return bin2bcd(hour);
}
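/*
 * In 12-hour mode bit 5 is the PM flag, so for example:
 * hour 0 (midnight) -> 0x12 (12 AM), hour 12 (noon) -> 0x32 (12 PM),
 * and hour 13 -> 0x21 (1 PM). rs5c_reg2hr() is the exact inverse.
 */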
static int rs5c372_rtc_read_time(struct device *dev, struct rtc_time *tm)
{
struct i2c_client *client = to_i2c_client(dev);
struct rs5c372 *rs5c = i2c_get_clientdata(client);
int status = rs5c_get_regs(rs5c);
unsigned char ctrl2 = rs5c->regs[RS5C_REG_CTRL2];
if (status < 0)
return status;
switch (rs5c->type) {
case rtc_r2025sd:
case rtc_r2221tl:
if ((rs5c->type == rtc_r2025sd && !(ctrl2 & R2x2x_CTRL2_XSTP)) ||
(rs5c->type == rtc_r2221tl && (ctrl2 & R2x2x_CTRL2_XSTP))) {
dev_warn(&client->dev, "rtc oscillator interruption detected. Please reset the rtc clock.\n");
return -EINVAL;
}
break;
default:
if (ctrl2 & RS5C_CTRL2_XSTP) {
dev_warn(&client->dev, "rtc oscillator interruption detected. Please reset the rtc clock.\n");
return -EINVAL;
}
}
tm->tm_sec = bcd2bin(rs5c->regs[RS5C372_REG_SECS] & 0x7f);
tm->tm_min = bcd2bin(rs5c->regs[RS5C372_REG_MINS] & 0x7f);
tm->tm_hour = rs5c_reg2hr(rs5c, rs5c->regs[RS5C372_REG_HOURS]);
tm->tm_wday = bcd2bin(rs5c->regs[RS5C372_REG_WDAY] & 0x07);
tm->tm_mday = bcd2bin(rs5c->regs[RS5C372_REG_DAY] & 0x3f);
/* tm->tm_mon is zero-based */
tm->tm_mon = bcd2bin(rs5c->regs[RS5C372_REG_MONTH] & 0x1f) - 1;
/* year is 1900 + tm->tm_year */
tm->tm_year = bcd2bin(rs5c->regs[RS5C372_REG_YEAR]) + 100;
dev_dbg(&client->dev, "%s: tm is secs=%d, mins=%d, hours=%d, "
"mday=%d, mon=%d, year=%d, wday=%d\n",
__func__,
tm->tm_sec, tm->tm_min, tm->tm_hour,
tm->tm_mday, tm->tm_mon, tm->tm_year, tm->tm_wday);
return 0;
}
static int rs5c372_rtc_set_time(struct device *dev, struct rtc_time *tm)
{
struct i2c_client *client = to_i2c_client(dev);
struct rs5c372 *rs5c = i2c_get_clientdata(client);
unsigned char buf[7];
unsigned char ctrl2;
int addr;
dev_dbg(&client->dev, "%s: tm is secs=%d, mins=%d, hours=%d "
"mday=%d, mon=%d, year=%d, wday=%d\n",
__func__,
tm->tm_sec, tm->tm_min, tm->tm_hour,
tm->tm_mday, tm->tm_mon, tm->tm_year, tm->tm_wday);
addr = RS5C_ADDR(RS5C372_REG_SECS);
buf[0] = bin2bcd(tm->tm_sec);
buf[1] = bin2bcd(tm->tm_min);
buf[2] = rs5c_hr2reg(rs5c, tm->tm_hour);
buf[3] = bin2bcd(tm->tm_wday);
buf[4] = bin2bcd(tm->tm_mday);
buf[5] = bin2bcd(tm->tm_mon + 1);
buf[6] = bin2bcd(tm->tm_year - 100);
if (i2c_smbus_write_i2c_block_data(client, addr, sizeof(buf), buf) < 0) {
dev_dbg(&client->dev, "%s: write error in line %i\n",
__func__, __LINE__);
return -EIO;
}
addr = RS5C_ADDR(RS5C_REG_CTRL2);
ctrl2 = i2c_smbus_read_byte_data(client, addr);
/* clear rtc warning bits */
switch (rs5c->type) {
case rtc_r2025sd:
case rtc_r2221tl:
ctrl2 &= ~(R2x2x_CTRL2_VDET | R2x2x_CTRL2_PON);
if (rs5c->type == rtc_r2025sd)
ctrl2 |= R2x2x_CTRL2_XSTP;
else
ctrl2 &= ~R2x2x_CTRL2_XSTP;
break;
default:
ctrl2 &= ~RS5C_CTRL2_XSTP;
break;
}
if (i2c_smbus_write_byte_data(client, addr, ctrl2) < 0) {
dev_dbg(&client->dev, "%s: write error in line %i\n",
__func__, __LINE__);
return -EIO;
}
return 0;
}
#if IS_ENABLED(CONFIG_RTC_INTF_PROC)
#define NEED_TRIM
#endif
#if IS_ENABLED(CONFIG_RTC_INTF_SYSFS)
#define NEED_TRIM
#endif
#ifdef NEED_TRIM
static int rs5c372_get_trim(struct i2c_client *client, int *osc, int *trim)
{
struct rs5c372 *rs5c372 = i2c_get_clientdata(client);
u8 tmp = rs5c372->regs[RS5C372_REG_TRIM];
if (osc) {
if (rs5c372->type == rtc_rs5c372a || rs5c372->type == rtc_rs5c372b)
*osc = (tmp & RS5C372_TRIM_XSL) ? 32000 : 32768;
else
*osc = 32768;
}
if (trim) {
dev_dbg(&client->dev, "%s: raw trim=%x\n", __func__, tmp);
tmp &= RS5C372_TRIM_MASK;
if (tmp & 0x3e) {
int t = tmp & 0x3f;
if (tmp & 0x40)
t = (~t | (s8)0xc0) + 1;
else
t = t - 1;
tmp = t * 2;
} else
tmp = 0;
*trim = tmp;
}
return 0;
}
#endif
static int rs5c_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
{
struct i2c_client *client = to_i2c_client(dev);
struct rs5c372 *rs5c = i2c_get_clientdata(client);
unsigned char buf;
int status, addr;
buf = rs5c->regs[RS5C_REG_CTRL1];
if (!rs5c->has_irq)
return -EINVAL;
status = rs5c_get_regs(rs5c);
if (status < 0)
return status;
addr = RS5C_ADDR(RS5C_REG_CTRL1);
if (enabled)
buf |= RS5C_CTRL1_AALE;
else
buf &= ~RS5C_CTRL1_AALE;
if (i2c_smbus_write_byte_data(client, addr, buf) < 0) {
dev_warn(dev, "can't update alarm\n");
status = -EIO;
} else
rs5c->regs[RS5C_REG_CTRL1] = buf;
return status;
}
/* NOTE: Since RTC_WKALM_{RD,SET} were originally defined for EFI,
* which only exposes a polled programming interface; and since
* these calls map directly to those EFI requests; we don't demand
* we have an IRQ for this chip when we go through this API.
*
* The older x86_pc derived RTC_ALM_{READ,SET} calls require irqs
* though, managed through RTC_AIE_{ON,OFF} requests.
*/
static int rs5c_read_alarm(struct device *dev, struct rtc_wkalrm *t)
{
struct i2c_client *client = to_i2c_client(dev);
struct rs5c372 *rs5c = i2c_get_clientdata(client);
int status;
status = rs5c_get_regs(rs5c);
if (status < 0)
return status;
/* report alarm time */
t->time.tm_sec = 0;
t->time.tm_min = bcd2bin(rs5c->regs[RS5C_REG_ALARM_A_MIN] & 0x7f);
t->time.tm_hour = rs5c_reg2hr(rs5c, rs5c->regs[RS5C_REG_ALARM_A_HOURS]);
/* ... and status */
t->enabled = !!(rs5c->regs[RS5C_REG_CTRL1] & RS5C_CTRL1_AALE);
t->pending = !!(rs5c->regs[RS5C_REG_CTRL2] & RS5C_CTRL2_AAFG);
return 0;
}
static int rs5c_set_alarm(struct device *dev, struct rtc_wkalrm *t)
{
struct i2c_client *client = to_i2c_client(dev);
struct rs5c372 *rs5c = i2c_get_clientdata(client);
int status, addr, i;
unsigned char buf[3];
/* only handle up to 24 hours in the future, like RTC_ALM_SET */
if (t->time.tm_mday != -1
|| t->time.tm_mon != -1
|| t->time.tm_year != -1)
return -EINVAL;
/* REVISIT: round up tm_sec */
/* if needed, disable irq (clears pending status) */
status = rs5c_get_regs(rs5c);
if (status < 0)
return status;
if (rs5c->regs[RS5C_REG_CTRL1] & RS5C_CTRL1_AALE) {
addr = RS5C_ADDR(RS5C_REG_CTRL1);
buf[0] = rs5c->regs[RS5C_REG_CTRL1] & ~RS5C_CTRL1_AALE;
if (i2c_smbus_write_byte_data(client, addr, buf[0]) < 0) {
dev_dbg(dev, "can't disable alarm\n");
return -EIO;
}
rs5c->regs[RS5C_REG_CTRL1] = buf[0];
}
/* set alarm */
buf[0] = bin2bcd(t->time.tm_min);
buf[1] = rs5c_hr2reg(rs5c, t->time.tm_hour);
buf[2] = 0x7f; /* any/all days */
for (i = 0; i < sizeof(buf); i++) {
addr = RS5C_ADDR(RS5C_REG_ALARM_A_MIN + i);
if (i2c_smbus_write_byte_data(client, addr, buf[i]) < 0) {
dev_dbg(dev, "can't set alarm time\n");
return -EIO;
}
}
/* ... and maybe enable its irq */
if (t->enabled) {
addr = RS5C_ADDR(RS5C_REG_CTRL1);
buf[0] = rs5c->regs[RS5C_REG_CTRL1] | RS5C_CTRL1_AALE;
if (i2c_smbus_write_byte_data(client, addr, buf[0]) < 0)
dev_warn(dev, "can't enable alarm\n");
rs5c->regs[RS5C_REG_CTRL1] = buf[0];
}
return 0;
}
#if IS_ENABLED(CONFIG_RTC_INTF_PROC)
static int rs5c372_rtc_proc(struct device *dev, struct seq_file *seq)
{
int err, osc, trim;
err = rs5c372_get_trim(to_i2c_client(dev), &osc, &trim);
if (err == 0) {
seq_printf(seq, "crystal\t\t: %d.%03d KHz\n",
osc / 1000, osc % 1000);
seq_printf(seq, "trim\t\t: %d\n", trim);
}
return 0;
}
#else
#define rs5c372_rtc_proc NULL
#endif
#ifdef CONFIG_RTC_INTF_DEV
static int rs5c372_ioctl(struct device *dev, unsigned int cmd, unsigned long arg)
{
struct rs5c372 *rs5c = i2c_get_clientdata(to_i2c_client(dev));
unsigned char ctrl2;
int addr;
unsigned int flags;
dev_dbg(dev, "%s: cmd=%x\n", __func__, cmd);
addr = RS5C_ADDR(RS5C_REG_CTRL2);
ctrl2 = i2c_smbus_read_byte_data(rs5c->client, addr);
switch (cmd) {
case RTC_VL_READ:
flags = 0;
switch (rs5c->type) {
case rtc_r2025sd:
case rtc_r2221tl:
if ((rs5c->type == rtc_r2025sd && !(ctrl2 & R2x2x_CTRL2_XSTP)) ||
(rs5c->type == rtc_r2221tl && (ctrl2 & R2x2x_CTRL2_XSTP))) {
flags |= RTC_VL_DATA_INVALID;
}
if (ctrl2 & R2x2x_CTRL2_VDET)
flags |= RTC_VL_BACKUP_LOW;
break;
default:
if (ctrl2 & RS5C_CTRL2_XSTP)
flags |= RTC_VL_DATA_INVALID;
break;
}
return put_user(flags, (unsigned int __user *)arg);
case RTC_VL_CLR:
/* clear VDET bit */
if (rs5c->type == rtc_r2025sd || rs5c->type == rtc_r2221tl) {
ctrl2 &= ~R2x2x_CTRL2_VDET;
if (i2c_smbus_write_byte_data(rs5c->client, addr, ctrl2) < 0) {
dev_dbg(&rs5c->client->dev, "%s: write error in line %i\n",
__func__, __LINE__);
return -EIO;
}
}
return 0;
default:
return -ENOIOCTLCMD;
}
return 0;
}
#else
#define rs5c372_ioctl NULL
#endif
static int rs5c372_read_offset(struct device *dev, long *offset)
{
struct rs5c372 *rs5c = i2c_get_clientdata(to_i2c_client(dev));
u8 val = rs5c->regs[RS5C372_REG_TRIM];
long ppb_per_step = 0;
bool decr = val & RS5C372_TRIM_DECR;
switch (rs5c->type) {
case rtc_r2221tl:
ppb_per_step = val & R2221TL_TRIM_DEV ? 1017 : 3051;
break;
case rtc_rs5c372a:
case rtc_rs5c372b:
ppb_per_step = val & RS5C372_TRIM_XSL ? 3125 : 3051;
break;
default:
ppb_per_step = 3051;
break;
}
/* Only bits [0:5] represent the time counts */
val &= 0x3F;
/* If bits [1:5] are all 0, there is no increment or decrement */
if (!(val & 0x3E)) {
*offset = 0;
} else {
if (decr)
*offset = -(((~val) & 0x3F) + 1) * ppb_per_step;
else
*offset = (val - 1) * ppb_per_step;
}
return 0;
}
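/*
 * Decoding example (r2221tl, low-resolution mode, ~3051 ppb per step):
 * trim 0x03 -> (3 - 1) * 3051 = +6102 ppb, while trim 0x7E (DECR set)
 * -> -(((~0x3E) & 0x3F) + 1) * 3051 = -2 * 3051 = -6102 ppb.
 */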
static int rs5c372_set_offset(struct device *dev, long offset)
{
struct rs5c372 *rs5c = i2c_get_clientdata(to_i2c_client(dev));
int addr = RS5C_ADDR(RS5C372_REG_TRIM);
u8 val = 0;
u8 tmp = 0;
long ppb_per_step = 3051;
long steps = LONG_MIN;
switch (rs5c->type) {
case rtc_rs5c372a:
case rtc_rs5c372b:
tmp = rs5c->regs[RS5C372_REG_TRIM];
if (tmp & RS5C372_TRIM_XSL) {
ppb_per_step = 3125;
val |= RS5C372_TRIM_XSL;
}
break;
case rtc_r2221tl:
/*
* Check if it is possible to use high resolution mode (DEV=1).
* In this mode, the minimum resolution is 2 / (32768 * 20 * 3),
* which is about 1017 ppb.
*/
steps = DIV_ROUND_CLOSEST(offset, 1017);
if (steps >= -0x3E && steps <= 0x3E) {
ppb_per_step = 1017;
val |= R2221TL_TRIM_DEV;
} else {
/*
* offset is out of the range of high resolution mode.
* Try to use low resolution mode (DEV=0). In this mode,
* the minimum resolution is 2 / (32768 * 20), which is
* about 3051 ppb.
*/
steps = LONG_MIN;
}
break;
default:
break;
}
if (steps == LONG_MIN) {
steps = DIV_ROUND_CLOSEST(offset, ppb_per_step);
if (steps > 0x3E || steps < -0x3E)
return -ERANGE;
}
if (steps > 0) {
val |= steps + 1;
} else {
val |= RS5C372_TRIM_DECR;
val |= (~(-steps - 1)) & 0x3F;
}
if (!steps || !(val & 0x3E)) {
/*
 * If the offset is too small, set the oscillation adjustment register
 * or time trimming register to its default value, which means no
 * increment or decrement. But for rs5c372[a|b], the XSL bit should be
 * kept unchanged.
 */
if (rs5c->type == rtc_rs5c372a || rs5c->type == rtc_rs5c372b)
val &= RS5C372_TRIM_XSL;
else
val = 0;
}
dev_dbg(&rs5c->client->dev, "write 0x%x for offset %ld\n", val, offset);
if (i2c_smbus_write_byte_data(rs5c->client, addr, val) < 0) {
dev_err(&rs5c->client->dev, "failed to write 0x%x to reg %d\n", val, addr);
return -EIO;
}
rs5c->regs[RS5C372_REG_TRIM] = val;
return 0;
}
static const struct rtc_class_ops rs5c372_rtc_ops = {
.proc = rs5c372_rtc_proc,
.read_time = rs5c372_rtc_read_time,
.set_time = rs5c372_rtc_set_time,
.read_alarm = rs5c_read_alarm,
.set_alarm = rs5c_set_alarm,
.alarm_irq_enable = rs5c_rtc_alarm_irq_enable,
.ioctl = rs5c372_ioctl,
.read_offset = rs5c372_read_offset,
.set_offset = rs5c372_set_offset,
};
#if IS_ENABLED(CONFIG_RTC_INTF_SYSFS)
static ssize_t rs5c372_sysfs_show_trim(struct device *dev,
struct device_attribute *attr, char *buf)
{
int err, trim;
err = rs5c372_get_trim(to_i2c_client(dev), NULL, &trim);
if (err)
return err;
return sprintf(buf, "%d\n", trim);
}
static DEVICE_ATTR(trim, S_IRUGO, rs5c372_sysfs_show_trim, NULL);
static ssize_t rs5c372_sysfs_show_osc(struct device *dev,
struct device_attribute *attr, char *buf)
{
int err, osc;
err = rs5c372_get_trim(to_i2c_client(dev), &osc, NULL);
if (err)
return err;
return sprintf(buf, "%d.%03d KHz\n", osc / 1000, osc % 1000);
}
static DEVICE_ATTR(osc, S_IRUGO, rs5c372_sysfs_show_osc, NULL);
static int rs5c_sysfs_register(struct device *dev)
{
int err;
err = device_create_file(dev, &dev_attr_trim);
if (err)
return err;
err = device_create_file(dev, &dev_attr_osc);
if (err)
device_remove_file(dev, &dev_attr_trim);
return err;
}
static void rs5c_sysfs_unregister(struct device *dev)
{
device_remove_file(dev, &dev_attr_trim);
device_remove_file(dev, &dev_attr_osc);
}
#else
static int rs5c_sysfs_register(struct device *dev)
{
return 0;
}
static void rs5c_sysfs_unregister(struct device *dev)
{
/* nothing */
}
#endif /* SYSFS */
static struct i2c_driver rs5c372_driver;
static int rs5c_oscillator_setup(struct rs5c372 *rs5c372)
{
unsigned char buf[2];
int addr, i, ret = 0;
addr = RS5C_ADDR(RS5C_REG_CTRL1);
buf[0] = rs5c372->regs[RS5C_REG_CTRL1];
buf[1] = rs5c372->regs[RS5C_REG_CTRL2];
switch (rs5c372->type) {
case rtc_r2025sd:
if (buf[1] & R2x2x_CTRL2_XSTP)
return ret;
break;
case rtc_r2221tl:
if (!(buf[1] & R2x2x_CTRL2_XSTP))
return ret;
break;
default:
if (!(buf[1] & RS5C_CTRL2_XSTP))
return ret;
break;
}
/* use 24hr mode */
switch (rs5c372->type) {
case rtc_rs5c372a:
case rtc_rs5c372b:
buf[1] |= RS5C372_CTRL2_24;
rs5c372->time24 = 1;
break;
case rtc_r2025sd:
case rtc_r2221tl:
case rtc_rv5c386:
case rtc_rv5c387a:
buf[0] |= RV5C387_CTRL1_24;
rs5c372->time24 = 1;
break;
default:
/* impossible */
break;
}
for (i = 0; i < sizeof(buf); i++) {
addr = RS5C_ADDR(RS5C_REG_CTRL1 + i);
ret = i2c_smbus_write_byte_data(rs5c372->client, addr, buf[i]);
if (unlikely(ret < 0))
return ret;
}
rs5c372->regs[RS5C_REG_CTRL1] = buf[0];
rs5c372->regs[RS5C_REG_CTRL2] = buf[1];
return 0;
}
static int rs5c372_probe(struct i2c_client *client)
{
int err = 0;
int smbus_mode = 0;
struct rs5c372 *rs5c372;
dev_dbg(&client->dev, "%s\n", __func__);
if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C |
I2C_FUNC_SMBUS_BYTE_DATA | I2C_FUNC_SMBUS_I2C_BLOCK)) {
/*
* If we don't have any master mode adapter, try breaking
* it down in to the barest of capabilities.
*/
if (i2c_check_functionality(client->adapter,
I2C_FUNC_SMBUS_BYTE_DATA |
I2C_FUNC_SMBUS_I2C_BLOCK))
smbus_mode = 1;
else {
/* Still no good, give up */
err = -ENODEV;
goto exit;
}
}
rs5c372 = devm_kzalloc(&client->dev, sizeof(struct rs5c372),
GFP_KERNEL);
if (!rs5c372) {
err = -ENOMEM;
goto exit;
}
rs5c372->client = client;
i2c_set_clientdata(client, rs5c372);
if (client->dev.of_node) {
rs5c372->type = (uintptr_t)of_device_get_match_data(&client->dev);
} else {
const struct i2c_device_id *id = i2c_match_id(rs5c372_id, client);
rs5c372->type = id->driver_data;
}
/* we read registers 0x0f then 0x00-0x0f; skip the first one */
rs5c372->regs = &rs5c372->buf[1];
rs5c372->smbus = smbus_mode;
err = rs5c_get_regs(rs5c372);
if (err < 0)
goto exit;
/* clock may be set for am/pm or 24 hr time */
switch (rs5c372->type) {
case rtc_rs5c372a:
case rtc_rs5c372b:
/* alarm uses ALARM_A; and nINTRA on 372a, nINTR on 372b.
* so does periodic irq, except some 372a modes.
*/
if (rs5c372->regs[RS5C_REG_CTRL2] & RS5C372_CTRL2_24)
rs5c372->time24 = 1;
break;
case rtc_r2025sd:
case rtc_r2221tl:
case rtc_rv5c386:
case rtc_rv5c387a:
if (rs5c372->regs[RS5C_REG_CTRL1] & RV5C387_CTRL1_24)
rs5c372->time24 = 1;
/* alarm uses ALARM_W; and nINTRB for alarm and periodic
* irq, on both 386 and 387
*/
break;
default:
dev_err(&client->dev, "unknown RTC type\n");
goto exit;
}
/* if the oscillator lost power and no other software (like
* the bootloader) set it up, do it here.
*
* The R2025S/D does this a little differently than the other
* parts, so we special case that..
*/
err = rs5c_oscillator_setup(rs5c372);
if (unlikely(err < 0)) {
dev_err(&client->dev, "setup error\n");
goto exit;
}
dev_info(&client->dev, "%s found, %s\n",
({ char *s; switch (rs5c372->type) {
case rtc_r2025sd: s = "r2025sd"; break;
case rtc_r2221tl: s = "r2221tl"; break;
case rtc_rs5c372a: s = "rs5c372a"; break;
case rtc_rs5c372b: s = "rs5c372b"; break;
case rtc_rv5c386: s = "rv5c386"; break;
case rtc_rv5c387a: s = "rv5c387a"; break;
default: s = "chip"; break;
}; s;}),
rs5c372->time24 ? "24hr" : "am/pm"
);
/* REVISIT use client->irq to register alarm irq ... */
rs5c372->rtc = devm_rtc_device_register(&client->dev,
rs5c372_driver.driver.name,
&rs5c372_rtc_ops, THIS_MODULE);
if (IS_ERR(rs5c372->rtc)) {
err = PTR_ERR(rs5c372->rtc);
goto exit;
}
err = rs5c_sysfs_register(&client->dev);
if (err)
goto exit;
return 0;
exit:
return err;
}
static void rs5c372_remove(struct i2c_client *client)
{
rs5c_sysfs_unregister(&client->dev);
}
static struct i2c_driver rs5c372_driver = {
.driver = {
.name = "rtc-rs5c372",
.of_match_table = of_match_ptr(rs5c372_of_match),
},
.probe = rs5c372_probe,
.remove = rs5c372_remove,
.id_table = rs5c372_id,
};
module_i2c_driver(rs5c372_driver);
MODULE_AUTHOR(
"Pavel Mironchik <[email protected]>, "
"Alessandro Zummo <[email protected]>, "
"Paul Mundt <[email protected]>");
MODULE_DESCRIPTION("Ricoh RS5C372 RTC driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/rtc/rtc-rs5c372.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* rtc-efi: RTC Class Driver for EFI-based systems
*
* Copyright (C) 2009 Hewlett-Packard Development Company, L.P.
*
* Author: dann frazier <[email protected]>
* Based on efirtc.c by Stephane Eranian
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/stringify.h>
#include <linux/time.h>
#include <linux/platform_device.h>
#include <linux/rtc.h>
#include <linux/efi.h>
#define EFI_ISDST (EFI_TIME_ADJUST_DAYLIGHT|EFI_TIME_IN_DAYLIGHT)
/*
* returns day of the year [0-365]
*/
static inline int
compute_yday(efi_time_t *eft)
{
/* efi_time_t.month is in [1-12], so we need -1 */
return rtc_year_days(eft->day, eft->month - 1, eft->year);
}
/*
* returns day of the week [0-6] 0=Sunday
*/
static int
compute_wday(efi_time_t *eft, int yday)
{
int ndays = eft->year * (365 % 7)
+ (eft->year - 1) / 4
- (eft->year - 1) / 100
+ (eft->year - 1) / 400
+ yday;
/*
* 1/1/0000 may or may not have been a Sunday (if it ever existed at
* all) but assuming it was makes this calculation work correctly.
*/
return ndays % 7;
}
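/*
 * Sanity check of the formula: for 1 Jan 2000 (yday = 0),
 * ndays = 2000 + 499 - 19 + 4 + 0 = 2484, and 2484 % 7 = 6,
 * i.e. Saturday - which 1 Jan 2000 indeed was.
 */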
static void
convert_to_efi_time(struct rtc_time *wtime, efi_time_t *eft)
{
eft->year = wtime->tm_year + 1900;
eft->month = wtime->tm_mon + 1;
eft->day = wtime->tm_mday;
eft->hour = wtime->tm_hour;
eft->minute = wtime->tm_min;
eft->second = wtime->tm_sec;
eft->nanosecond = 0;
eft->daylight = wtime->tm_isdst ? EFI_ISDST : 0;
eft->timezone = EFI_UNSPECIFIED_TIMEZONE;
}
static bool
convert_from_efi_time(efi_time_t *eft, struct rtc_time *wtime)
{
memset(wtime, 0, sizeof(*wtime));
if (eft->second >= 60)
return false;
wtime->tm_sec = eft->second;
if (eft->minute >= 60)
return false;
wtime->tm_min = eft->minute;
if (eft->hour >= 24)
return false;
wtime->tm_hour = eft->hour;
if (!eft->day || eft->day > 31)
return false;
wtime->tm_mday = eft->day;
if (!eft->month || eft->month > 12)
return false;
wtime->tm_mon = eft->month - 1;
if (eft->year < 1900 || eft->year > 9999)
return false;
wtime->tm_year = eft->year - 1900;
/* day in the year [0-365] */
wtime->tm_yday = compute_yday(eft);
/* day of the week [0-6], Sunday=0 */
wtime->tm_wday = compute_wday(eft, wtime->tm_yday);
switch (eft->daylight & EFI_ISDST) {
case EFI_ISDST:
wtime->tm_isdst = 1;
break;
case EFI_TIME_ADJUST_DAYLIGHT:
wtime->tm_isdst = 0;
break;
default:
wtime->tm_isdst = -1;
}
return true;
}
static int efi_read_alarm(struct device *dev, struct rtc_wkalrm *wkalrm)
{
efi_time_t eft;
efi_status_t status;
/*
* As of EFI v1.10, this call always returns an unsupported status
*/
status = efi.get_wakeup_time((efi_bool_t *)&wkalrm->enabled,
(efi_bool_t *)&wkalrm->pending, &eft);
if (status != EFI_SUCCESS)
return -EINVAL;
if (!convert_from_efi_time(&eft, &wkalrm->time))
return -EIO;
return rtc_valid_tm(&wkalrm->time);
}
static int efi_set_alarm(struct device *dev, struct rtc_wkalrm *wkalrm)
{
efi_time_t eft;
efi_status_t status;
convert_to_efi_time(&wkalrm->time, &eft);
/*
* XXX Fixme:
* As of EFI 0.92 with the firmware I have on my
* machine this call does not seem to work quite
* right
*
* As of v1.10, this call always returns an unsupported status
*/
status = efi.set_wakeup_time((efi_bool_t)wkalrm->enabled, &eft);
dev_warn(dev, "write status is %d\n", (int)status);
return status == EFI_SUCCESS ? 0 : -EINVAL;
}
static int efi_read_time(struct device *dev, struct rtc_time *tm)
{
efi_status_t status;
efi_time_t eft;
efi_time_cap_t cap;
status = efi.get_time(&eft, &cap);
if (status != EFI_SUCCESS) {
/* should never happen */
dev_err_once(dev, "can't read time\n");
return -EINVAL;
}
if (!convert_from_efi_time(&eft, tm))
return -EIO;
return 0;
}
static int efi_set_time(struct device *dev, struct rtc_time *tm)
{
efi_status_t status;
efi_time_t eft;
convert_to_efi_time(tm, &eft);
status = efi.set_time(&eft);
return status == EFI_SUCCESS ? 0 : -EINVAL;
}
static int efi_procfs(struct device *dev, struct seq_file *seq)
{
efi_time_t eft, alm;
efi_time_cap_t cap;
efi_bool_t enabled, pending;
struct rtc_device *rtc = dev_get_drvdata(dev);
memset(&eft, 0, sizeof(eft));
memset(&alm, 0, sizeof(alm));
memset(&cap, 0, sizeof(cap));
efi.get_time(&eft, &cap);
efi.get_wakeup_time(&enabled, &pending, &alm);
seq_printf(seq,
"Time\t\t: %u:%u:%u.%09u\n"
"Date\t\t: %u-%u-%u\n"
"Daylight\t: %u\n",
eft.hour, eft.minute, eft.second, eft.nanosecond,
eft.year, eft.month, eft.day,
eft.daylight);
if (eft.timezone == EFI_UNSPECIFIED_TIMEZONE)
seq_puts(seq, "Timezone\t: unspecified\n");
else
/* XXX fixme: convert to string? */
seq_printf(seq, "Timezone\t: %u\n", eft.timezone);
if (test_bit(RTC_FEATURE_ALARM, rtc->features)) {
seq_printf(seq,
"Alarm Time\t: %u:%u:%u.%09u\n"
"Alarm Date\t: %u-%u-%u\n"
"Alarm Daylight\t: %u\n"
"Enabled\t\t: %s\n"
"Pending\t\t: %s\n",
alm.hour, alm.minute, alm.second, alm.nanosecond,
alm.year, alm.month, alm.day,
alm.daylight,
enabled == 1 ? "yes" : "no",
pending == 1 ? "yes" : "no");
if (eft.timezone == EFI_UNSPECIFIED_TIMEZONE)
seq_puts(seq, "Timezone\t: unspecified\n");
else
/* XXX fixme: convert to string? */
seq_printf(seq, "Timezone\t: %u\n", alm.timezone);
}
/*
* now prints the capabilities
*/
seq_printf(seq,
"Resolution\t: %u\n"
"Accuracy\t: %u\n"
"SetstoZero\t: %u\n",
cap.resolution, cap.accuracy, cap.sets_to_zero);
return 0;
}
static const struct rtc_class_ops efi_rtc_ops = {
.read_time = efi_read_time,
.set_time = efi_set_time,
.read_alarm = efi_read_alarm,
.set_alarm = efi_set_alarm,
.proc = efi_procfs,
};
static int __init efi_rtc_probe(struct platform_device *dev)
{
struct rtc_device *rtc;
efi_time_t eft;
efi_time_cap_t cap;
/* First check if the RTC is usable */
if (efi.get_time(&eft, &cap) != EFI_SUCCESS)
return -ENODEV;
rtc = devm_rtc_allocate_device(&dev->dev);
if (IS_ERR(rtc))
return PTR_ERR(rtc);
platform_set_drvdata(dev, rtc);
rtc->ops = &efi_rtc_ops;
clear_bit(RTC_FEATURE_UPDATE_INTERRUPT, rtc->features);
if (efi_rt_services_supported(EFI_RT_SUPPORTED_WAKEUP_SERVICES))
set_bit(RTC_FEATURE_ALARM_WAKEUP_ONLY, rtc->features);
else
clear_bit(RTC_FEATURE_ALARM, rtc->features);
device_init_wakeup(&dev->dev, true);
return devm_rtc_register_device(rtc);
}
static struct platform_driver efi_rtc_driver = {
.driver = {
.name = "rtc-efi",
},
};
module_platform_driver_probe(efi_rtc_driver, efi_rtc_probe);
MODULE_AUTHOR("dann frazier <[email protected]>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("EFI RTC driver");
MODULE_ALIAS("platform:rtc-efi");
| linux-master | drivers/rtc/rtc-efi.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* An RTC driver for Allwinner A10/A20
*
* Copyright (c) 2013, Carlo Caione <[email protected]>
*/
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/rtc.h>
#include <linux/types.h>
#define SUNXI_LOSC_CTRL 0x0000
#define SUNXI_LOSC_CTRL_RTC_HMS_ACC BIT(8)
#define SUNXI_LOSC_CTRL_RTC_YMD_ACC BIT(7)
#define SUNXI_RTC_YMD 0x0004
#define SUNXI_RTC_HMS 0x0008
#define SUNXI_ALRM_DHMS 0x000c
#define SUNXI_ALRM_EN 0x0014
#define SUNXI_ALRM_EN_CNT_EN BIT(8)
#define SUNXI_ALRM_IRQ_EN 0x0018
#define SUNXI_ALRM_IRQ_EN_CNT_IRQ_EN BIT(0)
#define SUNXI_ALRM_IRQ_STA 0x001c
#define SUNXI_ALRM_IRQ_STA_CNT_IRQ_PEND BIT(0)
#define SUNXI_MASK_DH 0x0000001f
#define SUNXI_MASK_SM 0x0000003f
#define SUNXI_MASK_M 0x0000000f
#define SUNXI_MASK_LY 0x00000001
#define SUNXI_MASK_D 0x00000ffe
#define SUNXI_GET(x, mask, shift) (((x) & ((mask) << (shift))) \
>> (shift))
#define SUNXI_SET(x, mask, shift) (((x) & (mask)) << (shift))
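/*
 * Example: SUNXI_DATE_SET_MON_VALUE(3) == 0x300 places March (binary,
 * not BCD) in bits [11:8] of SUNXI_RTC_YMD, and
 * SUNXI_DATE_GET_MON_VALUE(0x300) recovers 3 again.
 */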
/*
* Get date values
*/
#define SUNXI_DATE_GET_DAY_VALUE(x) SUNXI_GET(x, SUNXI_MASK_DH, 0)
#define SUNXI_DATE_GET_MON_VALUE(x) SUNXI_GET(x, SUNXI_MASK_M, 8)
#define SUNXI_DATE_GET_YEAR_VALUE(x, mask) SUNXI_GET(x, mask, 16)
/*
* Get time values
*/
#define SUNXI_TIME_GET_SEC_VALUE(x) SUNXI_GET(x, SUNXI_MASK_SM, 0)
#define SUNXI_TIME_GET_MIN_VALUE(x) SUNXI_GET(x, SUNXI_MASK_SM, 8)
#define SUNXI_TIME_GET_HOUR_VALUE(x) SUNXI_GET(x, SUNXI_MASK_DH, 16)
/*
* Get alarm values
*/
#define SUNXI_ALRM_GET_SEC_VALUE(x) SUNXI_GET(x, SUNXI_MASK_SM, 0)
#define SUNXI_ALRM_GET_MIN_VALUE(x) SUNXI_GET(x, SUNXI_MASK_SM, 8)
#define SUNXI_ALRM_GET_HOUR_VALUE(x) SUNXI_GET(x, SUNXI_MASK_DH, 16)
/*
* Set date values
*/
#define SUNXI_DATE_SET_DAY_VALUE(x) SUNXI_DATE_GET_DAY_VALUE(x)
#define SUNXI_DATE_SET_MON_VALUE(x) SUNXI_SET(x, SUNXI_MASK_M, 8)
#define SUNXI_DATE_SET_YEAR_VALUE(x, mask) SUNXI_SET(x, mask, 16)
#define SUNXI_LEAP_SET_VALUE(x, shift) SUNXI_SET(x, SUNXI_MASK_LY, shift)
/*
* Set time values
*/
#define SUNXI_TIME_SET_SEC_VALUE(x) SUNXI_TIME_GET_SEC_VALUE(x)
#define SUNXI_TIME_SET_MIN_VALUE(x) SUNXI_SET(x, SUNXI_MASK_SM, 8)
#define SUNXI_TIME_SET_HOUR_VALUE(x) SUNXI_SET(x, SUNXI_MASK_DH, 16)
/*
* Set alarm values
*/
#define SUNXI_ALRM_SET_SEC_VALUE(x) SUNXI_ALRM_GET_SEC_VALUE(x)
#define SUNXI_ALRM_SET_MIN_VALUE(x) SUNXI_SET(x, SUNXI_MASK_SM, 8)
#define SUNXI_ALRM_SET_HOUR_VALUE(x) SUNXI_SET(x, SUNXI_MASK_DH, 16)
#define SUNXI_ALRM_SET_DAY_VALUE(x) SUNXI_SET(x, SUNXI_MASK_D, 21)
/*
* Time unit conversions
*/
#define SEC_IN_MIN 60
#define SEC_IN_HOUR (60 * SEC_IN_MIN)
#define SEC_IN_DAY (24 * SEC_IN_HOUR)
/*
* The year parameter passed to the driver is usually an offset relative to
* the year 1900. This macro is used to convert this offset to another one
* relative to the minimum year allowed by the hardware.
*/
#define SUNXI_YEAR_OFF(x) ((x)->min - 1900)
/*
* min and max year are arbitrary set considering the limited range of the
* hardware register field
*/
struct sunxi_rtc_data_year {
unsigned int min; /* min year allowed */
unsigned int max; /* max year allowed */
unsigned int mask; /* mask for the year field */
unsigned char leap_shift; /* bit shift to get the leap year */
};
static const struct sunxi_rtc_data_year data_year_param[] = {
[0] = {
.min = 2010,
.max = 2073,
.mask = 0x3f,
.leap_shift = 22,
},
[1] = {
.min = 1970,
.max = 2225,
.mask = 0xff,
.leap_shift = 24,
},
};
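/*
 * The hardware year field only stores an offset from data_year->min.
 * With variant [0] (min = 2010), calendar year 2023 is stored as 13;
 * sunxi_rtc_gettime() then adds SUNXI_YEAR_OFF() (2010 - 1900 = 110)
 * to turn it back into the 1900-based tm_year of 123.
 */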
struct sunxi_rtc_dev {
struct rtc_device *rtc;
struct device *dev;
const struct sunxi_rtc_data_year *data_year;
void __iomem *base;
int irq;
};
static irqreturn_t sunxi_rtc_alarmirq(int irq, void *id)
{
struct sunxi_rtc_dev *chip = (struct sunxi_rtc_dev *) id;
u32 val;
val = readl(chip->base + SUNXI_ALRM_IRQ_STA);
if (val & SUNXI_ALRM_IRQ_STA_CNT_IRQ_PEND) {
val |= SUNXI_ALRM_IRQ_STA_CNT_IRQ_PEND;
writel(val, chip->base + SUNXI_ALRM_IRQ_STA);
rtc_update_irq(chip->rtc, 1, RTC_AF | RTC_IRQF);
return IRQ_HANDLED;
}
return IRQ_NONE;
}
static void sunxi_rtc_setaie(unsigned int to, struct sunxi_rtc_dev *chip)
{
u32 alrm_val = 0;
u32 alrm_irq_val = 0;
if (to) {
alrm_val = readl(chip->base + SUNXI_ALRM_EN);
alrm_val |= SUNXI_ALRM_EN_CNT_EN;
alrm_irq_val = readl(chip->base + SUNXI_ALRM_IRQ_EN);
alrm_irq_val |= SUNXI_ALRM_IRQ_EN_CNT_IRQ_EN;
} else {
writel(SUNXI_ALRM_IRQ_STA_CNT_IRQ_PEND,
chip->base + SUNXI_ALRM_IRQ_STA);
}
writel(alrm_val, chip->base + SUNXI_ALRM_EN);
writel(alrm_irq_val, chip->base + SUNXI_ALRM_IRQ_EN);
}
static int sunxi_rtc_getalarm(struct device *dev, struct rtc_wkalrm *wkalrm)
{
struct sunxi_rtc_dev *chip = dev_get_drvdata(dev);
struct rtc_time *alrm_tm = &wkalrm->time;
u32 alrm;
u32 alrm_en;
u32 date;
alrm = readl(chip->base + SUNXI_ALRM_DHMS);
date = readl(chip->base + SUNXI_RTC_YMD);
alrm_tm->tm_sec = SUNXI_ALRM_GET_SEC_VALUE(alrm);
alrm_tm->tm_min = SUNXI_ALRM_GET_MIN_VALUE(alrm);
alrm_tm->tm_hour = SUNXI_ALRM_GET_HOUR_VALUE(alrm);
alrm_tm->tm_mday = SUNXI_DATE_GET_DAY_VALUE(date);
alrm_tm->tm_mon = SUNXI_DATE_GET_MON_VALUE(date);
alrm_tm->tm_year = SUNXI_DATE_GET_YEAR_VALUE(date,
chip->data_year->mask);
alrm_tm->tm_mon -= 1;
/*
* switch from (data_year->min)-relative offset to
* a (1900)-relative one
*/
alrm_tm->tm_year += SUNXI_YEAR_OFF(chip->data_year);
alrm_en = readl(chip->base + SUNXI_ALRM_IRQ_EN);
	if (alrm_en & SUNXI_ALRM_IRQ_EN_CNT_IRQ_EN)
wkalrm->enabled = 1;
return 0;
}
static int sunxi_rtc_gettime(struct device *dev, struct rtc_time *rtc_tm)
{
struct sunxi_rtc_dev *chip = dev_get_drvdata(dev);
u32 date, time;
	/*
	 * Read the date and time registers twice and retry until two
	 * consecutive reads match, in case the values change mid-read.
	 */
do {
date = readl(chip->base + SUNXI_RTC_YMD);
time = readl(chip->base + SUNXI_RTC_HMS);
} while ((date != readl(chip->base + SUNXI_RTC_YMD)) ||
(time != readl(chip->base + SUNXI_RTC_HMS)));
rtc_tm->tm_sec = SUNXI_TIME_GET_SEC_VALUE(time);
rtc_tm->tm_min = SUNXI_TIME_GET_MIN_VALUE(time);
rtc_tm->tm_hour = SUNXI_TIME_GET_HOUR_VALUE(time);
rtc_tm->tm_mday = SUNXI_DATE_GET_DAY_VALUE(date);
rtc_tm->tm_mon = SUNXI_DATE_GET_MON_VALUE(date);
rtc_tm->tm_year = SUNXI_DATE_GET_YEAR_VALUE(date,
chip->data_year->mask);
rtc_tm->tm_mon -= 1;
/*
* switch from (data_year->min)-relative offset to
* a (1900)-relative one
*/
rtc_tm->tm_year += SUNXI_YEAR_OFF(chip->data_year);
return 0;
}
static int sunxi_rtc_setalarm(struct device *dev, struct rtc_wkalrm *wkalrm)
{
struct sunxi_rtc_dev *chip = dev_get_drvdata(dev);
struct rtc_time *alrm_tm = &wkalrm->time;
struct rtc_time tm_now;
u32 alrm;
time64_t diff;
unsigned long time_gap;
unsigned long time_gap_day;
unsigned long time_gap_hour;
unsigned long time_gap_min;
int ret;
ret = sunxi_rtc_gettime(dev, &tm_now);
if (ret < 0) {
dev_err(dev, "Error in getting time\n");
return -EINVAL;
}
diff = rtc_tm_sub(alrm_tm, &tm_now);
if (diff <= 0) {
dev_err(dev, "Date to set in the past\n");
return -EINVAL;
}
if (diff > 255 * SEC_IN_DAY) {
dev_err(dev, "Day must be in the range 0 - 255\n");
return -EINVAL;
}
time_gap = diff;
time_gap_day = time_gap / SEC_IN_DAY;
time_gap -= time_gap_day * SEC_IN_DAY;
time_gap_hour = time_gap / SEC_IN_HOUR;
time_gap -= time_gap_hour * SEC_IN_HOUR;
time_gap_min = time_gap / SEC_IN_MIN;
time_gap -= time_gap_min * SEC_IN_MIN;
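	/*
	 * e.g. diff = 90061 seconds decomposes into 1 day, 1 hour and
	 * 1 minute, with 1 second left in time_gap.
	 */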
sunxi_rtc_setaie(0, chip);
writel(0, chip->base + SUNXI_ALRM_DHMS);
usleep_range(100, 300);
alrm = SUNXI_ALRM_SET_SEC_VALUE(time_gap) |
SUNXI_ALRM_SET_MIN_VALUE(time_gap_min) |
SUNXI_ALRM_SET_HOUR_VALUE(time_gap_hour) |
SUNXI_ALRM_SET_DAY_VALUE(time_gap_day);
writel(alrm, chip->base + SUNXI_ALRM_DHMS);
writel(0, chip->base + SUNXI_ALRM_IRQ_EN);
writel(SUNXI_ALRM_IRQ_EN_CNT_IRQ_EN, chip->base + SUNXI_ALRM_IRQ_EN);
sunxi_rtc_setaie(wkalrm->enabled, chip);
return 0;
}
static int sunxi_rtc_wait(struct sunxi_rtc_dev *chip, int offset,
unsigned int mask, unsigned int ms_timeout)
{
const unsigned long timeout = jiffies + msecs_to_jiffies(ms_timeout);
u32 reg;
do {
reg = readl(chip->base + offset);
reg &= mask;
if (reg == mask)
return 0;
} while (time_before(jiffies, timeout));
return -ETIMEDOUT;
}
static int sunxi_rtc_settime(struct device *dev, struct rtc_time *rtc_tm)
{
struct sunxi_rtc_dev *chip = dev_get_drvdata(dev);
u32 date = 0;
u32 time = 0;
unsigned int year;
/*
* the input rtc_tm->tm_year is the offset relative to 1900. We use
* the SUNXI_YEAR_OFF macro to rebase it with respect to the min year
* allowed by the hardware
*/
year = rtc_tm->tm_year + 1900;
if (year < chip->data_year->min || year > chip->data_year->max) {
dev_err(dev, "rtc only supports year in range %u - %u\n",
chip->data_year->min, chip->data_year->max);
return -EINVAL;
}
rtc_tm->tm_year -= SUNXI_YEAR_OFF(chip->data_year);
rtc_tm->tm_mon += 1;
date = SUNXI_DATE_SET_DAY_VALUE(rtc_tm->tm_mday) |
SUNXI_DATE_SET_MON_VALUE(rtc_tm->tm_mon) |
SUNXI_DATE_SET_YEAR_VALUE(rtc_tm->tm_year,
chip->data_year->mask);
if (is_leap_year(year))
date |= SUNXI_LEAP_SET_VALUE(1, chip->data_year->leap_shift);
time = SUNXI_TIME_SET_SEC_VALUE(rtc_tm->tm_sec) |
SUNXI_TIME_SET_MIN_VALUE(rtc_tm->tm_min) |
SUNXI_TIME_SET_HOUR_VALUE(rtc_tm->tm_hour);
writel(0, chip->base + SUNXI_RTC_HMS);
writel(0, chip->base + SUNXI_RTC_YMD);
writel(time, chip->base + SUNXI_RTC_HMS);
/*
* After writing the RTC HH-MM-SS register, the
* SUNXI_LOSC_CTRL_RTC_HMS_ACC bit is set and it will not
* be cleared until the real writing operation is finished
*/
if (sunxi_rtc_wait(chip, SUNXI_LOSC_CTRL,
SUNXI_LOSC_CTRL_RTC_HMS_ACC, 50)) {
dev_err(dev, "Failed to set rtc time.\n");
return -1;
}
writel(date, chip->base + SUNXI_RTC_YMD);
/*
* After writing the RTC YY-MM-DD register, the
* SUNXI_LOSC_CTRL_RTC_YMD_ACC bit is set and it will not
* be cleared until the real writing operation is finished
*/
if (sunxi_rtc_wait(chip, SUNXI_LOSC_CTRL,
SUNXI_LOSC_CTRL_RTC_YMD_ACC, 50)) {
dev_err(dev, "Failed to set rtc time.\n");
return -1;
}
return 0;
}
static int sunxi_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
{
struct sunxi_rtc_dev *chip = dev_get_drvdata(dev);
if (!enabled)
sunxi_rtc_setaie(enabled, chip);
return 0;
}
static const struct rtc_class_ops sunxi_rtc_ops = {
.read_time = sunxi_rtc_gettime,
.set_time = sunxi_rtc_settime,
.read_alarm = sunxi_rtc_getalarm,
.set_alarm = sunxi_rtc_setalarm,
.alarm_irq_enable = sunxi_rtc_alarm_irq_enable
};
static const struct of_device_id sunxi_rtc_dt_ids[] = {
{ .compatible = "allwinner,sun4i-a10-rtc", .data = &data_year_param[0] },
{ .compatible = "allwinner,sun7i-a20-rtc", .data = &data_year_param[1] },
{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, sunxi_rtc_dt_ids);
static int sunxi_rtc_probe(struct platform_device *pdev)
{
struct sunxi_rtc_dev *chip;
int ret;
chip = devm_kzalloc(&pdev->dev, sizeof(*chip), GFP_KERNEL);
if (!chip)
return -ENOMEM;
platform_set_drvdata(pdev, chip);
chip->dev = &pdev->dev;
chip->rtc = devm_rtc_allocate_device(&pdev->dev);
if (IS_ERR(chip->rtc))
return PTR_ERR(chip->rtc);
chip->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(chip->base))
return PTR_ERR(chip->base);
chip->irq = platform_get_irq(pdev, 0);
if (chip->irq < 0)
return chip->irq;
ret = devm_request_irq(&pdev->dev, chip->irq, sunxi_rtc_alarmirq,
0, dev_name(&pdev->dev), chip);
if (ret) {
dev_err(&pdev->dev, "Could not request IRQ\n");
return ret;
}
chip->data_year = of_device_get_match_data(&pdev->dev);
if (!chip->data_year) {
dev_err(&pdev->dev, "Unable to setup RTC data\n");
return -ENODEV;
}
/* clear the alarm count value */
writel(0, chip->base + SUNXI_ALRM_DHMS);
	/* disable the alarm so that no IRQ pending flag is generated */
writel(0, chip->base + SUNXI_ALRM_EN);
	/* disable the alarm week/cnt IRQ and do not route it to the CPU */
writel(0, chip->base + SUNXI_ALRM_IRQ_EN);
/* clear alarm week/cnt irq pending */
writel(SUNXI_ALRM_IRQ_STA_CNT_IRQ_PEND, chip->base +
SUNXI_ALRM_IRQ_STA);
chip->rtc->ops = &sunxi_rtc_ops;
return devm_rtc_register_device(chip->rtc);
}
static struct platform_driver sunxi_rtc_driver = {
.probe = sunxi_rtc_probe,
.driver = {
.name = "sunxi-rtc",
.of_match_table = sunxi_rtc_dt_ids,
},
};
module_platform_driver(sunxi_rtc_driver);
MODULE_DESCRIPTION("sunxi RTC driver");
MODULE_AUTHOR("Carlo Caione <[email protected]>");
MODULE_LICENSE("GPL");
| linux-master | drivers/rtc/rtc-sunxi.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2014-2015 MediaTek Inc.
* Author: Tianping.Fang <[email protected]>
*/
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/mfd/mt6397/core.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/rtc.h>
#include <linux/mfd/mt6397/rtc.h>
#include <linux/mod_devicetable.h>
static int mtk_rtc_write_trigger(struct mt6397_rtc *rtc)
{
int ret;
u32 data;
ret = regmap_write(rtc->regmap, rtc->addr_base + rtc->data->wrtgr, 1);
if (ret < 0)
return ret;
ret = regmap_read_poll_timeout(rtc->regmap,
rtc->addr_base + RTC_BBPU, data,
!(data & RTC_BBPU_CBUSY),
MTK_RTC_POLL_DELAY_US,
MTK_RTC_POLL_TIMEOUT);
if (ret < 0)
dev_err(rtc->rtc_dev->dev.parent,
"failed to write WRTGR: %d\n", ret);
return ret;
}
static irqreturn_t mtk_rtc_irq_handler_thread(int irq, void *data)
{
struct mt6397_rtc *rtc = data;
u32 irqsta, irqen;
int ret;
ret = regmap_read(rtc->regmap, rtc->addr_base + RTC_IRQ_STA, &irqsta);
if ((ret >= 0) && (irqsta & RTC_IRQ_STA_AL)) {
rtc_update_irq(rtc->rtc_dev, 1, RTC_IRQF | RTC_AF);
irqen = irqsta & ~RTC_IRQ_EN_AL;
mutex_lock(&rtc->lock);
if (regmap_write(rtc->regmap, rtc->addr_base + RTC_IRQ_EN,
irqen) == 0)
mtk_rtc_write_trigger(rtc);
mutex_unlock(&rtc->lock);
return IRQ_HANDLED;
}
return IRQ_NONE;
}
static int __mtk_rtc_read_time(struct mt6397_rtc *rtc,
struct rtc_time *tm, int *sec)
{
int ret;
u16 data[RTC_OFFSET_COUNT];
mutex_lock(&rtc->lock);
ret = regmap_bulk_read(rtc->regmap, rtc->addr_base + RTC_TC_SEC,
data, RTC_OFFSET_COUNT);
if (ret < 0)
goto exit;
tm->tm_sec = data[RTC_OFFSET_SEC];
tm->tm_min = data[RTC_OFFSET_MIN];
tm->tm_hour = data[RTC_OFFSET_HOUR];
tm->tm_mday = data[RTC_OFFSET_DOM];
tm->tm_mon = data[RTC_OFFSET_MTH] & RTC_TC_MTH_MASK;
tm->tm_year = data[RTC_OFFSET_YEAR];
ret = regmap_read(rtc->regmap, rtc->addr_base + RTC_TC_SEC, sec);
exit:
mutex_unlock(&rtc->lock);
return ret;
}
static int mtk_rtc_read_time(struct device *dev, struct rtc_time *tm)
{
time64_t time;
struct mt6397_rtc *rtc = dev_get_drvdata(dev);
int days, sec, ret;
do {
ret = __mtk_rtc_read_time(rtc, tm, &sec);
if (ret < 0)
goto exit;
} while (sec < tm->tm_sec);
	/* The HW register uses 7 bits to store the year. Subtract
	 * RTC_MIN_YEAR_OFFSET before writing the year to the register,
	 * and add RTC_MIN_YEAR_OFFSET back after reading it.
	 */
tm->tm_year += RTC_MIN_YEAR_OFFSET;
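	/*
	 * For illustration, assuming RTC_MIN_YEAR_OFFSET is 68 (a 1968
	 * hardware epoch): a register value of 55 becomes tm_year = 123,
	 * i.e. the year 2023.
	 */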
	/* The HW register counts months from one, but tm_mon starts from zero. */
tm->tm_mon--;
time = rtc_tm_to_time64(tm);
	/* rtc_tm_to_time64 converts a Gregorian date to seconds since
	 * 01-01-1970 00:00:00, and that date was a Thursday.
	 */
days = div_s64(time, 86400);
tm->tm_wday = (days + 4) % 7;
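	/*
	 * e.g. days = 0 (1970-01-01) yields tm_wday = 4 (Thursday) and
	 * days = 3 (1970-01-04) yields tm_wday = 0 (Sunday).
	 */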
exit:
return ret;
}
static int mtk_rtc_set_time(struct device *dev, struct rtc_time *tm)
{
struct mt6397_rtc *rtc = dev_get_drvdata(dev);
int ret;
u16 data[RTC_OFFSET_COUNT];
tm->tm_year -= RTC_MIN_YEAR_OFFSET;
tm->tm_mon++;
data[RTC_OFFSET_SEC] = tm->tm_sec;
data[RTC_OFFSET_MIN] = tm->tm_min;
data[RTC_OFFSET_HOUR] = tm->tm_hour;
data[RTC_OFFSET_DOM] = tm->tm_mday;
data[RTC_OFFSET_MTH] = tm->tm_mon;
data[RTC_OFFSET_YEAR] = tm->tm_year;
mutex_lock(&rtc->lock);
ret = regmap_bulk_write(rtc->regmap, rtc->addr_base + RTC_TC_SEC,
data, RTC_OFFSET_COUNT);
if (ret < 0)
goto exit;
	/* The time registers are written to hardware only after calling the trigger function */
ret = mtk_rtc_write_trigger(rtc);
exit:
mutex_unlock(&rtc->lock);
return ret;
}
static int mtk_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alm)
{
struct rtc_time *tm = &alm->time;
struct mt6397_rtc *rtc = dev_get_drvdata(dev);
u32 irqen, pdn2;
int ret;
u16 data[RTC_OFFSET_COUNT];
mutex_lock(&rtc->lock);
ret = regmap_read(rtc->regmap, rtc->addr_base + RTC_IRQ_EN, &irqen);
if (ret < 0)
goto err_exit;
ret = regmap_read(rtc->regmap, rtc->addr_base + RTC_PDN2, &pdn2);
if (ret < 0)
goto err_exit;
ret = regmap_bulk_read(rtc->regmap, rtc->addr_base + RTC_AL_SEC,
data, RTC_OFFSET_COUNT);
if (ret < 0)
goto err_exit;
alm->enabled = !!(irqen & RTC_IRQ_EN_AL);
alm->pending = !!(pdn2 & RTC_PDN2_PWRON_ALARM);
mutex_unlock(&rtc->lock);
tm->tm_sec = data[RTC_OFFSET_SEC] & RTC_AL_SEC_MASK;
tm->tm_min = data[RTC_OFFSET_MIN] & RTC_AL_MIN_MASK;
tm->tm_hour = data[RTC_OFFSET_HOUR] & RTC_AL_HOU_MASK;
tm->tm_mday = data[RTC_OFFSET_DOM] & RTC_AL_DOM_MASK;
tm->tm_mon = data[RTC_OFFSET_MTH] & RTC_AL_MTH_MASK;
tm->tm_year = data[RTC_OFFSET_YEAR] & RTC_AL_YEA_MASK;
tm->tm_year += RTC_MIN_YEAR_OFFSET;
tm->tm_mon--;
return 0;
err_exit:
mutex_unlock(&rtc->lock);
return ret;
}
static int mtk_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alm)
{
struct rtc_time *tm = &alm->time;
struct mt6397_rtc *rtc = dev_get_drvdata(dev);
int ret;
u16 data[RTC_OFFSET_COUNT];
tm->tm_year -= RTC_MIN_YEAR_OFFSET;
tm->tm_mon++;
mutex_lock(&rtc->lock);
ret = regmap_bulk_read(rtc->regmap, rtc->addr_base + RTC_AL_SEC,
data, RTC_OFFSET_COUNT);
if (ret < 0)
goto exit;
data[RTC_OFFSET_SEC] = ((data[RTC_OFFSET_SEC] & ~(RTC_AL_SEC_MASK)) |
(tm->tm_sec & RTC_AL_SEC_MASK));
data[RTC_OFFSET_MIN] = ((data[RTC_OFFSET_MIN] & ~(RTC_AL_MIN_MASK)) |
(tm->tm_min & RTC_AL_MIN_MASK));
data[RTC_OFFSET_HOUR] = ((data[RTC_OFFSET_HOUR] & ~(RTC_AL_HOU_MASK)) |
(tm->tm_hour & RTC_AL_HOU_MASK));
data[RTC_OFFSET_DOM] = ((data[RTC_OFFSET_DOM] & ~(RTC_AL_DOM_MASK)) |
(tm->tm_mday & RTC_AL_DOM_MASK));
data[RTC_OFFSET_MTH] = ((data[RTC_OFFSET_MTH] & ~(RTC_AL_MTH_MASK)) |
(tm->tm_mon & RTC_AL_MTH_MASK));
data[RTC_OFFSET_YEAR] = ((data[RTC_OFFSET_YEAR] & ~(RTC_AL_YEA_MASK)) |
(tm->tm_year & RTC_AL_YEA_MASK));
if (alm->enabled) {
ret = regmap_bulk_write(rtc->regmap,
rtc->addr_base + RTC_AL_SEC,
data, RTC_OFFSET_COUNT);
if (ret < 0)
goto exit;
ret = regmap_write(rtc->regmap, rtc->addr_base + RTC_AL_MASK,
RTC_AL_MASK_DOW);
if (ret < 0)
goto exit;
ret = regmap_update_bits(rtc->regmap,
rtc->addr_base + RTC_IRQ_EN,
RTC_IRQ_EN_ONESHOT_AL,
RTC_IRQ_EN_ONESHOT_AL);
if (ret < 0)
goto exit;
} else {
ret = regmap_update_bits(rtc->regmap,
rtc->addr_base + RTC_IRQ_EN,
RTC_IRQ_EN_ONESHOT_AL, 0);
if (ret < 0)
goto exit;
}
	/* All alarm time registers are written to hardware only after
	 * calling mtk_rtc_write_trigger. This avoids a race condition if
	 * the alarm fires while the alarm time registers are being written.
	 */
ret = mtk_rtc_write_trigger(rtc);
exit:
mutex_unlock(&rtc->lock);
return ret;
}
static const struct rtc_class_ops mtk_rtc_ops = {
.read_time = mtk_rtc_read_time,
.set_time = mtk_rtc_set_time,
.read_alarm = mtk_rtc_read_alarm,
.set_alarm = mtk_rtc_set_alarm,
};
static int mtk_rtc_probe(struct platform_device *pdev)
{
struct resource *res;
struct mt6397_chip *mt6397_chip = dev_get_drvdata(pdev->dev.parent);
struct mt6397_rtc *rtc;
int ret;
rtc = devm_kzalloc(&pdev->dev, sizeof(struct mt6397_rtc), GFP_KERNEL);
if (!rtc)
return -ENOMEM;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res)
return -EINVAL;
rtc->addr_base = res->start;
rtc->data = of_device_get_match_data(&pdev->dev);
rtc->irq = platform_get_irq(pdev, 0);
if (rtc->irq < 0)
return rtc->irq;
rtc->regmap = mt6397_chip->regmap;
mutex_init(&rtc->lock);
platform_set_drvdata(pdev, rtc);
rtc->rtc_dev = devm_rtc_allocate_device(&pdev->dev);
if (IS_ERR(rtc->rtc_dev))
return PTR_ERR(rtc->rtc_dev);
ret = devm_request_threaded_irq(&pdev->dev, rtc->irq, NULL,
mtk_rtc_irq_handler_thread,
IRQF_ONESHOT | IRQF_TRIGGER_HIGH,
"mt6397-rtc", rtc);
if (ret) {
dev_err(&pdev->dev, "Failed to request alarm IRQ: %d: %d\n",
rtc->irq, ret);
return ret;
}
device_init_wakeup(&pdev->dev, 1);
rtc->rtc_dev->ops = &mtk_rtc_ops;
return devm_rtc_register_device(rtc->rtc_dev);
}
#ifdef CONFIG_PM_SLEEP
static int mt6397_rtc_suspend(struct device *dev)
{
struct mt6397_rtc *rtc = dev_get_drvdata(dev);
if (device_may_wakeup(dev))
enable_irq_wake(rtc->irq);
return 0;
}
static int mt6397_rtc_resume(struct device *dev)
{
struct mt6397_rtc *rtc = dev_get_drvdata(dev);
if (device_may_wakeup(dev))
disable_irq_wake(rtc->irq);
return 0;
}
#endif
static SIMPLE_DEV_PM_OPS(mt6397_pm_ops, mt6397_rtc_suspend,
mt6397_rtc_resume);
static const struct mtk_rtc_data mt6358_rtc_data = {
.wrtgr = RTC_WRTGR_MT6358,
};
static const struct mtk_rtc_data mt6397_rtc_data = {
.wrtgr = RTC_WRTGR_MT6397,
};
static const struct of_device_id mt6397_rtc_of_match[] = {
{ .compatible = "mediatek,mt6323-rtc", .data = &mt6397_rtc_data },
{ .compatible = "mediatek,mt6358-rtc", .data = &mt6358_rtc_data },
{ .compatible = "mediatek,mt6397-rtc", .data = &mt6397_rtc_data },
{ }
};
MODULE_DEVICE_TABLE(of, mt6397_rtc_of_match);
static struct platform_driver mtk_rtc_driver = {
.driver = {
.name = "mt6397-rtc",
.of_match_table = mt6397_rtc_of_match,
.pm = &mt6397_pm_ops,
},
.probe = mtk_rtc_probe,
};
module_platform_driver(mtk_rtc_driver);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Tianping Fang <[email protected]>");
MODULE_DESCRIPTION("RTC Driver for MediaTek MT6397 PMIC");
| linux-master | drivers/rtc/rtc-mt6397.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Real Time Clock driver for Marvell 88PM860x PMIC
*
* Copyright (c) 2010 Marvell International Ltd.
* Author: Haojian Zhuang <[email protected]>
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/rtc.h>
#include <linux/delay.h>
#include <linux/mfd/core.h>
#include <linux/mfd/88pm860x.h>
#define VRTC_CALIBRATION
struct pm860x_rtc_info {
struct pm860x_chip *chip;
struct i2c_client *i2c;
struct rtc_device *rtc_dev;
struct device *dev;
struct delayed_work calib_work;
int irq;
int vrtc;
};
#define REG_VRTC_MEAS1 0x7D
#define REG0_ADDR 0xB0
#define REG1_ADDR 0xB2
#define REG2_ADDR 0xB4
#define REG3_ADDR 0xB6
#define REG0_DATA 0xB1
#define REG1_DATA 0xB3
#define REG2_DATA 0xB5
#define REG3_DATA 0xB7
/* bit definitions of Measurement Enable Register 2 (0x51) */
#define MEAS2_VRTC (1 << 0)
/* bit definitions of RTC Register 1 (0xA0) */
#define ALARM_EN (1 << 3)
#define ALARM_WAKEUP (1 << 4)
#define ALARM (1 << 5)
#define RTC1_USE_XO (1 << 7)
#define VRTC_CALIB_INTERVAL (HZ * 60 * 10) /* 10 minutes */
static irqreturn_t rtc_update_handler(int irq, void *data)
{
struct pm860x_rtc_info *info = (struct pm860x_rtc_info *)data;
int mask;
mask = ALARM | ALARM_WAKEUP;
pm860x_set_bits(info->i2c, PM8607_RTC1, mask | ALARM_EN, mask);
rtc_update_irq(info->rtc_dev, 1, RTC_AF);
return IRQ_HANDLED;
}
static int pm860x_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
{
struct pm860x_rtc_info *info = dev_get_drvdata(dev);
if (enabled)
pm860x_set_bits(info->i2c, PM8607_RTC1, ALARM_EN, ALARM_EN);
else
pm860x_set_bits(info->i2c, PM8607_RTC1, ALARM_EN, 0);
return 0;
}
static int pm860x_rtc_read_time(struct device *dev, struct rtc_time *tm)
{
struct pm860x_rtc_info *info = dev_get_drvdata(dev);
unsigned char buf[8];
unsigned long ticks, base, data;
pm860x_page_bulk_read(info->i2c, REG0_ADDR, 8, buf);
dev_dbg(info->dev, "%x-%x-%x-%x-%x-%x-%x-%x\n", buf[0], buf[1],
buf[2], buf[3], buf[4], buf[5], buf[6], buf[7]);
base = ((unsigned long)buf[1] << 24) | (buf[3] << 16) |
(buf[5] << 8) | buf[7];
/* load 32-bit read-only counter */
pm860x_bulk_read(info->i2c, PM8607_RTC_COUNTER1, 4, buf);
data = ((unsigned long)buf[3] << 24) | (buf[2] << 16) |
(buf[1] << 8) | buf[0];
ticks = base + data;
dev_dbg(info->dev, "get base:0x%lx, RO count:0x%lx, ticks:0x%lx\n",
base, data, ticks);
rtc_time64_to_tm(ticks, tm);
return 0;
}
static int pm860x_rtc_set_time(struct device *dev, struct rtc_time *tm)
{
struct pm860x_rtc_info *info = dev_get_drvdata(dev);
unsigned char buf[4];
unsigned long ticks, base, data;
ticks = rtc_tm_to_time64(tm);
/* load 32-bit read-only counter */
pm860x_bulk_read(info->i2c, PM8607_RTC_COUNTER1, 4, buf);
data = ((unsigned long)buf[3] << 24) | (buf[2] << 16) |
(buf[1] << 8) | buf[0];
base = ticks - data;
dev_dbg(info->dev, "set base:0x%lx, RO count:0x%lx, ticks:0x%lx\n",
base, data, ticks);
pm860x_page_reg_write(info->i2c, REG0_DATA, (base >> 24) & 0xFF);
pm860x_page_reg_write(info->i2c, REG1_DATA, (base >> 16) & 0xFF);
pm860x_page_reg_write(info->i2c, REG2_DATA, (base >> 8) & 0xFF);
pm860x_page_reg_write(info->i2c, REG3_DATA, base & 0xFF);
return 0;
}
static int pm860x_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
{
struct pm860x_rtc_info *info = dev_get_drvdata(dev);
unsigned char buf[8];
unsigned long ticks, base, data;
int ret;
pm860x_page_bulk_read(info->i2c, REG0_ADDR, 8, buf);
dev_dbg(info->dev, "%x-%x-%x-%x-%x-%x-%x-%x\n", buf[0], buf[1],
buf[2], buf[3], buf[4], buf[5], buf[6], buf[7]);
base = ((unsigned long)buf[1] << 24) | (buf[3] << 16) |
(buf[5] << 8) | buf[7];
pm860x_bulk_read(info->i2c, PM8607_RTC_EXPIRE1, 4, buf);
data = ((unsigned long)buf[3] << 24) | (buf[2] << 16) |
(buf[1] << 8) | buf[0];
ticks = base + data;
dev_dbg(info->dev, "get base:0x%lx, RO count:0x%lx, ticks:0x%lx\n",
base, data, ticks);
rtc_time64_to_tm(ticks, &alrm->time);
ret = pm860x_reg_read(info->i2c, PM8607_RTC1);
alrm->enabled = (ret & ALARM_EN) ? 1 : 0;
alrm->pending = (ret & (ALARM | ALARM_WAKEUP)) ? 1 : 0;
return 0;
}
static int pm860x_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
{
struct pm860x_rtc_info *info = dev_get_drvdata(dev);
unsigned long ticks, base, data;
unsigned char buf[8];
int mask;
pm860x_set_bits(info->i2c, PM8607_RTC1, ALARM_EN, 0);
pm860x_page_bulk_read(info->i2c, REG0_ADDR, 8, buf);
dev_dbg(info->dev, "%x-%x-%x-%x-%x-%x-%x-%x\n", buf[0], buf[1],
buf[2], buf[3], buf[4], buf[5], buf[6], buf[7]);
base = ((unsigned long)buf[1] << 24) | (buf[3] << 16) |
(buf[5] << 8) | buf[7];
ticks = rtc_tm_to_time64(&alrm->time);
data = ticks - base;
buf[0] = data & 0xff;
buf[1] = (data >> 8) & 0xff;
buf[2] = (data >> 16) & 0xff;
buf[3] = (data >> 24) & 0xff;
pm860x_bulk_write(info->i2c, PM8607_RTC_EXPIRE1, 4, buf);
if (alrm->enabled) {
mask = ALARM | ALARM_WAKEUP | ALARM_EN;
pm860x_set_bits(info->i2c, PM8607_RTC1, mask, mask);
} else {
mask = ALARM | ALARM_WAKEUP | ALARM_EN;
pm860x_set_bits(info->i2c, PM8607_RTC1, mask,
ALARM | ALARM_WAKEUP);
}
return 0;
}
static const struct rtc_class_ops pm860x_rtc_ops = {
.read_time = pm860x_rtc_read_time,
.set_time = pm860x_rtc_set_time,
.read_alarm = pm860x_rtc_read_alarm,
.set_alarm = pm860x_rtc_set_alarm,
.alarm_irq_enable = pm860x_rtc_alarm_irq_enable,
};
#ifdef VRTC_CALIBRATION
static void calibrate_vrtc_work(struct work_struct *work)
{
struct pm860x_rtc_info *info = container_of(work,
struct pm860x_rtc_info, calib_work.work);
unsigned char buf[2];
unsigned int sum, data, mean, vrtc_set;
int i;
for (i = 0, sum = 0; i < 16; i++) {
msleep(100);
pm860x_bulk_read(info->i2c, REG_VRTC_MEAS1, 2, buf);
data = (buf[0] << 4) | buf[1];
data = (data * 5400) >> 12; /* convert to mv */
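		/* e.g. a raw reading of 2048 maps to (2048 * 5400) >> 12 = 2700 mV */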
sum += data;
}
mean = sum >> 4;
vrtc_set = 2700 + (info->vrtc & 0x3) * 200;
dev_dbg(info->dev, "mean:%d, vrtc_set:%d\n", mean, vrtc_set);
sum = pm860x_reg_read(info->i2c, PM8607_RTC_MISC1);
data = sum & 0x3;
if ((mean + 200) < vrtc_set) {
/* try higher voltage */
if (++data == 4)
goto out;
data = (sum & 0xf8) | (data & 0x3);
pm860x_reg_write(info->i2c, PM8607_RTC_MISC1, data);
} else if ((mean - 200) > vrtc_set) {
/* try lower voltage */
if (data-- == 0)
goto out;
data = (sum & 0xf8) | (data & 0x3);
pm860x_reg_write(info->i2c, PM8607_RTC_MISC1, data);
} else
goto out;
dev_dbg(info->dev, "set 0x%x to RTC_MISC1\n", data);
/* trigger next calibration since VRTC is updated */
schedule_delayed_work(&info->calib_work, VRTC_CALIB_INTERVAL);
return;
out:
/* disable measurement */
pm860x_set_bits(info->i2c, PM8607_MEAS_EN2, MEAS2_VRTC, 0);
dev_dbg(info->dev, "finish VRTC calibration\n");
return;
}
#endif
#ifdef CONFIG_OF
static int pm860x_rtc_dt_init(struct platform_device *pdev,
struct pm860x_rtc_info *info)
{
struct device_node *np = pdev->dev.parent->of_node;
int ret;
if (!np)
return -ENODEV;
np = of_get_child_by_name(np, "rtc");
if (!np) {
dev_err(&pdev->dev, "failed to find rtc node\n");
return -ENODEV;
}
ret = of_property_read_u32(np, "marvell,88pm860x-vrtc", &info->vrtc);
if (ret)
info->vrtc = 0;
of_node_put(np);
return 0;
}
#else
#define pm860x_rtc_dt_init(x, y) do { } while (0)
#endif
static int pm860x_rtc_probe(struct platform_device *pdev)
{
struct pm860x_chip *chip = dev_get_drvdata(pdev->dev.parent);
struct pm860x_rtc_info *info;
int ret;
info = devm_kzalloc(&pdev->dev, sizeof(struct pm860x_rtc_info),
GFP_KERNEL);
if (!info)
return -ENOMEM;
info->irq = platform_get_irq(pdev, 0);
if (info->irq < 0)
return info->irq;
info->chip = chip;
info->i2c = (chip->id == CHIP_PM8607) ? chip->client : chip->companion;
info->dev = &pdev->dev;
dev_set_drvdata(&pdev->dev, info);
info->rtc_dev = devm_rtc_allocate_device(&pdev->dev);
if (IS_ERR(info->rtc_dev))
return PTR_ERR(info->rtc_dev);
ret = devm_request_threaded_irq(&pdev->dev, info->irq, NULL,
rtc_update_handler, IRQF_ONESHOT, "rtc",
info);
if (ret < 0) {
dev_err(chip->dev, "Failed to request IRQ: #%d: %d\n",
info->irq, ret);
return ret;
}
/* set addresses of 32-bit base value for RTC time */
pm860x_page_reg_write(info->i2c, REG0_ADDR, REG0_DATA);
pm860x_page_reg_write(info->i2c, REG1_ADDR, REG1_DATA);
pm860x_page_reg_write(info->i2c, REG2_ADDR, REG2_DATA);
pm860x_page_reg_write(info->i2c, REG3_ADDR, REG3_DATA);
pm860x_rtc_dt_init(pdev, info);
info->rtc_dev->ops = &pm860x_rtc_ops;
info->rtc_dev->range_max = U32_MAX;
ret = devm_rtc_register_device(info->rtc_dev);
if (ret)
return ret;
	/*
	 * Enable the internal XO instead of the internal 3.25MHz clock,
	 * since the XO can keep running in the PMIC power-down state.
	 */
pm860x_set_bits(info->i2c, PM8607_RTC1, RTC1_USE_XO, RTC1_USE_XO);
#ifdef VRTC_CALIBRATION
/* <00> -- 2.7V, <01> -- 2.9V, <10> -- 3.1V, <11> -- 3.3V */
pm860x_set_bits(info->i2c, PM8607_MEAS_EN2, MEAS2_VRTC, MEAS2_VRTC);
/* calibrate VRTC */
INIT_DELAYED_WORK(&info->calib_work, calibrate_vrtc_work);
schedule_delayed_work(&info->calib_work, VRTC_CALIB_INTERVAL);
#endif /* VRTC_CALIBRATION */
device_init_wakeup(&pdev->dev, 1);
return 0;
}
static void pm860x_rtc_remove(struct platform_device *pdev)
{
struct pm860x_rtc_info *info = platform_get_drvdata(pdev);
#ifdef VRTC_CALIBRATION
cancel_delayed_work_sync(&info->calib_work);
/* disable measurement */
pm860x_set_bits(info->i2c, PM8607_MEAS_EN2, MEAS2_VRTC, 0);
#endif /* VRTC_CALIBRATION */
}
#ifdef CONFIG_PM_SLEEP
static int pm860x_rtc_suspend(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct pm860x_chip *chip = dev_get_drvdata(pdev->dev.parent);
if (device_may_wakeup(dev))
chip->wakeup_flag |= 1 << PM8607_IRQ_RTC;
return 0;
}
static int pm860x_rtc_resume(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct pm860x_chip *chip = dev_get_drvdata(pdev->dev.parent);
if (device_may_wakeup(dev))
chip->wakeup_flag &= ~(1 << PM8607_IRQ_RTC);
return 0;
}
#endif
static SIMPLE_DEV_PM_OPS(pm860x_rtc_pm_ops, pm860x_rtc_suspend, pm860x_rtc_resume);
static struct platform_driver pm860x_rtc_driver = {
.driver = {
.name = "88pm860x-rtc",
.pm = &pm860x_rtc_pm_ops,
},
.probe = pm860x_rtc_probe,
.remove_new = pm860x_rtc_remove,
};
module_platform_driver(pm860x_rtc_driver);
MODULE_DESCRIPTION("Marvell 88PM860x RTC driver");
MODULE_AUTHOR("Haojian Zhuang <[email protected]>");
MODULE_LICENSE("GPL");
| linux-master | drivers/rtc/rtc-88pm860x.c |
// SPDX-License-Identifier: GPL-2.0
/*
* RTC interface for Wilco Embedded Controller with R/W abilities
*
* Copyright 2018 Google LLC
*
* The corresponding platform device is typically registered in
* drivers/platform/chrome/wilco_ec/core.c
*/
#include <linux/bcd.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/platform_data/wilco-ec.h>
#include <linux/rtc.h>
#include <linux/timekeeping.h>
#define EC_COMMAND_CMOS 0x7c
#define EC_CMOS_TOD_WRITE 0x02
#define EC_CMOS_TOD_READ 0x08
/* Message sent to the EC to request the current time. */
struct ec_rtc_read_request {
u8 command;
u8 reserved;
u8 param;
} __packed;
static struct ec_rtc_read_request read_rq = {
.command = EC_COMMAND_CMOS,
.param = EC_CMOS_TOD_READ,
};
/**
* struct ec_rtc_read_response - Format of RTC returned by EC.
* @reserved: Unused byte
* @second: Second value (0..59)
* @minute: Minute value (0..59)
* @hour: Hour value (0..23)
* @day: Day value (1..31)
* @month: Month value (1..12)
* @year: Year value (full year % 100)
* @century: Century value (full year / 100)
*
* All values are presented in binary (not BCD).
*/
struct ec_rtc_read_response {
u8 reserved;
u8 second;
u8 minute;
u8 hour;
u8 day;
u8 month;
u8 year;
u8 century;
} __packed;
/**
* struct ec_rtc_write_request - Format of RTC sent to the EC.
* @command: Always EC_COMMAND_CMOS
* @reserved: Unused byte
* @param: Always EC_CMOS_TOD_WRITE
* @century: Century value (full year / 100)
* @year: Year value (full year % 100)
* @month: Month value (1..12)
* @day: Day value (1..31)
* @hour: Hour value (0..23)
* @minute: Minute value (0..59)
* @second: Second value (0..59)
* @weekday: Day of the week (0=Saturday)
*
* All values are presented in BCD.
*/
struct ec_rtc_write_request {
u8 command;
u8 reserved;
u8 param;
u8 century;
u8 year;
u8 month;
u8 day;
u8 hour;
u8 minute;
u8 second;
u8 weekday;
} __packed;
static int wilco_ec_rtc_read(struct device *dev, struct rtc_time *tm)
{
struct wilco_ec_device *ec = dev_get_drvdata(dev->parent);
struct ec_rtc_read_response rtc;
struct wilco_ec_message msg;
int ret;
memset(&msg, 0, sizeof(msg));
msg.type = WILCO_EC_MSG_LEGACY;
msg.request_data = &read_rq;
msg.request_size = sizeof(read_rq);
msg.response_data = &rtc;
msg.response_size = sizeof(rtc);
ret = wilco_ec_mailbox(ec, &msg);
if (ret < 0)
return ret;
tm->tm_sec = rtc.second;
tm->tm_min = rtc.minute;
tm->tm_hour = rtc.hour;
tm->tm_mday = rtc.day;
tm->tm_mon = rtc.month - 1;
tm->tm_year = rtc.year + (rtc.century * 100) - 1900;
/* Ignore other tm fields, man rtc says userspace shouldn't use them. */
if (rtc_valid_tm(tm)) {
dev_err(dev, "Time from RTC is invalid: %ptRr\n", tm);
return -EIO;
}
return 0;
}
static int wilco_ec_rtc_write(struct device *dev, struct rtc_time *tm)
{
struct wilco_ec_device *ec = dev_get_drvdata(dev->parent);
struct ec_rtc_write_request rtc;
struct wilco_ec_message msg;
int year = tm->tm_year + 1900;
/*
* Convert from 0=Sunday to 0=Saturday for the EC
* We DO need to set weekday because the EC controls battery charging
* schedules that depend on the day of the week.
*/
int wday = tm->tm_wday == 6 ? 0 : tm->tm_wday + 1;
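	/* e.g. Sunday (tm_wday = 0) maps to 1; Saturday (tm_wday = 6) maps to 0 */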
int ret;
rtc.command = EC_COMMAND_CMOS;
rtc.param = EC_CMOS_TOD_WRITE;
rtc.century = bin2bcd(year / 100);
rtc.year = bin2bcd(year % 100);
rtc.month = bin2bcd(tm->tm_mon + 1);
rtc.day = bin2bcd(tm->tm_mday);
rtc.hour = bin2bcd(tm->tm_hour);
rtc.minute = bin2bcd(tm->tm_min);
rtc.second = bin2bcd(tm->tm_sec);
rtc.weekday = bin2bcd(wday);
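	/* All fields are BCD; e.g. the year 2023 is sent as century 0x20, year 0x23 */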
memset(&msg, 0, sizeof(msg));
msg.type = WILCO_EC_MSG_LEGACY;
msg.request_data = &rtc;
msg.request_size = sizeof(rtc);
ret = wilco_ec_mailbox(ec, &msg);
if (ret < 0)
return ret;
return 0;
}
static const struct rtc_class_ops wilco_ec_rtc_ops = {
.read_time = wilco_ec_rtc_read,
.set_time = wilco_ec_rtc_write,
};
static int wilco_ec_rtc_probe(struct platform_device *pdev)
{
struct rtc_device *rtc;
rtc = devm_rtc_allocate_device(&pdev->dev);
if (IS_ERR(rtc))
return PTR_ERR(rtc);
rtc->ops = &wilco_ec_rtc_ops;
/* EC only supports this century */
rtc->range_min = RTC_TIMESTAMP_BEGIN_2000;
rtc->range_max = RTC_TIMESTAMP_END_2099;
rtc->owner = THIS_MODULE;
return devm_rtc_register_device(rtc);
}
static struct platform_driver wilco_ec_rtc_driver = {
.driver = {
.name = "rtc-wilco-ec",
},
.probe = wilco_ec_rtc_probe,
};
module_platform_driver(wilco_ec_rtc_driver);
MODULE_ALIAS("platform:rtc-wilco-ec");
MODULE_AUTHOR("Nick Crews <[email protected]>");
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Wilco EC RTC driver");
| linux-master | drivers/rtc/rtc-wilco-ec.c |
// SPDX-License-Identifier: GPL-2.0+
//
// Copyright (c) 2013-2014 Samsung Electronics Co., Ltd
// http://www.samsung.com
//
// Copyright (C) 2013 Google, Inc
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/i2c.h>
#include <linux/bcd.h>
#include <linux/regmap.h>
#include <linux/rtc.h>
#include <linux/platform_device.h>
#include <linux/mfd/samsung/core.h>
#include <linux/mfd/samsung/irq.h>
#include <linux/mfd/samsung/rtc.h>
#include <linux/mfd/samsung/s2mps14.h>
/*
* Maximum number of retries for checking changes in UDR field
* of S5M_RTC_UDR_CON register (to limit possible endless loop).
*
* After writing to RTC registers (setting time or alarm) read the UDR field
* in S5M_RTC_UDR_CON register. UDR is auto-cleared when data have
* been transferred.
*/
#define UDR_READ_RETRY_CNT 5
enum {
RTC_SEC = 0,
RTC_MIN,
RTC_HOUR,
RTC_WEEKDAY,
RTC_DATE,
RTC_MONTH,
RTC_YEAR1,
RTC_YEAR2,
/* Make sure this is always the last enum name. */
RTC_MAX_NUM_TIME_REGS
};
/*
* Registers used by the driver which are different between chipsets.
*
* Operations like read time and write alarm/time require updating
* specific fields in UDR register. These fields usually are auto-cleared
* (with some exceptions).
*
* Table of operations per device:
*
* Device | Write time | Read time | Write alarm
* =================================================
* S5M8767 | UDR + TIME | | UDR
* S2MPS11/14 | WUDR | RUDR | WUDR + RUDR
* S2MPS13 | WUDR | RUDR | WUDR + AUDR
* S2MPS15 | WUDR | RUDR | AUDR
*/
struct s5m_rtc_reg_config {
/* Number of registers used for setting time/alarm0/alarm1 */
unsigned int regs_count;
/* First register for time, seconds */
unsigned int time;
/* RTC control register */
unsigned int ctrl;
/* First register for alarm 0, seconds */
unsigned int alarm0;
/* First register for alarm 1, seconds */
unsigned int alarm1;
/*
* Register for update flag (UDR). Typically setting UDR field to 1
* will enable update of time or alarm register. Then it will be
* auto-cleared after successful update.
*/
unsigned int udr_update;
/* Auto-cleared mask in UDR field for writing time and alarm */
unsigned int autoclear_udr_mask;
/*
* Masks in UDR field for time and alarm operations.
* The read time mask can be 0. Rest should not.
*/
unsigned int read_time_udr_mask;
unsigned int write_time_udr_mask;
unsigned int write_alarm_udr_mask;
};
/* Register map for S5M8767 */
static const struct s5m_rtc_reg_config s5m_rtc_regs = {
.regs_count = 8,
.time = S5M_RTC_SEC,
.ctrl = S5M_ALARM1_CONF,
.alarm0 = S5M_ALARM0_SEC,
.alarm1 = S5M_ALARM1_SEC,
.udr_update = S5M_RTC_UDR_CON,
.autoclear_udr_mask = S5M_RTC_UDR_MASK,
.read_time_udr_mask = 0, /* Not needed */
.write_time_udr_mask = S5M_RTC_UDR_MASK | S5M_RTC_TIME_EN_MASK,
.write_alarm_udr_mask = S5M_RTC_UDR_MASK,
};
/* Register map for S2MPS13 */
static const struct s5m_rtc_reg_config s2mps13_rtc_regs = {
.regs_count = 7,
.time = S2MPS_RTC_SEC,
.ctrl = S2MPS_RTC_CTRL,
.alarm0 = S2MPS_ALARM0_SEC,
.alarm1 = S2MPS_ALARM1_SEC,
.udr_update = S2MPS_RTC_UDR_CON,
.autoclear_udr_mask = S2MPS_RTC_WUDR_MASK,
.read_time_udr_mask = S2MPS_RTC_RUDR_MASK,
.write_time_udr_mask = S2MPS_RTC_WUDR_MASK,
.write_alarm_udr_mask = S2MPS_RTC_WUDR_MASK | S2MPS13_RTC_AUDR_MASK,
};
/* Register map for S2MPS11/14 */
static const struct s5m_rtc_reg_config s2mps14_rtc_regs = {
.regs_count = 7,
.time = S2MPS_RTC_SEC,
.ctrl = S2MPS_RTC_CTRL,
.alarm0 = S2MPS_ALARM0_SEC,
.alarm1 = S2MPS_ALARM1_SEC,
.udr_update = S2MPS_RTC_UDR_CON,
.autoclear_udr_mask = S2MPS_RTC_WUDR_MASK,
.read_time_udr_mask = S2MPS_RTC_RUDR_MASK,
.write_time_udr_mask = S2MPS_RTC_WUDR_MASK,
.write_alarm_udr_mask = S2MPS_RTC_WUDR_MASK | S2MPS_RTC_RUDR_MASK,
};
/*
* Register map for S2MPS15 - in comparison to S2MPS14 the WUDR and AUDR bits
* are swapped.
*/
static const struct s5m_rtc_reg_config s2mps15_rtc_regs = {
.regs_count = 7,
.time = S2MPS_RTC_SEC,
.ctrl = S2MPS_RTC_CTRL,
.alarm0 = S2MPS_ALARM0_SEC,
.alarm1 = S2MPS_ALARM1_SEC,
.udr_update = S2MPS_RTC_UDR_CON,
.autoclear_udr_mask = S2MPS_RTC_WUDR_MASK,
.read_time_udr_mask = S2MPS_RTC_RUDR_MASK,
.write_time_udr_mask = S2MPS15_RTC_WUDR_MASK,
.write_alarm_udr_mask = S2MPS15_RTC_AUDR_MASK,
};
struct s5m_rtc_info {
struct device *dev;
struct i2c_client *i2c;
struct sec_pmic_dev *s5m87xx;
struct regmap *regmap;
struct rtc_device *rtc_dev;
int irq;
enum sec_device_type device_type;
int rtc_24hr_mode;
const struct s5m_rtc_reg_config *regs;
};
static const struct regmap_config s5m_rtc_regmap_config = {
.reg_bits = 8,
.val_bits = 8,
.max_register = S5M_RTC_REG_MAX,
};
static const struct regmap_config s2mps14_rtc_regmap_config = {
.reg_bits = 8,
.val_bits = 8,
.max_register = S2MPS_RTC_REG_MAX,
};
static void s5m8767_data_to_tm(u8 *data, struct rtc_time *tm,
int rtc_24hr_mode)
{
tm->tm_sec = data[RTC_SEC] & 0x7f;
tm->tm_min = data[RTC_MIN] & 0x7f;
if (rtc_24hr_mode) {
tm->tm_hour = data[RTC_HOUR] & 0x1f;
} else {
tm->tm_hour = data[RTC_HOUR] & 0x0f;
if (data[RTC_HOUR] & HOUR_PM_MASK)
tm->tm_hour += 12;
}
tm->tm_wday = ffs(data[RTC_WEEKDAY] & 0x7f);
tm->tm_mday = data[RTC_DATE] & 0x1f;
tm->tm_mon = (data[RTC_MONTH] & 0x0f) - 1;
tm->tm_year = (data[RTC_YEAR1] & 0x7f) + 100;
tm->tm_yday = 0;
tm->tm_isdst = 0;
}
static int s5m8767_tm_to_data(struct rtc_time *tm, u8 *data)
{
data[RTC_SEC] = tm->tm_sec;
data[RTC_MIN] = tm->tm_min;
if (tm->tm_hour >= 12)
data[RTC_HOUR] = tm->tm_hour | HOUR_PM_MASK;
else
data[RTC_HOUR] = tm->tm_hour & ~HOUR_PM_MASK;
data[RTC_WEEKDAY] = 1 << tm->tm_wday;
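	/* e.g. Wednesday (tm_wday = 3) is stored as the single bit 1 << 3 = 0x08 */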
data[RTC_DATE] = tm->tm_mday;
data[RTC_MONTH] = tm->tm_mon + 1;
data[RTC_YEAR1] = tm->tm_year - 100;
return 0;
}
/*
* Read RTC_UDR_CON register and wait till UDR field is cleared.
* This indicates that time/alarm update ended.
*/
static int s5m8767_wait_for_udr_update(struct s5m_rtc_info *info)
{
int ret, retry = UDR_READ_RETRY_CNT;
unsigned int data;
do {
ret = regmap_read(info->regmap, info->regs->udr_update, &data);
} while (--retry && (data & info->regs->autoclear_udr_mask) && !ret);
if (!retry)
dev_err(info->dev, "waiting for UDR update, reached max number of retries\n");
return ret;
}
static int s5m_check_pending_alarm_interrupt(struct s5m_rtc_info *info,
struct rtc_wkalrm *alarm)
{
int ret;
unsigned int val;
switch (info->device_type) {
case S5M8767X:
ret = regmap_read(info->regmap, S5M_RTC_STATUS, &val);
val &= S5M_ALARM0_STATUS;
break;
case S2MPS15X:
case S2MPS14X:
case S2MPS13X:
ret = regmap_read(info->s5m87xx->regmap_pmic, S2MPS14_REG_ST2,
&val);
val &= S2MPS_ALARM0_STATUS;
break;
default:
return -EINVAL;
}
if (ret < 0)
return ret;
if (val)
alarm->pending = 1;
else
alarm->pending = 0;
return 0;
}
static int s5m8767_rtc_set_time_reg(struct s5m_rtc_info *info)
{
int ret;
unsigned int data;
ret = regmap_read(info->regmap, info->regs->udr_update, &data);
if (ret < 0) {
dev_err(info->dev, "failed to read update reg(%d)\n", ret);
return ret;
}
data |= info->regs->write_time_udr_mask;
ret = regmap_write(info->regmap, info->regs->udr_update, data);
if (ret < 0) {
dev_err(info->dev, "failed to write update reg(%d)\n", ret);
return ret;
}
ret = s5m8767_wait_for_udr_update(info);
return ret;
}
static int s5m8767_rtc_set_alarm_reg(struct s5m_rtc_info *info)
{
int ret;
unsigned int data;
ret = regmap_read(info->regmap, info->regs->udr_update, &data);
if (ret < 0) {
dev_err(info->dev, "%s: fail to read update reg(%d)\n",
__func__, ret);
return ret;
}
data |= info->regs->write_alarm_udr_mask;
switch (info->device_type) {
case S5M8767X:
data &= ~S5M_RTC_TIME_EN_MASK;
break;
case S2MPS15X:
case S2MPS14X:
case S2MPS13X:
/* No exceptions needed */
break;
default:
return -EINVAL;
}
ret = regmap_write(info->regmap, info->regs->udr_update, data);
if (ret < 0) {
dev_err(info->dev, "%s: fail to write update reg(%d)\n",
__func__, ret);
return ret;
}
ret = s5m8767_wait_for_udr_update(info);
/* On S2MPS13 the AUDR is not auto-cleared */
if (info->device_type == S2MPS13X)
regmap_update_bits(info->regmap, info->regs->udr_update,
S2MPS13_RTC_AUDR_MASK, 0);
return ret;
}
static int s5m_rtc_read_time(struct device *dev, struct rtc_time *tm)
{
struct s5m_rtc_info *info = dev_get_drvdata(dev);
u8 data[RTC_MAX_NUM_TIME_REGS];
int ret;
if (info->regs->read_time_udr_mask) {
ret = regmap_update_bits(info->regmap,
info->regs->udr_update,
info->regs->read_time_udr_mask,
info->regs->read_time_udr_mask);
if (ret) {
dev_err(dev,
"Failed to prepare registers for time reading: %d\n",
ret);
return ret;
}
}
ret = regmap_bulk_read(info->regmap, info->regs->time, data,
info->regs->regs_count);
if (ret < 0)
return ret;
switch (info->device_type) {
case S5M8767X:
case S2MPS15X:
case S2MPS14X:
case S2MPS13X:
s5m8767_data_to_tm(data, tm, info->rtc_24hr_mode);
break;
default:
return -EINVAL;
}
dev_dbg(dev, "%s: %ptR(%d)\n", __func__, tm, tm->tm_wday);
return 0;
}
static int s5m_rtc_set_time(struct device *dev, struct rtc_time *tm)
{
struct s5m_rtc_info *info = dev_get_drvdata(dev);
u8 data[RTC_MAX_NUM_TIME_REGS];
int ret = 0;
switch (info->device_type) {
case S5M8767X:
case S2MPS15X:
case S2MPS14X:
case S2MPS13X:
ret = s5m8767_tm_to_data(tm, data);
break;
default:
return -EINVAL;
}
if (ret < 0)
return ret;
dev_dbg(dev, "%s: %ptR(%d)\n", __func__, tm, tm->tm_wday);
ret = regmap_raw_write(info->regmap, info->regs->time, data,
info->regs->regs_count);
if (ret < 0)
return ret;
ret = s5m8767_rtc_set_time_reg(info);
return ret;
}
static int s5m_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
{
struct s5m_rtc_info *info = dev_get_drvdata(dev);
u8 data[RTC_MAX_NUM_TIME_REGS];
int ret, i;
ret = regmap_bulk_read(info->regmap, info->regs->alarm0, data,
info->regs->regs_count);
if (ret < 0)
return ret;
switch (info->device_type) {
case S5M8767X:
case S2MPS15X:
case S2MPS14X:
case S2MPS13X:
s5m8767_data_to_tm(data, &alrm->time, info->rtc_24hr_mode);
alrm->enabled = 0;
for (i = 0; i < info->regs->regs_count; i++) {
if (data[i] & ALARM_ENABLE_MASK) {
alrm->enabled = 1;
break;
}
}
break;
default:
return -EINVAL;
}
dev_dbg(dev, "%s: %ptR(%d)\n", __func__, &alrm->time, alrm->time.tm_wday);
	return s5m_check_pending_alarm_interrupt(info, alrm);
}
static int s5m_rtc_stop_alarm(struct s5m_rtc_info *info)
{
u8 data[RTC_MAX_NUM_TIME_REGS];
int ret, i;
struct rtc_time tm;
ret = regmap_bulk_read(info->regmap, info->regs->alarm0, data,
info->regs->regs_count);
if (ret < 0)
return ret;
s5m8767_data_to_tm(data, &tm, info->rtc_24hr_mode);
dev_dbg(info->dev, "%s: %ptR(%d)\n", __func__, &tm, tm.tm_wday);
switch (info->device_type) {
case S5M8767X:
case S2MPS15X:
case S2MPS14X:
case S2MPS13X:
for (i = 0; i < info->regs->regs_count; i++)
data[i] &= ~ALARM_ENABLE_MASK;
ret = regmap_raw_write(info->regmap, info->regs->alarm0, data,
info->regs->regs_count);
if (ret < 0)
return ret;
ret = s5m8767_rtc_set_alarm_reg(info);
break;
default:
return -EINVAL;
}
return ret;
}
static int s5m_rtc_start_alarm(struct s5m_rtc_info *info)
{
int ret;
u8 data[RTC_MAX_NUM_TIME_REGS];
struct rtc_time tm;
ret = regmap_bulk_read(info->regmap, info->regs->alarm0, data,
info->regs->regs_count);
if (ret < 0)
return ret;
s5m8767_data_to_tm(data, &tm, info->rtc_24hr_mode);
dev_dbg(info->dev, "%s: %ptR(%d)\n", __func__, &tm, tm.tm_wday);
switch (info->device_type) {
case S5M8767X:
case S2MPS15X:
case S2MPS14X:
case S2MPS13X:
data[RTC_SEC] |= ALARM_ENABLE_MASK;
data[RTC_MIN] |= ALARM_ENABLE_MASK;
data[RTC_HOUR] |= ALARM_ENABLE_MASK;
data[RTC_WEEKDAY] &= ~ALARM_ENABLE_MASK;
if (data[RTC_DATE] & 0x1f)
data[RTC_DATE] |= ALARM_ENABLE_MASK;
if (data[RTC_MONTH] & 0xf)
data[RTC_MONTH] |= ALARM_ENABLE_MASK;
if (data[RTC_YEAR1] & 0x7f)
data[RTC_YEAR1] |= ALARM_ENABLE_MASK;
ret = regmap_raw_write(info->regmap, info->regs->alarm0, data,
info->regs->regs_count);
if (ret < 0)
return ret;
ret = s5m8767_rtc_set_alarm_reg(info);
break;
default:
return -EINVAL;
}
return ret;
}
static int s5m_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
{
struct s5m_rtc_info *info = dev_get_drvdata(dev);
u8 data[RTC_MAX_NUM_TIME_REGS];
int ret;
switch (info->device_type) {
case S5M8767X:
case S2MPS15X:
case S2MPS14X:
case S2MPS13X:
s5m8767_tm_to_data(&alrm->time, data);
break;
default:
return -EINVAL;
}
dev_dbg(dev, "%s: %ptR(%d)\n", __func__, &alrm->time, alrm->time.tm_wday);
ret = s5m_rtc_stop_alarm(info);
if (ret < 0)
return ret;
ret = regmap_raw_write(info->regmap, info->regs->alarm0, data,
info->regs->regs_count);
if (ret < 0)
return ret;
ret = s5m8767_rtc_set_alarm_reg(info);
if (ret < 0)
return ret;
if (alrm->enabled)
ret = s5m_rtc_start_alarm(info);
return ret;
}
static int s5m_rtc_alarm_irq_enable(struct device *dev,
unsigned int enabled)
{
struct s5m_rtc_info *info = dev_get_drvdata(dev);
if (enabled)
return s5m_rtc_start_alarm(info);
else
return s5m_rtc_stop_alarm(info);
}
static irqreturn_t s5m_rtc_alarm_irq(int irq, void *data)
{
struct s5m_rtc_info *info = data;
rtc_update_irq(info->rtc_dev, 1, RTC_IRQF | RTC_AF);
return IRQ_HANDLED;
}
static const struct rtc_class_ops s5m_rtc_ops = {
.read_time = s5m_rtc_read_time,
.set_time = s5m_rtc_set_time,
.read_alarm = s5m_rtc_read_alarm,
.set_alarm = s5m_rtc_set_alarm,
.alarm_irq_enable = s5m_rtc_alarm_irq_enable,
};
static int s5m8767_rtc_init_reg(struct s5m_rtc_info *info)
{
u8 data[2];
int ret;
switch (info->device_type) {
case S5M8767X:
/* UDR update time. Default of 7.32 ms is too long. */
ret = regmap_update_bits(info->regmap, S5M_RTC_UDR_CON,
S5M_RTC_UDR_T_MASK, S5M_RTC_UDR_T_450_US);
if (ret < 0)
dev_err(info->dev, "%s: fail to change UDR time: %d\n",
__func__, ret);
/* Set RTC control register : Binary mode, 24hour mode */
data[0] = (1 << BCD_EN_SHIFT) | (1 << MODEL24_SHIFT);
data[1] = (0 << BCD_EN_SHIFT) | (1 << MODEL24_SHIFT);
ret = regmap_raw_write(info->regmap, S5M_ALARM0_CONF, data, 2);
break;
case S2MPS15X:
case S2MPS14X:
case S2MPS13X:
data[0] = (0 << BCD_EN_SHIFT) | (1 << MODEL24_SHIFT);
ret = regmap_write(info->regmap, info->regs->ctrl, data[0]);
if (ret < 0)
break;
		/*
		 * The WUDR and (RUDR or AUDR) bits should be set high
		 * after writing the RTC_CTRL register, just as when
		 * writing the alarm registers. The datasheet does not
		 * describe this, but the vendor code really does it.
		 */
ret = s5m8767_rtc_set_alarm_reg(info);
break;
default:
return -EINVAL;
}
info->rtc_24hr_mode = 1;
if (ret < 0) {
dev_err(info->dev, "%s: fail to write controlm reg(%d)\n",
__func__, ret);
return ret;
}
return ret;
}
static int s5m_rtc_probe(struct platform_device *pdev)
{
struct sec_pmic_dev *s5m87xx = dev_get_drvdata(pdev->dev.parent);
struct s5m_rtc_info *info;
const struct regmap_config *regmap_cfg;
int ret, alarm_irq;
info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
if (!info)
return -ENOMEM;
switch (platform_get_device_id(pdev)->driver_data) {
case S2MPS15X:
regmap_cfg = &s2mps14_rtc_regmap_config;
info->regs = &s2mps15_rtc_regs;
alarm_irq = S2MPS14_IRQ_RTCA0;
break;
case S2MPS14X:
regmap_cfg = &s2mps14_rtc_regmap_config;
info->regs = &s2mps14_rtc_regs;
alarm_irq = S2MPS14_IRQ_RTCA0;
break;
case S2MPS13X:
regmap_cfg = &s2mps14_rtc_regmap_config;
info->regs = &s2mps13_rtc_regs;
alarm_irq = S2MPS14_IRQ_RTCA0;
break;
case S5M8767X:
regmap_cfg = &s5m_rtc_regmap_config;
info->regs = &s5m_rtc_regs;
alarm_irq = S5M8767_IRQ_RTCA1;
break;
default:
dev_err(&pdev->dev,
"Device type %lu is not supported by RTC driver\n",
platform_get_device_id(pdev)->driver_data);
return -ENODEV;
}
info->i2c = devm_i2c_new_dummy_device(&pdev->dev, s5m87xx->i2c->adapter,
RTC_I2C_ADDR);
if (IS_ERR(info->i2c)) {
dev_err(&pdev->dev, "Failed to allocate I2C for RTC\n");
return PTR_ERR(info->i2c);
}
info->regmap = devm_regmap_init_i2c(info->i2c, regmap_cfg);
if (IS_ERR(info->regmap)) {
ret = PTR_ERR(info->regmap);
dev_err(&pdev->dev, "Failed to allocate RTC register map: %d\n",
ret);
return ret;
}
info->dev = &pdev->dev;
info->s5m87xx = s5m87xx;
info->device_type = platform_get_device_id(pdev)->driver_data;
if (s5m87xx->irq_data) {
info->irq = regmap_irq_get_virq(s5m87xx->irq_data, alarm_irq);
if (info->irq <= 0) {
dev_err(&pdev->dev, "Failed to get virtual IRQ %d\n",
alarm_irq);
return -EINVAL;
}
}
platform_set_drvdata(pdev, info);
ret = s5m8767_rtc_init_reg(info);
if (ret)
return ret;
info->rtc_dev = devm_rtc_allocate_device(&pdev->dev);
if (IS_ERR(info->rtc_dev))
return PTR_ERR(info->rtc_dev);
info->rtc_dev->ops = &s5m_rtc_ops;
info->rtc_dev->range_min = RTC_TIMESTAMP_BEGIN_2000;
info->rtc_dev->range_max = RTC_TIMESTAMP_END_2099;
if (!info->irq) {
clear_bit(RTC_FEATURE_ALARM, info->rtc_dev->features);
} else {
ret = devm_request_threaded_irq(&pdev->dev, info->irq, NULL,
s5m_rtc_alarm_irq, 0, "rtc-alarm0",
info);
if (ret < 0) {
dev_err(&pdev->dev, "Failed to request alarm IRQ: %d: %d\n",
info->irq, ret);
return ret;
}
device_init_wakeup(&pdev->dev, 1);
}
return devm_rtc_register_device(info->rtc_dev);
}
#ifdef CONFIG_PM_SLEEP
static int s5m_rtc_resume(struct device *dev)
{
struct s5m_rtc_info *info = dev_get_drvdata(dev);
int ret = 0;
if (info->irq && device_may_wakeup(dev))
ret = disable_irq_wake(info->irq);
return ret;
}
static int s5m_rtc_suspend(struct device *dev)
{
struct s5m_rtc_info *info = dev_get_drvdata(dev);
int ret = 0;
if (info->irq && device_may_wakeup(dev))
ret = enable_irq_wake(info->irq);
return ret;
}
#endif /* CONFIG_PM_SLEEP */
static SIMPLE_DEV_PM_OPS(s5m_rtc_pm_ops, s5m_rtc_suspend, s5m_rtc_resume);
static const struct platform_device_id s5m_rtc_id[] = {
{ "s5m-rtc", S5M8767X },
{ "s2mps13-rtc", S2MPS13X },
{ "s2mps14-rtc", S2MPS14X },
{ "s2mps15-rtc", S2MPS15X },
{ },
};
MODULE_DEVICE_TABLE(platform, s5m_rtc_id);
static struct platform_driver s5m_rtc_driver = {
.driver = {
.name = "s5m-rtc",
.pm = &s5m_rtc_pm_ops,
},
.probe = s5m_rtc_probe,
.id_table = s5m_rtc_id,
};
module_platform_driver(s5m_rtc_driver);
/* Module information */
MODULE_AUTHOR("Sangbeom Kim <[email protected]>");
MODULE_DESCRIPTION("Samsung S5M/S2MPS14 RTC driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/rtc/rtc-s5m.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Driver for Epson's RTC module RX-8025 SA/NB
*
* Copyright (C) 2009 Wolfgang Grandegger <[email protected]>
*
* Copyright (C) 2005 by Digi International Inc.
* All rights reserved.
*
* Modified by fengjh at rising.com.cn
* <[email protected]>
* 2006.11
*
* Code cleanup by Sergei Poselenov, <[email protected]>
* Converted to new style by Wolfgang Grandegger <[email protected]>
* Alarm and periodic interrupt added by Dmitry Rakhchev <[email protected]>
*/
#include <linux/bcd.h>
#include <linux/bitops.h>
#include <linux/i2c.h>
#include <linux/kernel.h>
#include <linux/kstrtox.h>
#include <linux/module.h>
#include <linux/rtc.h>
/* Register definitions */
#define RX8025_REG_SEC 0x00
#define RX8025_REG_MIN 0x01
#define RX8025_REG_HOUR 0x02
#define RX8025_REG_WDAY 0x03
#define RX8025_REG_MDAY 0x04
#define RX8025_REG_MONTH 0x05
#define RX8025_REG_YEAR 0x06
#define RX8025_REG_DIGOFF 0x07
#define RX8025_REG_ALWMIN 0x08
#define RX8025_REG_ALWHOUR 0x09
#define RX8025_REG_ALWWDAY 0x0a
#define RX8025_REG_ALDMIN 0x0b
#define RX8025_REG_ALDHOUR 0x0c
/* 0x0d is reserved */
#define RX8025_REG_CTRL1 0x0e
#define RX8025_REG_CTRL2 0x0f
#define RX8025_BIT_CTRL1_CT (7 << 0)
/* 1 Hz periodic level irq */
#define RX8025_BIT_CTRL1_CT_1HZ 4
#define RX8025_BIT_CTRL1_TEST BIT(3)
#define RX8025_BIT_CTRL1_1224 BIT(5)
#define RX8025_BIT_CTRL1_DALE BIT(6)
#define RX8025_BIT_CTRL1_WALE BIT(7)
#define RX8025_BIT_CTRL2_DAFG BIT(0)
#define RX8025_BIT_CTRL2_WAFG BIT(1)
#define RX8025_BIT_CTRL2_CTFG BIT(2)
#define RX8025_BIT_CTRL2_PON BIT(4)
#define RX8025_BIT_CTRL2_XST BIT(5)
#define RX8025_BIT_CTRL2_VDET BIT(6)
#define RX8035_BIT_HOUR_1224 BIT(7)
/* Clock precision adjustment */
#define RX8025_ADJ_RESOLUTION 3050 /* in ppb */
#define RX8025_ADJ_DATA_MAX 62
#define RX8025_ADJ_DATA_MIN -62
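/*
 * e.g. the full adjustment range is 62 * 3050 ppb, roughly +-189 ppm,
 * or about +-16 seconds per day.
 */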
enum rx_model {
model_rx_unknown,
model_rx_8025,
model_rx_8035,
model_last
};
static const struct i2c_device_id rx8025_id[] = {
{ "rx8025", model_rx_8025 },
{ "rx8035", model_rx_8035 },
{ }
};
MODULE_DEVICE_TABLE(i2c, rx8025_id);
struct rx8025_data {
struct rtc_device *rtc;
enum rx_model model;
u8 ctrl1;
int is_24;
};
static s32 rx8025_read_reg(const struct i2c_client *client, u8 number)
{
return i2c_smbus_read_byte_data(client, number << 4);
}
static int rx8025_read_regs(const struct i2c_client *client,
u8 number, u8 length, u8 *values)
{
int ret = i2c_smbus_read_i2c_block_data(client, number << 4, length,
values);
if (ret != length)
return ret < 0 ? ret : -EIO;
return 0;
}
static s32 rx8025_write_reg(const struct i2c_client *client, u8 number,
u8 value)
{
return i2c_smbus_write_byte_data(client, number << 4, value);
}
static s32 rx8025_write_regs(const struct i2c_client *client,
u8 number, u8 length, const u8 *values)
{
return i2c_smbus_write_i2c_block_data(client, number << 4,
length, values);
}
static int rx8025_is_osc_stopped(enum rx_model model, int ctrl2)
{
int xstp = ctrl2 & RX8025_BIT_CTRL2_XST;
/* XSTP bit has different polarity on RX-8025 vs RX-8035.
* RX-8025: 0 == oscillator stopped
* RX-8035: 1 == oscillator stopped
*/
if (model == model_rx_8025)
xstp = !xstp;
return xstp;
}
static int rx8025_check_validity(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
struct rx8025_data *drvdata = dev_get_drvdata(dev);
int ctrl2;
int xstp;
ctrl2 = rx8025_read_reg(client, RX8025_REG_CTRL2);
if (ctrl2 < 0)
return ctrl2;
if (ctrl2 & RX8025_BIT_CTRL2_VDET)
dev_warn(dev, "power voltage drop detected\n");
if (ctrl2 & RX8025_BIT_CTRL2_PON) {
dev_warn(dev, "power-on reset detected, date is invalid\n");
return -EINVAL;
}
xstp = rx8025_is_osc_stopped(drvdata->model, ctrl2);
if (xstp) {
dev_warn(dev, "crystal stopped, date is invalid\n");
return -EINVAL;
}
return 0;
}
static int rx8025_reset_validity(struct i2c_client *client)
{
struct rx8025_data *drvdata = i2c_get_clientdata(client);
int ctrl2 = rx8025_read_reg(client, RX8025_REG_CTRL2);
if (ctrl2 < 0)
return ctrl2;
ctrl2 &= ~(RX8025_BIT_CTRL2_PON | RX8025_BIT_CTRL2_VDET);
if (drvdata->model == model_rx_8025)
ctrl2 |= RX8025_BIT_CTRL2_XST;
else
ctrl2 &= ~(RX8025_BIT_CTRL2_XST);
return rx8025_write_reg(client, RX8025_REG_CTRL2,
ctrl2);
}
static irqreturn_t rx8025_handle_irq(int irq, void *dev_id)
{
struct i2c_client *client = dev_id;
struct rx8025_data *rx8025 = i2c_get_clientdata(client);
int status, xstp;
rtc_lock(rx8025->rtc);
status = rx8025_read_reg(client, RX8025_REG_CTRL2);
if (status < 0)
goto out;
xstp = rx8025_is_osc_stopped(rx8025->model, status);
if (xstp)
dev_warn(&client->dev, "Oscillation stop was detected,"
"you may have to readjust the clock\n");
if (status & RX8025_BIT_CTRL2_CTFG) {
/* periodic */
status &= ~RX8025_BIT_CTRL2_CTFG;
rtc_update_irq(rx8025->rtc, 1, RTC_PF | RTC_IRQF);
}
if (status & RX8025_BIT_CTRL2_DAFG) {
/* alarm */
		status &= ~RX8025_BIT_CTRL2_DAFG;
if (rx8025_write_reg(client, RX8025_REG_CTRL1,
rx8025->ctrl1 & ~RX8025_BIT_CTRL1_DALE))
goto out;
rtc_update_irq(rx8025->rtc, 1, RTC_AF | RTC_IRQF);
}
out:
rtc_unlock(rx8025->rtc);
return IRQ_HANDLED;
}
static int rx8025_get_time(struct device *dev, struct rtc_time *dt)
{
struct i2c_client *client = to_i2c_client(dev);
struct rx8025_data *rx8025 = dev_get_drvdata(dev);
u8 date[7];
int err;
err = rx8025_check_validity(dev);
if (err)
return err;
err = rx8025_read_regs(client, RX8025_REG_SEC, 7, date);
if (err)
return err;
dev_dbg(dev, "%s: read %7ph\n", __func__, date);
dt->tm_sec = bcd2bin(date[RX8025_REG_SEC] & 0x7f);
dt->tm_min = bcd2bin(date[RX8025_REG_MIN] & 0x7f);
if (rx8025->is_24)
dt->tm_hour = bcd2bin(date[RX8025_REG_HOUR] & 0x3f);
else
dt->tm_hour = bcd2bin(date[RX8025_REG_HOUR] & 0x1f) % 12
+ (date[RX8025_REG_HOUR] & 0x20 ? 12 : 0);
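	/*
	 * e.g. an hour register of 0x32 (PM bit + BCD 12) decodes to 12
	 * (noon), while 0x12 (BCD 12, no PM bit) decodes to 0 (midnight).
	 */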
dt->tm_mday = bcd2bin(date[RX8025_REG_MDAY] & 0x3f);
dt->tm_mon = bcd2bin(date[RX8025_REG_MONTH] & 0x1f) - 1;
dt->tm_year = bcd2bin(date[RX8025_REG_YEAR]) + 100;
dev_dbg(dev, "%s: date %ptRr\n", __func__, dt);
return 0;
}
static int rx8025_set_time(struct device *dev, struct rtc_time *dt)
{
struct i2c_client *client = to_i2c_client(dev);
struct rx8025_data *rx8025 = dev_get_drvdata(dev);
u8 date[7];
int ret;
/*
* Here the read-only bits are written as "0". I'm not sure if that
* is sound.
*/
date[RX8025_REG_SEC] = bin2bcd(dt->tm_sec);
date[RX8025_REG_MIN] = bin2bcd(dt->tm_min);
if (rx8025->is_24)
date[RX8025_REG_HOUR] = bin2bcd(dt->tm_hour);
else
date[RX8025_REG_HOUR] = (dt->tm_hour >= 12 ? 0x20 : 0)
| bin2bcd((dt->tm_hour + 11) % 12 + 1);
date[RX8025_REG_WDAY] = bin2bcd(dt->tm_wday);
date[RX8025_REG_MDAY] = bin2bcd(dt->tm_mday);
date[RX8025_REG_MONTH] = bin2bcd(dt->tm_mon + 1);
date[RX8025_REG_YEAR] = bin2bcd(dt->tm_year - 100);
dev_dbg(dev, "%s: write %7ph\n", __func__, date);
ret = rx8025_write_regs(client, RX8025_REG_SEC, 7, date);
if (ret < 0)
return ret;
return rx8025_reset_validity(client);
}
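/*
 * Standalone sketch (not part of the driver): a round trip through the
 * 12-hour encode/decode expressions used by rx8025_set_time() and
 * rx8025_get_time() above, assuming bit 0x20 of the hour register is
 * the PM flag. The sketch_* helpers are local stand-ins for the
 * kernel's bin2bcd()/bcd2bin().
 */
#include <assert.h>

static unsigned int sketch_bin2bcd(unsigned int v)
{
	return ((v / 10) << 4) | (v % 10);
}

static unsigned int sketch_bcd2bin(unsigned int v)
{
	return (v >> 4) * 10 + (v & 0x0f);
}

static void sketch_hour_roundtrip(void)
{
	unsigned int hour, reg, decoded;

	for (hour = 0; hour < 24; hour++) {
		/* encode: 0 -> 12 AM, 12 -> 12 PM, 23 -> 11 PM */
		reg = (hour >= 12 ? 0x20 : 0)
			| sketch_bin2bcd((hour + 11) % 12 + 1);
		/* decode, mirroring rx8025_get_time() */
		decoded = sketch_bcd2bin(reg & 0x1f) % 12
			+ (reg & 0x20 ? 12 : 0);
		assert(decoded == hour);
	}
}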
static int rx8025_init_client(struct i2c_client *client)
{
struct rx8025_data *rx8025 = i2c_get_clientdata(client);
u8 ctrl[2], ctrl2;
int need_clear = 0;
int hour_reg;
int err;
err = rx8025_read_regs(client, RX8025_REG_CTRL1, 2, ctrl);
if (err)
goto out;
	/* Keep test bit zero! */
rx8025->ctrl1 = ctrl[0] & ~RX8025_BIT_CTRL1_TEST;
if (ctrl[1] & (RX8025_BIT_CTRL2_DAFG | RX8025_BIT_CTRL2_WAFG)) {
dev_warn(&client->dev, "Alarm was detected\n");
need_clear = 1;
}
if (ctrl[1] & RX8025_BIT_CTRL2_CTFG)
need_clear = 1;
if (need_clear) {
ctrl2 = ctrl[1];
ctrl2 &= ~(RX8025_BIT_CTRL2_CTFG | RX8025_BIT_CTRL2_WAFG |
RX8025_BIT_CTRL2_DAFG);
err = rx8025_write_reg(client, RX8025_REG_CTRL2, ctrl2);
}
if (rx8025->model == model_rx_8035) {
/* In RX-8035, 12/24 flag is in the hour register */
hour_reg = rx8025_read_reg(client, RX8025_REG_HOUR);
if (hour_reg < 0)
return hour_reg;
rx8025->is_24 = (hour_reg & RX8035_BIT_HOUR_1224);
} else {
		rx8025->is_24 = (ctrl[0] & RX8025_BIT_CTRL1_1224);
}
out:
return err;
}
/* Alarm support */
static int rx8025_read_alarm(struct device *dev, struct rtc_wkalrm *t)
{
struct i2c_client *client = to_i2c_client(dev);
struct rx8025_data *rx8025 = dev_get_drvdata(dev);
u8 ald[2];
int ctrl2, err;
err = rx8025_read_regs(client, RX8025_REG_ALDMIN, 2, ald);
if (err)
return err;
ctrl2 = rx8025_read_reg(client, RX8025_REG_CTRL2);
if (ctrl2 < 0)
return ctrl2;
dev_dbg(dev, "%s: read alarm 0x%02x 0x%02x ctrl2 %02x\n",
__func__, ald[0], ald[1], ctrl2);
	/* Hardware alarm precision is 1 minute! */
t->time.tm_sec = 0;
t->time.tm_min = bcd2bin(ald[0] & 0x7f);
if (rx8025->is_24)
t->time.tm_hour = bcd2bin(ald[1] & 0x3f);
else
t->time.tm_hour = bcd2bin(ald[1] & 0x1f) % 12
+ (ald[1] & 0x20 ? 12 : 0);
dev_dbg(dev, "%s: date: %ptRr\n", __func__, &t->time);
t->enabled = !!(rx8025->ctrl1 & RX8025_BIT_CTRL1_DALE);
t->pending = (ctrl2 & RX8025_BIT_CTRL2_DAFG) && t->enabled;
return err;
}
static int rx8025_set_alarm(struct device *dev, struct rtc_wkalrm *t)
{
struct i2c_client *client = to_i2c_client(dev);
struct rx8025_data *rx8025 = dev_get_drvdata(dev);
u8 ald[2];
int err;
ald[0] = bin2bcd(t->time.tm_min);
if (rx8025->is_24)
ald[1] = bin2bcd(t->time.tm_hour);
else
ald[1] = (t->time.tm_hour >= 12 ? 0x20 : 0)
| bin2bcd((t->time.tm_hour + 11) % 12 + 1);
dev_dbg(dev, "%s: write 0x%02x 0x%02x\n", __func__, ald[0], ald[1]);
if (rx8025->ctrl1 & RX8025_BIT_CTRL1_DALE) {
rx8025->ctrl1 &= ~RX8025_BIT_CTRL1_DALE;
err = rx8025_write_reg(client, RX8025_REG_CTRL1,
rx8025->ctrl1);
if (err)
return err;
}
err = rx8025_write_regs(client, RX8025_REG_ALDMIN, 2, ald);
if (err)
return err;
if (t->enabled) {
rx8025->ctrl1 |= RX8025_BIT_CTRL1_DALE;
err = rx8025_write_reg(client, RX8025_REG_CTRL1,
rx8025->ctrl1);
if (err)
return err;
}
return 0;
}
static int rx8025_alarm_irq_enable(struct device *dev, unsigned int enabled)
{
struct i2c_client *client = to_i2c_client(dev);
struct rx8025_data *rx8025 = dev_get_drvdata(dev);
u8 ctrl1;
int err;
ctrl1 = rx8025->ctrl1;
if (enabled)
ctrl1 |= RX8025_BIT_CTRL1_DALE;
else
ctrl1 &= ~RX8025_BIT_CTRL1_DALE;
if (ctrl1 != rx8025->ctrl1) {
rx8025->ctrl1 = ctrl1;
err = rx8025_write_reg(client, RX8025_REG_CTRL1,
rx8025->ctrl1);
if (err)
return err;
}
return 0;
}
/*
* According to the RX8025 SA/NB application manual the frequency and
* temperature characteristics can be approximated using the following
* equation:
*
* df = a * (ut - t)**2
*
* df: Frequency deviation in any temperature
* a : Coefficient = (-35 +-5) * 10**-9
* ut: Ultimate temperature in degree = +25 +-5 degree
* t : Any temperature in degree
*/
static int rx8025_read_offset(struct device *dev, long *offset)
{
struct i2c_client *client = to_i2c_client(dev);
int digoff;
digoff = rx8025_read_reg(client, RX8025_REG_DIGOFF);
if (digoff < 0)
return digoff;
*offset = digoff >= 64 ? digoff - 128 : digoff;
if (*offset > 0)
(*offset)--;
*offset *= RX8025_ADJ_RESOLUTION;
return 0;
}
static int rx8025_set_offset(struct device *dev, long offset)
{
struct i2c_client *client = to_i2c_client(dev);
u8 digoff;
offset /= RX8025_ADJ_RESOLUTION;
if (offset > RX8025_ADJ_DATA_MAX)
offset = RX8025_ADJ_DATA_MAX;
else if (offset < RX8025_ADJ_DATA_MIN)
offset = RX8025_ADJ_DATA_MIN;
else if (offset > 0)
offset++;
else if (offset < 0)
offset += 128;
digoff = offset;
return rx8025_write_reg(client, RX8025_REG_DIGOFF, digoff);
}
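/*
 * Standalone sketch (not part of the driver): the DIGOFF encoding
 * implemented by rx8025_set_offset()/rx8025_read_offset() above, in
 * units of adjustment steps (one step per RX8025_ADJ_RESOLUTION ppb).
 * Register value 0 means "no adjustment", positive step counts are
 * stored as step + 1 and negative ones as step + 128. For scale, the
 * equation above gives df = -35e-9 * (25 - 50)^2, roughly -21.9 ppm,
 * at t = 50 degrees C.
 */
static unsigned int sketch_digoff_encode(int steps)
{
	if (steps > 0)
		return steps + 1;	/* e.g. +3 steps -> 0x04 */
	if (steps < 0)
		return steps + 128;	/* e.g. -2 steps -> 0x7e */
	return 0;
}

static int sketch_digoff_decode(unsigned int reg)
{
	int steps = reg >= 64 ? (int)reg - 128 : (int)reg;

	if (steps > 0)
		steps--;
	return steps;	/* round-trips with the encoder above */
}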
static const struct rtc_class_ops rx8025_rtc_ops = {
.read_time = rx8025_get_time,
.set_time = rx8025_set_time,
.read_alarm = rx8025_read_alarm,
.set_alarm = rx8025_set_alarm,
.alarm_irq_enable = rx8025_alarm_irq_enable,
.read_offset = rx8025_read_offset,
.set_offset = rx8025_set_offset,
};
static ssize_t rx8025_sysfs_show_clock_adjust(struct device *dev,
struct device_attribute *attr,
char *buf)
{
long adj;
int err;
dev_warn_once(dev, "clock_adjust_ppb is deprecated, use offset\n");
err = rx8025_read_offset(dev, &adj);
if (err)
return err;
return sprintf(buf, "%ld\n", -adj);
}
static ssize_t rx8025_sysfs_store_clock_adjust(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
long adj;
int err;
dev_warn_once(dev, "clock_adjust_ppb is deprecated, use offset\n");
if (kstrtol(buf, 10, &adj) != 0)
return -EINVAL;
err = rx8025_set_offset(dev, -adj);
return err ? err : count;
}
static DEVICE_ATTR(clock_adjust_ppb, S_IRUGO | S_IWUSR,
rx8025_sysfs_show_clock_adjust,
rx8025_sysfs_store_clock_adjust);
static struct attribute *rx8025_attrs[] = {
&dev_attr_clock_adjust_ppb.attr,
NULL
};
static const struct attribute_group rx8025_attr_group = {
.attrs = rx8025_attrs,
};
static int rx8025_probe(struct i2c_client *client)
{
const struct i2c_device_id *id = i2c_match_id(rx8025_id, client);
struct i2c_adapter *adapter = client->adapter;
struct rx8025_data *rx8025;
int err = 0;
if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA
| I2C_FUNC_SMBUS_I2C_BLOCK)) {
dev_err(&adapter->dev,
"doesn't support required functionality\n");
return -EIO;
}
rx8025 = devm_kzalloc(&client->dev, sizeof(*rx8025), GFP_KERNEL);
if (!rx8025)
return -ENOMEM;
i2c_set_clientdata(client, rx8025);
if (id)
rx8025->model = id->driver_data;
err = rx8025_init_client(client);
if (err)
return err;
rx8025->rtc = devm_rtc_allocate_device(&client->dev);
if (IS_ERR(rx8025->rtc))
return PTR_ERR(rx8025->rtc);
rx8025->rtc->ops = &rx8025_rtc_ops;
rx8025->rtc->range_min = RTC_TIMESTAMP_BEGIN_1900;
rx8025->rtc->range_max = RTC_TIMESTAMP_END_2099;
if (client->irq > 0) {
dev_info(&client->dev, "IRQ %d supplied\n", client->irq);
err = devm_request_threaded_irq(&client->dev, client->irq, NULL,
rx8025_handle_irq,
IRQF_ONESHOT,
"rx8025", client);
if (err)
clear_bit(RTC_FEATURE_ALARM, rx8025->rtc->features);
}
rx8025->rtc->max_user_freq = 1;
set_bit(RTC_FEATURE_ALARM_RES_MINUTE, rx8025->rtc->features);
clear_bit(RTC_FEATURE_UPDATE_INTERRUPT, rx8025->rtc->features);
err = rtc_add_group(rx8025->rtc, &rx8025_attr_group);
if (err)
return err;
return devm_rtc_register_device(rx8025->rtc);
}
static struct i2c_driver rx8025_driver = {
.driver = {
.name = "rtc-rx8025",
},
.probe = rx8025_probe,
.id_table = rx8025_id,
};
module_i2c_driver(rx8025_driver);
MODULE_AUTHOR("Wolfgang Grandegger <[email protected]>");
MODULE_DESCRIPTION("RX-8025 SA/NB RTC driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/rtc/rtc-rx8025.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Freescale FlexTimer Module (FTM) alarm device driver.
*
* Copyright 2014 Freescale Semiconductor, Inc.
* Copyright 2019-2020 NXP
*
*/
#include <linux/device.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/fsl/ftm.h>
#include <linux/rtc.h>
#include <linux/time.h>
#include <linux/acpi.h>
#include <linux/pm_wakeirq.h>
#define FTM_SC_CLK(c) ((c) << FTM_SC_CLK_MASK_SHIFT)
/*
* Select Fixed frequency clock (32KHz) as clock source
* of FlexTimer Module
*/
#define FTM_SC_CLKS_FIXED_FREQ 0x02
#define FIXED_FREQ_CLK 32000
/* Select 128 (2^7) as divider factor */
#define MAX_FREQ_DIV (1 << FTM_SC_PS_MASK)
/* Maximum counter value in FlexTimer's CNT registers */
#define MAX_COUNT_VAL 0xffff
struct ftm_rtc {
struct rtc_device *rtc_dev;
void __iomem *base;
bool big_endian;
u32 alarm_freq;
};
static inline u32 rtc_readl(struct ftm_rtc *dev, u32 reg)
{
if (dev->big_endian)
return ioread32be(dev->base + reg);
else
return ioread32(dev->base + reg);
}
static inline void rtc_writel(struct ftm_rtc *dev, u32 reg, u32 val)
{
if (dev->big_endian)
iowrite32be(val, dev->base + reg);
else
iowrite32(val, dev->base + reg);
}
static inline void ftm_counter_enable(struct ftm_rtc *rtc)
{
u32 val;
	/* select the fixed-frequency clock source and the maximum (divide-by-128) prescaler */
val = rtc_readl(rtc, FTM_SC);
val &= ~(FTM_SC_PS_MASK | FTM_SC_CLK_MASK);
val |= (FTM_SC_PS_MASK | FTM_SC_CLK(FTM_SC_CLKS_FIXED_FREQ));
rtc_writel(rtc, FTM_SC, val);
}
static inline void ftm_counter_disable(struct ftm_rtc *rtc)
{
u32 val;
/* disable counter clock source */
val = rtc_readl(rtc, FTM_SC);
val &= ~(FTM_SC_PS_MASK | FTM_SC_CLK_MASK);
rtc_writel(rtc, FTM_SC, val);
}
static inline void ftm_irq_acknowledge(struct ftm_rtc *rtc)
{
unsigned int timeout = 100;
	/*
	 * Fix errata A-007728 for FlexTimer.
	 *
	 * If the FTM counter reaches the FTM_MOD value between
	 * the reading of the TOF bit and the writing of 0 to
	 * the TOF bit, the process of clearing the TOF bit
	 * does not work as expected when FTMx_CONF[NUMTOF] != 0
	 * and the current TOF count is less than FTMx_CONF[NUMTOF].
	 * If the above condition is met, the TOF bit remains set.
	 * If the TOF interrupt is enabled (FTMx_SC[TOIE] = 1), the
	 * TOF interrupt also remains asserted.
	 *
	 * In short: clearing the TOF bit in software does not work
	 * when FTMx_CONF[NUMTOF] is set to a nonzero value and the
	 * FTM counter reaches the FTM_MOD value.
	 *
	 * The workaround is to keep clearing the TOF bit until it
	 * takes effect (the FTM counter does not always reach
	 * FTM_MOD anyway), which may cost some cycles.
	 */
while ((FTM_SC_TOF & rtc_readl(rtc, FTM_SC)) && timeout--)
rtc_writel(rtc, FTM_SC, rtc_readl(rtc, FTM_SC) & (~FTM_SC_TOF));
}
static inline void ftm_irq_enable(struct ftm_rtc *rtc)
{
u32 val;
val = rtc_readl(rtc, FTM_SC);
val |= FTM_SC_TOIE;
rtc_writel(rtc, FTM_SC, val);
}
static inline void ftm_irq_disable(struct ftm_rtc *rtc)
{
u32 val;
val = rtc_readl(rtc, FTM_SC);
val &= ~FTM_SC_TOIE;
rtc_writel(rtc, FTM_SC, val);
}
static inline void ftm_reset_counter(struct ftm_rtc *rtc)
{
/*
* The CNT register contains the FTM counter value.
* Reset clears the CNT register. Writing any value to COUNT
* updates the counter with its initial value, CNTIN.
*/
rtc_writel(rtc, FTM_CNT, 0x00);
}
static void ftm_clean_alarm(struct ftm_rtc *rtc)
{
ftm_counter_disable(rtc);
rtc_writel(rtc, FTM_CNTIN, 0x00);
rtc_writel(rtc, FTM_MOD, ~0U);
ftm_reset_counter(rtc);
}
static irqreturn_t ftm_rtc_alarm_interrupt(int irq, void *dev)
{
struct ftm_rtc *rtc = dev;
rtc_update_irq(rtc->rtc_dev, 1, RTC_IRQF | RTC_AF);
ftm_irq_acknowledge(rtc);
ftm_irq_disable(rtc);
ftm_clean_alarm(rtc);
return IRQ_HANDLED;
}
static int ftm_rtc_alarm_irq_enable(struct device *dev,
unsigned int enabled)
{
struct ftm_rtc *rtc = dev_get_drvdata(dev);
if (enabled)
ftm_irq_enable(rtc);
else
ftm_irq_disable(rtc);
return 0;
}
/*
* Note:
* The function is not really getting time from the RTC
* since FlexTimer is not a RTC device, but we need to
* get time to setup alarm, so we are using system time
* for now.
*/
static int ftm_rtc_read_time(struct device *dev, struct rtc_time *tm)
{
rtc_time64_to_tm(ktime_get_real_seconds(), tm);
return 0;
}
static int ftm_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alm)
{
return 0;
}
/*
 * 1. Select the fixed frequency clock (32 kHz) as clock source;
 * 2. Select 128 (2^7) as the divider factor;
 * so the counter clock is 250 Hz (32 kHz / 128).
 *
 * 3. The FlexTimer's CNT register is a 32-bit register, but only its
 * lower 16 bits hold the counter value; the other 16 bits are
 * reserved. So the minimum counter value is 0x0 and the maximum
 * counter value is 0xffff.
 * Hence the maximum alarm interval is 262 (65536 / 250) seconds.
 */
static int ftm_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alm)
{
time64_t alm_time;
unsigned long long cycle;
struct ftm_rtc *rtc = dev_get_drvdata(dev);
alm_time = rtc_tm_to_time64(&alm->time);
ftm_clean_alarm(rtc);
cycle = (alm_time - ktime_get_real_seconds()) * rtc->alarm_freq;
if (cycle > MAX_COUNT_VAL) {
pr_err("Out of alarm range {0~262} seconds.\n");
return -ERANGE;
}
ftm_irq_disable(rtc);
/*
* The counter increments until the value of MOD is reached,
* at which point the counter is reloaded with the value of CNTIN.
* The TOF (the overflow flag) bit is set when the FTM counter
	 * changes from MOD to CNTIN. So we should write cycle - 1 to MOD.
*/
rtc_writel(rtc, FTM_MOD, cycle - 1);
ftm_counter_enable(rtc);
ftm_irq_enable(rtc);
return 0;
}
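/*
 * Standalone sketch (not part of the driver): the interval-to-cycles
 * math from ftm_rtc_set_alarm() above. With the 32 kHz fixed clock
 * divided by 128 the counter ticks at 250 Hz, so an alarm N seconds
 * ahead needs N * 250 cycles and must fit into the 16-bit counter.
 */
#include <stdio.h>

static void sketch_alarm_cycles(unsigned int seconds)
{
	const unsigned int alarm_freq = 32000 / 128;	/* 250 Hz */
	unsigned long long cycle = (unsigned long long)seconds * alarm_freq;

	if (cycle == 0 || cycle > 0xffff)
		printf("%u s: out of the 0-262 s range\n", seconds);
	else
		printf("%u s: FTM_MOD = %llu\n", seconds, cycle - 1);
}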
static const struct rtc_class_ops ftm_rtc_ops = {
.read_time = ftm_rtc_read_time,
.read_alarm = ftm_rtc_read_alarm,
.set_alarm = ftm_rtc_set_alarm,
.alarm_irq_enable = ftm_rtc_alarm_irq_enable,
};
static int ftm_rtc_probe(struct platform_device *pdev)
{
int irq;
int ret;
struct ftm_rtc *rtc;
rtc = devm_kzalloc(&pdev->dev, sizeof(*rtc), GFP_KERNEL);
if (unlikely(!rtc)) {
dev_err(&pdev->dev, "cannot alloc memory for rtc\n");
return -ENOMEM;
}
platform_set_drvdata(pdev, rtc);
rtc->rtc_dev = devm_rtc_allocate_device(&pdev->dev);
if (IS_ERR(rtc->rtc_dev))
return PTR_ERR(rtc->rtc_dev);
rtc->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(rtc->base)) {
dev_err(&pdev->dev, "cannot ioremap resource for rtc\n");
return PTR_ERR(rtc->base);
}
irq = platform_get_irq(pdev, 0);
if (irq < 0)
return irq;
ret = devm_request_irq(&pdev->dev, irq, ftm_rtc_alarm_interrupt,
0, dev_name(&pdev->dev), rtc);
if (ret < 0) {
dev_err(&pdev->dev, "failed to request irq\n");
return ret;
}
rtc->big_endian =
device_property_read_bool(&pdev->dev, "big-endian");
rtc->alarm_freq = (u32)FIXED_FREQ_CLK / (u32)MAX_FREQ_DIV;
rtc->rtc_dev->ops = &ftm_rtc_ops;
device_init_wakeup(&pdev->dev, true);
ret = dev_pm_set_wake_irq(&pdev->dev, irq);
if (ret)
dev_err(&pdev->dev, "failed to enable irq wake\n");
ret = devm_rtc_register_device(rtc->rtc_dev);
if (ret) {
dev_err(&pdev->dev, "can't register rtc device\n");
return ret;
}
return 0;
}
static const struct of_device_id ftm_rtc_match[] = {
{ .compatible = "fsl,ls1012a-ftm-alarm", },
{ .compatible = "fsl,ls1021a-ftm-alarm", },
{ .compatible = "fsl,ls1028a-ftm-alarm", },
{ .compatible = "fsl,ls1043a-ftm-alarm", },
{ .compatible = "fsl,ls1046a-ftm-alarm", },
{ .compatible = "fsl,ls1088a-ftm-alarm", },
{ .compatible = "fsl,ls208xa-ftm-alarm", },
{ .compatible = "fsl,lx2160a-ftm-alarm", },
{ },
};
MODULE_DEVICE_TABLE(of, ftm_rtc_match);
static const struct acpi_device_id ftm_imx_acpi_ids[] = {
{"NXP0014",},
{ }
};
MODULE_DEVICE_TABLE(acpi, ftm_imx_acpi_ids);
static struct platform_driver ftm_rtc_driver = {
.probe = ftm_rtc_probe,
.driver = {
.name = "ftm-alarm",
.of_match_table = ftm_rtc_match,
.acpi_match_table = ACPI_PTR(ftm_imx_acpi_ids),
},
};
module_platform_driver(ftm_rtc_driver);
MODULE_DESCRIPTION("NXP/Freescale FlexTimer alarm driver");
MODULE_AUTHOR("Biwen Li <[email protected]>");
MODULE_LICENSE("GPL");
| linux-master | drivers/rtc/rtc-fsl-ftm-alarm.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* rtc-palmas.c -- Palmas Real Time Clock driver.
* RTC driver for TI Palma series devices like TPS65913,
* TPS65914 power management IC.
*
* Copyright (c) 2012, NVIDIA Corporation.
*
* Author: Laxman Dewangan <[email protected]>
*/
#include <linux/bcd.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/mfd/palmas.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/rtc.h>
#include <linux/types.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
struct palmas_rtc {
struct rtc_device *rtc;
struct device *dev;
unsigned int irq;
};
/* Total number of RTC registers needed to set time */
#define PALMAS_NUM_TIME_REGS (PALMAS_YEARS_REG - PALMAS_SECONDS_REG + 1)
static int palmas_rtc_read_time(struct device *dev, struct rtc_time *tm)
{
unsigned char rtc_data[PALMAS_NUM_TIME_REGS];
struct palmas *palmas = dev_get_drvdata(dev->parent);
int ret;
/* Copy RTC counting registers to static registers or latches */
ret = palmas_update_bits(palmas, PALMAS_RTC_BASE, PALMAS_RTC_CTRL_REG,
PALMAS_RTC_CTRL_REG_GET_TIME, PALMAS_RTC_CTRL_REG_GET_TIME);
if (ret < 0) {
dev_err(dev, "RTC CTRL reg update failed, err: %d\n", ret);
return ret;
}
ret = palmas_bulk_read(palmas, PALMAS_RTC_BASE, PALMAS_SECONDS_REG,
rtc_data, PALMAS_NUM_TIME_REGS);
if (ret < 0) {
dev_err(dev, "RTC_SECONDS reg read failed, err = %d\n", ret);
return ret;
}
tm->tm_sec = bcd2bin(rtc_data[0]);
tm->tm_min = bcd2bin(rtc_data[1]);
tm->tm_hour = bcd2bin(rtc_data[2]);
tm->tm_mday = bcd2bin(rtc_data[3]);
tm->tm_mon = bcd2bin(rtc_data[4]) - 1;
tm->tm_year = bcd2bin(rtc_data[5]) + 100;
return ret;
}
static int palmas_rtc_set_time(struct device *dev, struct rtc_time *tm)
{
unsigned char rtc_data[PALMAS_NUM_TIME_REGS];
struct palmas *palmas = dev_get_drvdata(dev->parent);
int ret;
rtc_data[0] = bin2bcd(tm->tm_sec);
rtc_data[1] = bin2bcd(tm->tm_min);
rtc_data[2] = bin2bcd(tm->tm_hour);
rtc_data[3] = bin2bcd(tm->tm_mday);
rtc_data[4] = bin2bcd(tm->tm_mon + 1);
rtc_data[5] = bin2bcd(tm->tm_year - 100);
/* Stop RTC while updating the RTC time registers */
ret = palmas_update_bits(palmas, PALMAS_RTC_BASE, PALMAS_RTC_CTRL_REG,
PALMAS_RTC_CTRL_REG_STOP_RTC, 0);
if (ret < 0) {
dev_err(dev, "RTC stop failed, err = %d\n", ret);
return ret;
}
ret = palmas_bulk_write(palmas, PALMAS_RTC_BASE, PALMAS_SECONDS_REG,
rtc_data, PALMAS_NUM_TIME_REGS);
if (ret < 0) {
dev_err(dev, "RTC_SECONDS reg write failed, err = %d\n", ret);
return ret;
}
	/* Start the RTC again */
ret = palmas_update_bits(palmas, PALMAS_RTC_BASE, PALMAS_RTC_CTRL_REG,
PALMAS_RTC_CTRL_REG_STOP_RTC, PALMAS_RTC_CTRL_REG_STOP_RTC);
if (ret < 0)
dev_err(dev, "RTC start failed, err = %d\n", ret);
return ret;
}
static int palmas_rtc_alarm_irq_enable(struct device *dev, unsigned enabled)
{
struct palmas *palmas = dev_get_drvdata(dev->parent);
u8 val;
val = enabled ? PALMAS_RTC_INTERRUPTS_REG_IT_ALARM : 0;
return palmas_write(palmas, PALMAS_RTC_BASE,
PALMAS_RTC_INTERRUPTS_REG, val);
}
static int palmas_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alm)
{
unsigned char alarm_data[PALMAS_NUM_TIME_REGS];
u32 int_val;
struct palmas *palmas = dev_get_drvdata(dev->parent);
int ret;
ret = palmas_bulk_read(palmas, PALMAS_RTC_BASE,
PALMAS_ALARM_SECONDS_REG,
alarm_data, PALMAS_NUM_TIME_REGS);
if (ret < 0) {
dev_err(dev, "RTC_ALARM_SECONDS read failed, err = %d\n", ret);
return ret;
}
alm->time.tm_sec = bcd2bin(alarm_data[0]);
alm->time.tm_min = bcd2bin(alarm_data[1]);
alm->time.tm_hour = bcd2bin(alarm_data[2]);
alm->time.tm_mday = bcd2bin(alarm_data[3]);
alm->time.tm_mon = bcd2bin(alarm_data[4]) - 1;
alm->time.tm_year = bcd2bin(alarm_data[5]) + 100;
ret = palmas_read(palmas, PALMAS_RTC_BASE, PALMAS_RTC_INTERRUPTS_REG,
&int_val);
if (ret < 0) {
dev_err(dev, "RTC_INTERRUPTS reg read failed, err = %d\n", ret);
return ret;
}
if (int_val & PALMAS_RTC_INTERRUPTS_REG_IT_ALARM)
alm->enabled = 1;
return ret;
}
static int palmas_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alm)
{
unsigned char alarm_data[PALMAS_NUM_TIME_REGS];
struct palmas *palmas = dev_get_drvdata(dev->parent);
int ret;
ret = palmas_rtc_alarm_irq_enable(dev, 0);
if (ret < 0) {
dev_err(dev, "Disable RTC alarm failed\n");
return ret;
}
alarm_data[0] = bin2bcd(alm->time.tm_sec);
alarm_data[1] = bin2bcd(alm->time.tm_min);
alarm_data[2] = bin2bcd(alm->time.tm_hour);
alarm_data[3] = bin2bcd(alm->time.tm_mday);
alarm_data[4] = bin2bcd(alm->time.tm_mon + 1);
alarm_data[5] = bin2bcd(alm->time.tm_year - 100);
ret = palmas_bulk_write(palmas, PALMAS_RTC_BASE,
PALMAS_ALARM_SECONDS_REG, alarm_data, PALMAS_NUM_TIME_REGS);
if (ret < 0) {
dev_err(dev, "ALARM_SECONDS_REG write failed, err = %d\n", ret);
return ret;
}
if (alm->enabled)
ret = palmas_rtc_alarm_irq_enable(dev, 1);
return ret;
}
static int palmas_clear_interrupts(struct device *dev)
{
struct palmas *palmas = dev_get_drvdata(dev->parent);
unsigned int rtc_reg;
int ret;
ret = palmas_read(palmas, PALMAS_RTC_BASE, PALMAS_RTC_STATUS_REG,
&rtc_reg);
if (ret < 0) {
dev_err(dev, "RTC_STATUS read failed, err = %d\n", ret);
return ret;
}
ret = palmas_write(palmas, PALMAS_RTC_BASE, PALMAS_RTC_STATUS_REG,
rtc_reg);
if (ret < 0) {
dev_err(dev, "RTC_STATUS write failed, err = %d\n", ret);
return ret;
}
return 0;
}
static irqreturn_t palmas_rtc_interrupt(int irq, void *context)
{
struct palmas_rtc *palmas_rtc = context;
struct device *dev = palmas_rtc->dev;
int ret;
ret = palmas_clear_interrupts(dev);
if (ret < 0) {
dev_err(dev, "RTC interrupt clear failed, err = %d\n", ret);
return IRQ_NONE;
}
rtc_update_irq(palmas_rtc->rtc, 1, RTC_IRQF | RTC_AF);
return IRQ_HANDLED;
}
static const struct rtc_class_ops palmas_rtc_ops = {
.read_time = palmas_rtc_read_time,
.set_time = palmas_rtc_set_time,
.read_alarm = palmas_rtc_read_alarm,
.set_alarm = palmas_rtc_set_alarm,
.alarm_irq_enable = palmas_rtc_alarm_irq_enable,
};
static int palmas_rtc_probe(struct platform_device *pdev)
{
struct palmas *palmas = dev_get_drvdata(pdev->dev.parent);
struct palmas_rtc *palmas_rtc = NULL;
int ret;
bool enable_bb_charging = false;
bool high_bb_charging = false;
if (pdev->dev.of_node) {
enable_bb_charging = of_property_read_bool(pdev->dev.of_node,
"ti,backup-battery-chargeable");
high_bb_charging = of_property_read_bool(pdev->dev.of_node,
"ti,backup-battery-charge-high-current");
}
palmas_rtc = devm_kzalloc(&pdev->dev, sizeof(struct palmas_rtc),
GFP_KERNEL);
if (!palmas_rtc)
return -ENOMEM;
/* Clear pending interrupts */
ret = palmas_clear_interrupts(&pdev->dev);
if (ret < 0) {
dev_err(&pdev->dev, "clear RTC int failed, err = %d\n", ret);
return ret;
}
palmas_rtc->dev = &pdev->dev;
platform_set_drvdata(pdev, palmas_rtc);
if (enable_bb_charging) {
unsigned reg = PALMAS_BACKUP_BATTERY_CTRL_BBS_BBC_LOW_ICHRG;
if (high_bb_charging)
reg = 0;
ret = palmas_update_bits(palmas, PALMAS_PMU_CONTROL_BASE,
PALMAS_BACKUP_BATTERY_CTRL,
PALMAS_BACKUP_BATTERY_CTRL_BBS_BBC_LOW_ICHRG, reg);
if (ret < 0) {
dev_err(&pdev->dev,
"BACKUP_BATTERY_CTRL update failed, %d\n", ret);
return ret;
}
ret = palmas_update_bits(palmas, PALMAS_PMU_CONTROL_BASE,
PALMAS_BACKUP_BATTERY_CTRL,
PALMAS_BACKUP_BATTERY_CTRL_BB_CHG_EN,
PALMAS_BACKUP_BATTERY_CTRL_BB_CHG_EN);
if (ret < 0) {
dev_err(&pdev->dev,
"BACKUP_BATTERY_CTRL update failed, %d\n", ret);
return ret;
}
}
/* Start RTC */
ret = palmas_update_bits(palmas, PALMAS_RTC_BASE, PALMAS_RTC_CTRL_REG,
PALMAS_RTC_CTRL_REG_STOP_RTC,
PALMAS_RTC_CTRL_REG_STOP_RTC);
if (ret < 0) {
dev_err(&pdev->dev, "RTC_CTRL write failed, err = %d\n", ret);
return ret;
}
palmas_rtc->irq = platform_get_irq(pdev, 0);
device_init_wakeup(&pdev->dev, 1);
palmas_rtc->rtc = devm_rtc_device_register(&pdev->dev, pdev->name,
&palmas_rtc_ops, THIS_MODULE);
if (IS_ERR(palmas_rtc->rtc)) {
ret = PTR_ERR(palmas_rtc->rtc);
dev_err(&pdev->dev, "RTC register failed, err = %d\n", ret);
return ret;
}
ret = devm_request_threaded_irq(&pdev->dev, palmas_rtc->irq, NULL,
palmas_rtc_interrupt,
IRQF_TRIGGER_LOW | IRQF_ONESHOT,
dev_name(&pdev->dev), palmas_rtc);
if (ret < 0) {
dev_err(&pdev->dev, "IRQ request failed, err = %d\n", ret);
return ret;
}
return 0;
}
static void palmas_rtc_remove(struct platform_device *pdev)
{
palmas_rtc_alarm_irq_enable(&pdev->dev, 0);
}
#ifdef CONFIG_PM_SLEEP
static int palmas_rtc_suspend(struct device *dev)
{
struct palmas_rtc *palmas_rtc = dev_get_drvdata(dev);
if (device_may_wakeup(dev))
enable_irq_wake(palmas_rtc->irq);
return 0;
}
static int palmas_rtc_resume(struct device *dev)
{
struct palmas_rtc *palmas_rtc = dev_get_drvdata(dev);
if (device_may_wakeup(dev))
disable_irq_wake(palmas_rtc->irq);
return 0;
}
#endif
static SIMPLE_DEV_PM_OPS(palmas_rtc_pm_ops, palmas_rtc_suspend,
palmas_rtc_resume);
#ifdef CONFIG_OF
static const struct of_device_id of_palmas_rtc_match[] = {
{ .compatible = "ti,palmas-rtc"},
{ },
};
MODULE_DEVICE_TABLE(of, of_palmas_rtc_match);
#endif
static struct platform_driver palmas_rtc_driver = {
.probe = palmas_rtc_probe,
.remove_new = palmas_rtc_remove,
.driver = {
.name = "palmas-rtc",
.pm = &palmas_rtc_pm_ops,
.of_match_table = of_match_ptr(of_palmas_rtc_match),
},
};
module_platform_driver(palmas_rtc_driver);
MODULE_ALIAS("platform:palmas_rtc");
MODULE_DESCRIPTION("TI PALMAS series RTC driver");
MODULE_AUTHOR("Laxman Dewangan <[email protected]>");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/rtc/rtc-palmas.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Zorro Device Name Tables
*
* Copyright (C) 1999--2000 Geert Uytterhoeven
*
* Based on the PCI version:
*
* Copyright 1992--1999 Drew Eckhardt, Frederic Potter,
* David Mosberger-Tang, Martin Mares
*/
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/zorro.h>
#include "zorro.h"
struct zorro_prod_info {
__u16 prod;
unsigned short seen;
const char *name;
};
struct zorro_manuf_info {
__u16 manuf;
unsigned short nr;
const char *name;
struct zorro_prod_info *prods;
};
/*
* This is ridiculous, but we want the strings in
* the .init section so that they don't take up
 * real memory. Parse the same file multiple times
* to get all the info.
*/
#define MANUF( manuf, name ) static char __manufstr_##manuf[] __initdata = name;
#define ENDMANUF()
#define PRODUCT( manuf, prod, name ) static char __prodstr_##manuf##prod[] __initdata = name;
#include "devlist.h"
#define MANUF( manuf, name ) static struct zorro_prod_info __prods_##manuf[] __initdata = {
#define ENDMANUF() };
#define PRODUCT( manuf, prod, name ) { 0x##prod, 0, __prodstr_##manuf##prod },
#include "devlist.h"
static struct zorro_manuf_info __initdata zorro_manuf_list[] = {
#define MANUF( manuf, name ) { 0x##manuf, ARRAY_SIZE(__prods_##manuf), __manufstr_##manuf, __prods_##manuf },
#define ENDMANUF()
#define PRODUCT( manuf, prod, name )
#include "devlist.h"
};
#define MANUFS ARRAY_SIZE(zorro_manuf_list)
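/*
 * Standalone sketch (not part of the driver): the multiple-inclusion
 * ("X macro") trick used above, in miniature. The real code expands
 * devlist.h three times; here the item list is inlined, but the idea
 * is the same: one list, several macro definitions.
 */
#define SKETCH_ITEMS \
	SKETCH_ITEM(0201, "First board") \
	SKETCH_ITEM(0202, "Second board")

/* first pass: emit the strings */
#define SKETCH_ITEM(id, name) static const char sketch_str_##id[] = name;
SKETCH_ITEMS
#undef SKETCH_ITEM

struct sketch_entry {
	unsigned int id;
	const char *name;
};

/* second pass: emit the table referencing them */
#define SKETCH_ITEM(id, name) { 0x##id, sketch_str_##id },
static const struct sketch_entry sketch_table[] = { SKETCH_ITEMS };
#undef SKETCH_ITEM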
void __init zorro_name_device(struct zorro_dev *dev)
{
const struct zorro_manuf_info *manuf_p = zorro_manuf_list;
int i = MANUFS;
char *name = dev->name;
do {
if (manuf_p->manuf == ZORRO_MANUF(dev->id))
goto match_manuf;
manuf_p++;
} while (--i);
	/* Couldn't find the manufacturer or the product */
return;
match_manuf: {
struct zorro_prod_info *prod_p = manuf_p->prods;
int i = manuf_p->nr;
while (i > 0) {
if (prod_p->prod ==
((ZORRO_PROD(dev->id)<<8) | ZORRO_EPC(dev->id)))
goto match_prod;
prod_p++;
i--;
}
/* Ok, found the manufacturer, but unknown product */
sprintf(name, "Zorro device %08x (%s)", dev->id, manuf_p->name);
return;
/* Full match */
match_prod: {
char *n = name + sprintf(name, "%s %s", manuf_p->name, prod_p->name);
int nr = prod_p->seen + 1;
prod_p->seen = nr;
if (nr > 1)
sprintf(n, " (#%d)", nr);
}
}
}
| linux-master | drivers/zorro/names.c |
/*
* Zorro Driver Services
*
* Copyright (C) 2003 Geert Uytterhoeven
*
* Loosely based on drivers/pci/pci-driver.c
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file COPYING in the main directory of this archive
* for more details.
*/
#include <linux/init.h>
#include <linux/module.h>
#include <linux/zorro.h>
#include "zorro.h"
/**
* zorro_match_device - Tell if a Zorro device structure has a matching
* Zorro device id structure
* @ids: array of Zorro device id structures to search in
 * @z: the Zorro device structure to match against
*
* Used by a driver to check whether a Zorro device present in the
* system is in its list of supported devices. Returns the matching
* zorro_device_id structure or %NULL if there is no match.
*/
static const struct zorro_device_id *
zorro_match_device(const struct zorro_device_id *ids,
const struct zorro_dev *z)
{
while (ids->id) {
if (ids->id == ZORRO_WILDCARD || ids->id == z->id)
return ids;
ids++;
}
return NULL;
}
static int zorro_device_probe(struct device *dev)
{
int error = 0;
struct zorro_driver *drv = to_zorro_driver(dev->driver);
struct zorro_dev *z = to_zorro_dev(dev);
if (drv->probe) {
const struct zorro_device_id *id;
id = zorro_match_device(drv->id_table, z);
if (id)
error = drv->probe(z, id);
if (error >= 0)
error = 0;
}
return error;
}
static void zorro_device_remove(struct device *dev)
{
struct zorro_dev *z = to_zorro_dev(dev);
struct zorro_driver *drv = to_zorro_driver(dev->driver);
if (drv->remove)
drv->remove(z);
}
/**
* zorro_register_driver - register a new Zorro driver
* @drv: the driver structure to register
*
* Adds the driver structure to the list of registered drivers
* Returns zero or a negative error value.
*/
int zorro_register_driver(struct zorro_driver *drv)
{
/* initialize common driver fields */
drv->driver.name = drv->name;
drv->driver.bus = &zorro_bus_type;
/* register with core */
return driver_register(&drv->driver);
}
EXPORT_SYMBOL(zorro_register_driver);
/**
* zorro_unregister_driver - unregister a zorro driver
* @drv: the driver structure to unregister
*
* Deletes the driver structure from the list of registered Zorro drivers,
* gives it a chance to clean up by calling its remove() function for
* each device it was responsible for, and marks those devices as
* driverless.
*/
void zorro_unregister_driver(struct zorro_driver *drv)
{
driver_unregister(&drv->driver);
}
EXPORT_SYMBOL(zorro_unregister_driver);
/**
 * zorro_bus_match - Tell if a Zorro device matches a Zorro device driver
 * @dev: the Zorro device structure to match against
 * @drv: the device driver to search for a matching Zorro device id structure
*
* Used by the driver core to check whether a Zorro device present in the
* system is in a driver's list of supported devices. Returns 1 if
* supported, and 0 if there is no match.
*/
static int zorro_bus_match(struct device *dev, struct device_driver *drv)
{
struct zorro_dev *z = to_zorro_dev(dev);
struct zorro_driver *zorro_drv = to_zorro_driver(drv);
const struct zorro_device_id *ids = zorro_drv->id_table;
if (!ids)
return 0;
return !!zorro_match_device(ids, z);
}
static int zorro_uevent(const struct device *dev, struct kobj_uevent_env *env)
{
const struct zorro_dev *z;
if (!dev)
return -ENODEV;
z = to_zorro_dev(dev);
if (!z)
return -ENODEV;
if (add_uevent_var(env, "ZORRO_ID=%08X", z->id) ||
add_uevent_var(env, "ZORRO_SLOT_NAME=%s", dev_name(dev)) ||
add_uevent_var(env, "ZORRO_SLOT_ADDR=%04X", z->slotaddr) ||
add_uevent_var(env, "MODALIAS=" ZORRO_DEVICE_MODALIAS_FMT, z->id))
return -ENOMEM;
return 0;
}
struct bus_type zorro_bus_type = {
.name = "zorro",
.dev_name = "zorro",
.dev_groups = zorro_device_attribute_groups,
.match = zorro_bus_match,
.uevent = zorro_uevent,
.probe = zorro_device_probe,
.remove = zorro_device_remove,
};
EXPORT_SYMBOL(zorro_bus_type);
static int __init zorro_driver_init(void)
{
return bus_register(&zorro_bus_type);
}
postcore_initcall(zorro_driver_init);
| linux-master | drivers/zorro/zorro-driver.c |
/*
* File Attributes for Zorro Devices
*
* Copyright (C) 2003 Geert Uytterhoeven
*
* Loosely based on drivers/pci/pci-sysfs.c
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file COPYING in the main directory of this archive
* for more details.
*/
#include <linux/kernel.h>
#include <linux/zorro.h>
#include <linux/stat.h>
#include <linux/string.h>
#include <asm/byteorder.h>
#include "zorro.h"
/* show configuration fields */
#define zorro_config_attr(name, field, format_string) \
static ssize_t name##_show(struct device *dev, \
struct device_attribute *attr, char *buf) \
{ \
struct zorro_dev *z; \
\
z = to_zorro_dev(dev); \
return sprintf(buf, format_string, z->field); \
} \
static DEVICE_ATTR_RO(name);
zorro_config_attr(id, id, "0x%08x\n");
zorro_config_attr(type, rom.er_Type, "0x%02x\n");
zorro_config_attr(slotaddr, slotaddr, "0x%04x\n");
zorro_config_attr(slotsize, slotsize, "0x%04x\n");
static ssize_t serial_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct zorro_dev *z;
z = to_zorro_dev(dev);
return sprintf(buf, "0x%08x\n", be32_to_cpu(z->rom.er_SerialNumber));
}
static DEVICE_ATTR_RO(serial);
static ssize_t resource_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct zorro_dev *z = to_zorro_dev(dev);
return sprintf(buf, "0x%08lx 0x%08lx 0x%08lx\n",
(unsigned long)zorro_resource_start(z),
(unsigned long)zorro_resource_end(z),
zorro_resource_flags(z));
}
static DEVICE_ATTR_RO(resource);
static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct zorro_dev *z = to_zorro_dev(dev);
return sprintf(buf, ZORRO_DEVICE_MODALIAS_FMT "\n", z->id);
}
static DEVICE_ATTR_RO(modalias);
static struct attribute *zorro_device_attrs[] = {
&dev_attr_id.attr,
&dev_attr_type.attr,
&dev_attr_serial.attr,
&dev_attr_slotaddr.attr,
&dev_attr_slotsize.attr,
&dev_attr_resource.attr,
&dev_attr_modalias.attr,
NULL
};
static ssize_t zorro_read_config(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
struct zorro_dev *z = to_zorro_dev(kobj_to_dev(kobj));
struct ConfigDev cd;
/* Construct a ConfigDev */
memset(&cd, 0, sizeof(cd));
cd.cd_Rom = z->rom;
cd.cd_SlotAddr = cpu_to_be16(z->slotaddr);
cd.cd_SlotSize = cpu_to_be16(z->slotsize);
cd.cd_BoardAddr = cpu_to_be32(zorro_resource_start(z));
cd.cd_BoardSize = cpu_to_be32(zorro_resource_len(z));
return memory_read_from_buffer(buf, count, &off, &cd, sizeof(cd));
}
static struct bin_attribute zorro_config_attr = {
.attr = {
.name = "config",
.mode = S_IRUGO,
},
.size = sizeof(struct ConfigDev),
.read = zorro_read_config,
};
static struct bin_attribute *zorro_device_bin_attrs[] = {
&zorro_config_attr,
NULL
};
static const struct attribute_group zorro_device_attr_group = {
.attrs = zorro_device_attrs,
.bin_attrs = zorro_device_bin_attrs,
};
const struct attribute_group *zorro_device_attribute_groups[] = {
&zorro_device_attr_group,
NULL
};
| linux-master | drivers/zorro/zorro-sysfs.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Generate devlist.h from the Zorro ID file.
*
* (c) 2000 Geert Uytterhoeven <[email protected]>
*
* Based on the PCI version:
*
* (c) 1999--2000 Martin Mares <[email protected]>
*/
#include <stdio.h>
#include <string.h>
#define MAX_NAME_SIZE 63
static void
pq(FILE *f, const char *c)
{
while (*c) {
if (*c == '"')
fprintf(f, "\\\"");
else
fputc(*c, f);
c++;
}
}
int
main(void)
{
char line[1024], *c, *bra, manuf[8];
int manufs = 0;
int mode = 0;
int lino = 0;
int manuf_len = 0;
FILE *devf;
devf = fopen("devlist.h", "w");
if (!devf) {
fprintf(stderr, "Cannot create output file!\n");
return 1;
}
while (fgets(line, sizeof(line)-1, stdin)) {
lino++;
if ((c = strchr(line, '\n')))
*c = 0;
if (!line[0] || line[0] == '#')
continue;
if (line[0] == '\t') {
switch (mode) {
case 1:
if (strlen(line) > 5 && line[5] == ' ') {
c = line + 5;
while (*c == ' ')
*c++ = 0;
if (manuf_len + strlen(c) + 1 > MAX_NAME_SIZE) {
/* Too long, try cutting off long description */
bra = strchr(c, '[');
if (bra && bra > c && bra[-1] == ' ')
bra[-1] = 0;
if (manuf_len + strlen(c) + 1 > MAX_NAME_SIZE) {
fprintf(stderr, "Line %d: Product name too long\n", lino);
return 1;
}
}
fprintf(devf, "\tPRODUCT(%s,%s,\"", manuf, line+1);
pq(devf, c);
fputs("\")\n", devf);
} else goto err;
break;
default:
goto err;
}
} else if (strlen(line) > 4 && line[4] == ' ') {
c = line + 4;
while (*c == ' ')
*c++ = 0;
if (manufs)
fputs("ENDMANUF()\n\n", devf);
manufs++;
strcpy(manuf, line);
manuf_len = strlen(c);
if (manuf_len + 24 > MAX_NAME_SIZE) {
fprintf(stderr, "Line %d: manufacturer name too long\n", lino);
return 1;
}
fprintf(devf, "MANUF(%s,\"", manuf);
pq(devf, c);
fputs("\")\n", devf);
mode = 1;
} else {
err:
fprintf(stderr, "Line %d: Syntax error in mode %d: %s\n", lino, mode, line);
return 1;
}
}
fputs("ENDMANUF()\n\
\n\
#undef MANUF\n\
#undef PRODUCT\n\
#undef ENDMANUF\n", devf);
fclose(devf);
return 0;
}
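/*
 * Illustrative input (the names are placeholders, not real database
 * entries): the generator above reads a zorro.ids-style file on stdin,
 * where a manufacturer line is four hex digits, a space and a name,
 * and each product line below it is a tab (shown as \t), four hex
 * digits, a space and a name:
 *
 *	# comments and blank lines are ignored
 *	0201 Example Manufacturer
 *	\t0100 Example Board
 *
 * For that input it writes MANUF(0201,"Example Manufacturer") and
 * PRODUCT(0201,0100,"Example Board") entries into devlist.h.
 */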
| linux-master | drivers/zorro/gen-devlist.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Procfs interface for the Zorro bus.
*
* Copyright (C) 1998-2003 Geert Uytterhoeven
*
* Heavily based on the procfs interface for the PCI bus, which is
*
* Copyright (C) 1997, 1998 Martin Mares <[email protected]>
*/
#include <linux/types.h>
#include <linux/zorro.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/export.h>
#include <asm/byteorder.h>
#include <linux/uaccess.h>
#include <asm/amigahw.h>
#include <asm/setup.h>
static loff_t
proc_bus_zorro_lseek(struct file *file, loff_t off, int whence)
{
return fixed_size_llseek(file, off, whence, sizeof(struct ConfigDev));
}
static ssize_t
proc_bus_zorro_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos)
{
struct zorro_dev *z = pde_data(file_inode(file));
struct ConfigDev cd;
loff_t pos = *ppos;
if (pos >= sizeof(struct ConfigDev))
return 0;
if (nbytes >= sizeof(struct ConfigDev))
nbytes = sizeof(struct ConfigDev);
if (pos + nbytes > sizeof(struct ConfigDev))
nbytes = sizeof(struct ConfigDev) - pos;
/* Construct a ConfigDev */
memset(&cd, 0, sizeof(cd));
cd.cd_Rom = z->rom;
cd.cd_SlotAddr = cpu_to_be16(z->slotaddr);
cd.cd_SlotSize = cpu_to_be16(z->slotsize);
cd.cd_BoardAddr = cpu_to_be32(zorro_resource_start(z));
cd.cd_BoardSize = cpu_to_be32(zorro_resource_len(z));
if (copy_to_user(buf, (void *)&cd + pos, nbytes))
return -EFAULT;
*ppos += nbytes;
return nbytes;
}
static const struct proc_ops bus_zorro_proc_ops = {
.proc_lseek = proc_bus_zorro_lseek,
.proc_read = proc_bus_zorro_read,
};
static void * zorro_seq_start(struct seq_file *m, loff_t *pos)
{
return (*pos < zorro_num_autocon) ? pos : NULL;
}
static void * zorro_seq_next(struct seq_file *m, void *v, loff_t *pos)
{
(*pos)++;
return (*pos < zorro_num_autocon) ? pos : NULL;
}
static void zorro_seq_stop(struct seq_file *m, void *v)
{
}
static int zorro_seq_show(struct seq_file *m, void *v)
{
unsigned int slot = *(loff_t *)v;
struct zorro_dev *z = &zorro_autocon[slot];
seq_printf(m, "%02x\t%08x\t%08lx\t%08lx\t%02x\n", slot, z->id,
(unsigned long)zorro_resource_start(z),
(unsigned long)zorro_resource_len(z),
z->rom.er_Type);
return 0;
}
static const struct seq_operations zorro_devices_seq_ops = {
.start = zorro_seq_start,
.next = zorro_seq_next,
.stop = zorro_seq_stop,
.show = zorro_seq_show,
};
static struct proc_dir_entry *proc_bus_zorro_dir;
static int __init zorro_proc_attach_device(unsigned int slot)
{
struct proc_dir_entry *entry;
char name[4];
sprintf(name, "%02x", slot);
entry = proc_create_data(name, 0, proc_bus_zorro_dir,
&bus_zorro_proc_ops,
&zorro_autocon[slot]);
if (!entry)
return -ENOMEM;
	proc_set_size(entry, sizeof(struct ConfigDev));
return 0;
}
static int __init zorro_proc_init(void)
{
unsigned int slot;
if (MACH_IS_AMIGA && AMIGAHW_PRESENT(ZORRO)) {
proc_bus_zorro_dir = proc_mkdir("bus/zorro", NULL);
proc_create_seq("devices", 0, proc_bus_zorro_dir,
&zorro_devices_seq_ops);
for (slot = 0; slot < zorro_num_autocon; slot++)
zorro_proc_attach_device(slot);
}
return 0;
}
device_initcall(zorro_proc_init);
| linux-master | drivers/zorro/proc.c |
/*
* Zorro Bus Services
*
* Copyright (C) 1995-2003 Geert Uytterhoeven
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file COPYING in the main directory of this archive
* for more details.
*/
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/zorro.h>
#include <linux/bitops.h>
#include <linux/string.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <asm/byteorder.h>
#include <asm/setup.h>
#include <asm/amigahw.h>
#include "zorro.h"
/*
* Zorro Expansion Devices
*/
unsigned int zorro_num_autocon;
struct zorro_dev_init zorro_autocon_init[ZORRO_NUM_AUTO] __initdata;
struct zorro_dev *zorro_autocon;
/*
* Zorro bus
*/
struct zorro_bus {
struct device dev;
struct zorro_dev devices[];
};
/*
* Find Zorro Devices
*/
struct zorro_dev *zorro_find_device(zorro_id id, struct zorro_dev *from)
{
struct zorro_dev *z;
if (!zorro_num_autocon)
return NULL;
for (z = from ? from+1 : &zorro_autocon[0];
z < zorro_autocon+zorro_num_autocon;
z++)
if (id == ZORRO_WILDCARD || id == z->id)
return z;
return NULL;
}
EXPORT_SYMBOL(zorro_find_device);
/*
* Bitmask indicating portions of available Zorro II RAM that are unused
* by the system. Every bit represents a 64K chunk, for a maximum of 8MB
* (128 chunks, physical 0x00200000-0x009fffff).
*
* If you want to use (= allocate) portions of this RAM, you should clear
* the corresponding bits.
*
* Possible uses:
* - z2ram device
* - SCSI DMA bounce buffers
*
* FIXME: use the normal resource management
*/
DECLARE_BITMAP(zorro_unused_z2ram, 128);
EXPORT_SYMBOL(zorro_unused_z2ram);
static void __init mark_region(unsigned long start, unsigned long end,
int flag)
{
if (flag)
start += Z2RAM_CHUNKMASK;
else
end += Z2RAM_CHUNKMASK;
start &= ~Z2RAM_CHUNKMASK;
end &= ~Z2RAM_CHUNKMASK;
if (end <= Z2RAM_START || start >= Z2RAM_END)
return;
start = start < Z2RAM_START ? 0x00000000 : start-Z2RAM_START;
end = end > Z2RAM_END ? Z2RAM_SIZE : end-Z2RAM_START;
while (start < end) {
u32 chunk = start>>Z2RAM_CHUNKSHIFT;
if (flag)
set_bit(chunk, zorro_unused_z2ram);
else
clear_bit(chunk, zorro_unused_z2ram);
start += Z2RAM_CHUNKSIZE;
}
}
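/*
 * Standalone sketch (not part of the driver): the address-to-chunk
 * mapping behind mark_region() above, assuming the usual Zorro II
 * constants Z2RAM_START = 0x00200000 and 64 KiB chunks
 * (Z2RAM_CHUNKSHIFT = 16).
 */
static unsigned int sketch_z2ram_chunk(unsigned long addr)
{
	/* e.g. 0x00280000 -> (0x00280000 - 0x00200000) >> 16 = chunk 8 */
	return (addr - 0x00200000) >> 16;
}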
static struct resource __init *zorro_find_parent_resource(
struct platform_device *bridge, struct zorro_dev *z)
{
int i;
for (i = 0; i < bridge->num_resources; i++) {
struct resource *r = &bridge->resource[i];
if (zorro_resource_start(z) >= r->start &&
zorro_resource_end(z) <= r->end)
return r;
}
return &iomem_resource;
}
static int __init amiga_zorro_probe(struct platform_device *pdev)
{
struct zorro_bus *bus;
struct zorro_dev_init *zi;
struct zorro_dev *z;
struct resource *r;
unsigned int i;
int error;
/* Initialize the Zorro bus */
bus = kzalloc(struct_size(bus, devices, zorro_num_autocon),
GFP_KERNEL);
if (!bus)
return -ENOMEM;
zorro_autocon = bus->devices;
bus->dev.parent = &pdev->dev;
	dev_set_name(&bus->dev, "%s", zorro_bus_type.name);
error = device_register(&bus->dev);
if (error) {
pr_err("Zorro: Error registering zorro_bus\n");
put_device(&bus->dev);
kfree(bus);
return error;
}
platform_set_drvdata(pdev, bus);
pr_info("Zorro: Probing AutoConfig expansion devices: %u device%s\n",
zorro_num_autocon, zorro_num_autocon == 1 ? "" : "s");
/* First identify all devices ... */
for (i = 0; i < zorro_num_autocon; i++) {
zi = &zorro_autocon_init[i];
z = &zorro_autocon[i];
z->rom = zi->rom;
z->id = (be16_to_cpu(z->rom.er_Manufacturer) << 16) |
(z->rom.er_Product << 8);
if (z->id == ZORRO_PROD_GVP_EPC_BASE) {
/* GVP quirk */
unsigned long magic = zi->boardaddr + 0x8000;
z->id |= *(u16 *)ZTWO_VADDR(magic) & GVP_PRODMASK;
}
z->slotaddr = zi->slotaddr;
z->slotsize = zi->slotsize;
sprintf(z->name, "Zorro device %08x", z->id);
zorro_name_device(z);
z->resource.start = zi->boardaddr;
z->resource.end = zi->boardaddr + zi->boardsize - 1;
z->resource.name = z->name;
r = zorro_find_parent_resource(pdev, z);
error = request_resource(r, &z->resource);
if (error && !(z->rom.er_Type & ERTF_MEMLIST))
dev_err(&bus->dev,
"Address space collision on device %s %pR\n",
z->name, &z->resource);
z->dev.parent = &bus->dev;
z->dev.bus = &zorro_bus_type;
z->dev.id = i;
switch (z->rom.er_Type & ERT_TYPEMASK) {
case ERT_ZORROIII:
z->dev.coherent_dma_mask = DMA_BIT_MASK(32);
break;
case ERT_ZORROII:
default:
z->dev.coherent_dma_mask = DMA_BIT_MASK(24);
break;
}
z->dev.dma_mask = &z->dev.coherent_dma_mask;
}
/* ... then register them */
for (i = 0; i < zorro_num_autocon; i++) {
z = &zorro_autocon[i];
error = device_register(&z->dev);
if (error) {
dev_err(&bus->dev, "Error registering device %s\n",
z->name);
put_device(&z->dev);
continue;
}
}
/* Mark all available Zorro II memory */
zorro_for_each_dev(z) {
if (z->rom.er_Type & ERTF_MEMLIST)
mark_region(zorro_resource_start(z),
zorro_resource_end(z)+1, 1);
}
/* Unmark all used Zorro II memory */
for (i = 0; i < m68k_num_memory; i++)
if (m68k_memory[i].addr < 16*1024*1024)
mark_region(m68k_memory[i].addr,
m68k_memory[i].addr+m68k_memory[i].size,
0);
return 0;
}
static struct platform_driver amiga_zorro_driver = {
.driver = {
.name = "amiga-zorro",
},
};
static int __init amiga_zorro_init(void)
{
return platform_driver_probe(&amiga_zorro_driver, amiga_zorro_probe);
}
module_init(amiga_zorro_init);
MODULE_LICENSE("GPL");
| linux-master | drivers/zorro/zorro.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2018 Cadence Design Systems Inc.
*
* Author: Boris Brezillon <[email protected]>
*/
#include <linux/atomic.h>
#include <linux/bug.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/of.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include "internals.h"
static DEFINE_IDR(i3c_bus_idr);
static DEFINE_MUTEX(i3c_core_lock);
static int __i3c_first_dynamic_bus_num;
/**
* i3c_bus_maintenance_lock - Lock the bus for a maintenance operation
* @bus: I3C bus to take the lock on
*
* This function takes the bus lock so that no other operations can occur on
* the bus. This is needed for all kind of bus maintenance operation, like
* - enabling/disabling slave events
* - re-triggering DAA
* - changing the dynamic address of a device
* - relinquishing mastership
* - ...
*
* The reason for this kind of locking is that we don't want drivers and core
* logic to rely on I3C device information that could be changed behind their
* back.
*/
static void i3c_bus_maintenance_lock(struct i3c_bus *bus)
{
down_write(&bus->lock);
}
/**
* i3c_bus_maintenance_unlock - Release the bus lock after a maintenance
* operation
* @bus: I3C bus to release the lock on
*
* Should be called when the bus maintenance operation is done. See
* i3c_bus_maintenance_lock() for more details on what these maintenance
* operations are.
*/
static void i3c_bus_maintenance_unlock(struct i3c_bus *bus)
{
up_write(&bus->lock);
}
/**
* i3c_bus_normaluse_lock - Lock the bus for a normal operation
* @bus: I3C bus to take the lock on
*
* This function takes the bus lock for any operation that is not a maintenance
* operation (see i3c_bus_maintenance_lock() for a non-exhaustive list of
* maintenance operations). Basically all communications with I3C devices are
* normal operations (HDR, SDR transfers or CCC commands that do not change bus
* state or I3C dynamic address).
*
 * Note that this lock does not guarantee serialization of normal operations.
* In other words, transfer requests passed to the I3C master can be submitted
* in parallel and I3C master drivers have to use their own locking to make
* sure two different communications are not inter-mixed, or access to the
* output/input queue is not done while the engine is busy.
*/
void i3c_bus_normaluse_lock(struct i3c_bus *bus)
{
down_read(&bus->lock);
}
/**
* i3c_bus_normaluse_unlock - Release the bus lock after a normal operation
* @bus: I3C bus to release the lock on
*
* Should be called when a normal operation is done. See
* i3c_bus_normaluse_lock() for more details on what these normal operations
* are.
*/
void i3c_bus_normaluse_unlock(struct i3c_bus *bus)
{
up_read(&bus->lock);
}
static struct i3c_master_controller *
i3c_bus_to_i3c_master(struct i3c_bus *i3cbus)
{
return container_of(i3cbus, struct i3c_master_controller, bus);
}
static struct i3c_master_controller *dev_to_i3cmaster(struct device *dev)
{
return container_of(dev, struct i3c_master_controller, dev);
}
static const struct device_type i3c_device_type;
static struct i3c_bus *dev_to_i3cbus(struct device *dev)
{
struct i3c_master_controller *master;
if (dev->type == &i3c_device_type)
return dev_to_i3cdev(dev)->bus;
master = dev_to_i3cmaster(dev);
return &master->bus;
}
static struct i3c_dev_desc *dev_to_i3cdesc(struct device *dev)
{
struct i3c_master_controller *master;
if (dev->type == &i3c_device_type)
return dev_to_i3cdev(dev)->desc;
master = dev_to_i3cmaster(dev);
return master->this;
}
static ssize_t bcr_show(struct device *dev,
struct device_attribute *da,
char *buf)
{
struct i3c_bus *bus = dev_to_i3cbus(dev);
struct i3c_dev_desc *desc;
ssize_t ret;
i3c_bus_normaluse_lock(bus);
desc = dev_to_i3cdesc(dev);
ret = sprintf(buf, "%x\n", desc->info.bcr);
i3c_bus_normaluse_unlock(bus);
return ret;
}
static DEVICE_ATTR_RO(bcr);
static ssize_t dcr_show(struct device *dev,
struct device_attribute *da,
char *buf)
{
struct i3c_bus *bus = dev_to_i3cbus(dev);
struct i3c_dev_desc *desc;
ssize_t ret;
i3c_bus_normaluse_lock(bus);
desc = dev_to_i3cdesc(dev);
ret = sprintf(buf, "%x\n", desc->info.dcr);
i3c_bus_normaluse_unlock(bus);
return ret;
}
static DEVICE_ATTR_RO(dcr);
static ssize_t pid_show(struct device *dev,
struct device_attribute *da,
char *buf)
{
struct i3c_bus *bus = dev_to_i3cbus(dev);
struct i3c_dev_desc *desc;
ssize_t ret;
i3c_bus_normaluse_lock(bus);
desc = dev_to_i3cdesc(dev);
ret = sprintf(buf, "%llx\n", desc->info.pid);
i3c_bus_normaluse_unlock(bus);
return ret;
}
static DEVICE_ATTR_RO(pid);
static ssize_t dynamic_address_show(struct device *dev,
struct device_attribute *da,
char *buf)
{
struct i3c_bus *bus = dev_to_i3cbus(dev);
struct i3c_dev_desc *desc;
ssize_t ret;
i3c_bus_normaluse_lock(bus);
desc = dev_to_i3cdesc(dev);
ret = sprintf(buf, "%02x\n", desc->info.dyn_addr);
i3c_bus_normaluse_unlock(bus);
return ret;
}
static DEVICE_ATTR_RO(dynamic_address);
static const char * const hdrcap_strings[] = {
"hdr-ddr", "hdr-tsp", "hdr-tsl",
};
static ssize_t hdrcap_show(struct device *dev,
struct device_attribute *da,
char *buf)
{
struct i3c_bus *bus = dev_to_i3cbus(dev);
struct i3c_dev_desc *desc;
ssize_t offset = 0, ret;
unsigned long caps;
int mode;
i3c_bus_normaluse_lock(bus);
desc = dev_to_i3cdesc(dev);
caps = desc->info.hdr_cap;
for_each_set_bit(mode, &caps, 8) {
if (mode >= ARRAY_SIZE(hdrcap_strings))
break;
if (!hdrcap_strings[mode])
continue;
ret = sprintf(buf + offset, offset ? " %s" : "%s",
hdrcap_strings[mode]);
if (ret < 0)
goto out;
offset += ret;
}
ret = sprintf(buf + offset, "\n");
if (ret < 0)
goto out;
ret = offset + ret;
out:
i3c_bus_normaluse_unlock(bus);
return ret;
}
static DEVICE_ATTR_RO(hdrcap);
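/*
 * Illustrative note (the value is assumed for the example): with
 * desc->info.hdr_cap = 0x5, i.e. bits 0 and 2 set, the loop above
 * emits "hdr-ddr hdr-tsl"; set bits beyond the end of the
 * hdrcap_strings table terminate the loop.
 */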
static ssize_t modalias_show(struct device *dev,
struct device_attribute *da, char *buf)
{
struct i3c_device *i3c = dev_to_i3cdev(dev);
struct i3c_device_info devinfo;
u16 manuf, part, ext;
i3c_device_get_info(i3c, &devinfo);
manuf = I3C_PID_MANUF_ID(devinfo.pid);
part = I3C_PID_PART_ID(devinfo.pid);
ext = I3C_PID_EXTRA_INFO(devinfo.pid);
if (I3C_PID_RND_LOWER_32BITS(devinfo.pid))
return sprintf(buf, "i3c:dcr%02Xmanuf%04X", devinfo.dcr,
manuf);
return sprintf(buf, "i3c:dcr%02Xmanuf%04Xpart%04Xext%04X",
devinfo.dcr, manuf, part, ext);
}
static DEVICE_ATTR_RO(modalias);
static struct attribute *i3c_device_attrs[] = {
&dev_attr_bcr.attr,
&dev_attr_dcr.attr,
&dev_attr_pid.attr,
&dev_attr_dynamic_address.attr,
&dev_attr_hdrcap.attr,
&dev_attr_modalias.attr,
NULL,
};
ATTRIBUTE_GROUPS(i3c_device);
static int i3c_device_uevent(const struct device *dev, struct kobj_uevent_env *env)
{
const struct i3c_device *i3cdev = dev_to_i3cdev(dev);
struct i3c_device_info devinfo;
u16 manuf, part, ext;
i3c_device_get_info(i3cdev, &devinfo);
manuf = I3C_PID_MANUF_ID(devinfo.pid);
part = I3C_PID_PART_ID(devinfo.pid);
ext = I3C_PID_EXTRA_INFO(devinfo.pid);
if (I3C_PID_RND_LOWER_32BITS(devinfo.pid))
return add_uevent_var(env, "MODALIAS=i3c:dcr%02Xmanuf%04X",
devinfo.dcr, manuf);
return add_uevent_var(env,
"MODALIAS=i3c:dcr%02Xmanuf%04Xpart%04Xext%04X",
devinfo.dcr, manuf, part, ext);
}
static const struct device_type i3c_device_type = {
.groups = i3c_device_groups,
.uevent = i3c_device_uevent,
};
static int i3c_device_match(struct device *dev, struct device_driver *drv)
{
struct i3c_device *i3cdev;
struct i3c_driver *i3cdrv;
if (dev->type != &i3c_device_type)
return 0;
i3cdev = dev_to_i3cdev(dev);
i3cdrv = drv_to_i3cdrv(drv);
if (i3c_device_match_id(i3cdev, i3cdrv->id_table))
return 1;
return 0;
}
static int i3c_device_probe(struct device *dev)
{
struct i3c_device *i3cdev = dev_to_i3cdev(dev);
struct i3c_driver *driver = drv_to_i3cdrv(dev->driver);
return driver->probe(i3cdev);
}
static void i3c_device_remove(struct device *dev)
{
struct i3c_device *i3cdev = dev_to_i3cdev(dev);
struct i3c_driver *driver = drv_to_i3cdrv(dev->driver);
if (driver->remove)
driver->remove(i3cdev);
i3c_device_free_ibi(i3cdev);
}
struct bus_type i3c_bus_type = {
.name = "i3c",
.match = i3c_device_match,
.probe = i3c_device_probe,
.remove = i3c_device_remove,
};
static enum i3c_addr_slot_status
i3c_bus_get_addr_slot_status(struct i3c_bus *bus, u16 addr)
{
unsigned long status;
int bitpos = addr * 2;
if (addr > I2C_MAX_ADDR)
return I3C_ADDR_SLOT_RSVD;
status = bus->addrslots[bitpos / BITS_PER_LONG];
status >>= bitpos % BITS_PER_LONG;
return status & I3C_ADDR_SLOT_STATUS_MASK;
}
static void i3c_bus_set_addr_slot_status(struct i3c_bus *bus, u16 addr,
enum i3c_addr_slot_status status)
{
int bitpos = addr * 2;
unsigned long *ptr;
if (addr > I2C_MAX_ADDR)
return;
ptr = bus->addrslots + (bitpos / BITS_PER_LONG);
*ptr &= ~((unsigned long)I3C_ADDR_SLOT_STATUS_MASK <<
(bitpos % BITS_PER_LONG));
*ptr |= (unsigned long)status << (bitpos % BITS_PER_LONG);
}
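/*
 * Standalone sketch (not part of the subsystem): the two-bits-per-
 * address packing used by the helpers above. Address N occupies bits
 * [2N+1:2N] of the addrslots array, so a 64-bit long covers 32
 * addresses.
 */
static unsigned int sketch_get_slot(const unsigned long *slots,
				    unsigned int addr)
{
	unsigned int bitpos = addr * 2;
	unsigned int bpl = 8 * sizeof(unsigned long);

	/* e.g. with 64-bit longs, addr 0x50 -> word 2, bits [33:32] */
	return (slots[bitpos / bpl] >> (bitpos % bpl)) & 0x3;
}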
static bool i3c_bus_dev_addr_is_avail(struct i3c_bus *bus, u8 addr)
{
enum i3c_addr_slot_status status;
status = i3c_bus_get_addr_slot_status(bus, addr);
return status == I3C_ADDR_SLOT_FREE;
}
static int i3c_bus_get_free_addr(struct i3c_bus *bus, u8 start_addr)
{
enum i3c_addr_slot_status status;
u8 addr;
for (addr = start_addr; addr < I3C_MAX_ADDR; addr++) {
status = i3c_bus_get_addr_slot_status(bus, addr);
if (status == I3C_ADDR_SLOT_FREE)
return addr;
}
return -ENOMEM;
}
static void i3c_bus_init_addrslots(struct i3c_bus *bus)
{
int i;
/* Addresses 0 to 7 are reserved. */
for (i = 0; i < 8; i++)
i3c_bus_set_addr_slot_status(bus, i, I3C_ADDR_SLOT_RSVD);
/*
* Reserve broadcast address and all addresses that might collide
* with the broadcast address when facing a single bit error.
*/
i3c_bus_set_addr_slot_status(bus, I3C_BROADCAST_ADDR,
I3C_ADDR_SLOT_RSVD);
for (i = 0; i < 7; i++)
i3c_bus_set_addr_slot_status(bus, I3C_BROADCAST_ADDR ^ BIT(i),
I3C_ADDR_SLOT_RSVD);
}
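/*
 * Standalone sketch (not part of the subsystem): the addresses the
 * loop above reserves next to the 0x7e broadcast address. Flipping
 * each of the seven low bits yields every address that a single bit
 * error could turn into 0x7e.
 */
#include <stdio.h>

static void sketch_broadcast_neighbours(void)
{
	int i;

	/* prints: 7f 7c 7a 76 6e 5e 3e */
	for (i = 0; i < 7; i++)
		printf("%02x ", 0x7e ^ (1 << i));
	printf("\n");
}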
static void i3c_bus_cleanup(struct i3c_bus *i3cbus)
{
mutex_lock(&i3c_core_lock);
idr_remove(&i3c_bus_idr, i3cbus->id);
mutex_unlock(&i3c_core_lock);
}
static int i3c_bus_init(struct i3c_bus *i3cbus, struct device_node *np)
{
int ret, start, end, id = -1;
init_rwsem(&i3cbus->lock);
INIT_LIST_HEAD(&i3cbus->devs.i2c);
INIT_LIST_HEAD(&i3cbus->devs.i3c);
i3c_bus_init_addrslots(i3cbus);
i3cbus->mode = I3C_BUS_MODE_PURE;
if (np)
id = of_alias_get_id(np, "i3c");
mutex_lock(&i3c_core_lock);
if (id >= 0) {
start = id;
end = start + 1;
} else {
start = __i3c_first_dynamic_bus_num;
end = 0;
}
ret = idr_alloc(&i3c_bus_idr, i3cbus, start, end, GFP_KERNEL);
mutex_unlock(&i3c_core_lock);
if (ret < 0)
return ret;
i3cbus->id = ret;
return 0;
}
static const char * const i3c_bus_mode_strings[] = {
[I3C_BUS_MODE_PURE] = "pure",
[I3C_BUS_MODE_MIXED_FAST] = "mixed-fast",
[I3C_BUS_MODE_MIXED_LIMITED] = "mixed-limited",
[I3C_BUS_MODE_MIXED_SLOW] = "mixed-slow",
};
static ssize_t mode_show(struct device *dev,
struct device_attribute *da,
char *buf)
{
struct i3c_bus *i3cbus = dev_to_i3cbus(dev);
ssize_t ret;
i3c_bus_normaluse_lock(i3cbus);
if (i3cbus->mode < 0 ||
i3cbus->mode >= ARRAY_SIZE(i3c_bus_mode_strings) ||
!i3c_bus_mode_strings[i3cbus->mode])
ret = sprintf(buf, "unknown\n");
else
ret = sprintf(buf, "%s\n", i3c_bus_mode_strings[i3cbus->mode]);
i3c_bus_normaluse_unlock(i3cbus);
return ret;
}
static DEVICE_ATTR_RO(mode);
static ssize_t current_master_show(struct device *dev,
struct device_attribute *da,
char *buf)
{
struct i3c_bus *i3cbus = dev_to_i3cbus(dev);
ssize_t ret;
i3c_bus_normaluse_lock(i3cbus);
ret = sprintf(buf, "%d-%llx\n", i3cbus->id,
i3cbus->cur_master->info.pid);
i3c_bus_normaluse_unlock(i3cbus);
return ret;
}
static DEVICE_ATTR_RO(current_master);
static ssize_t i3c_scl_frequency_show(struct device *dev,
struct device_attribute *da,
char *buf)
{
struct i3c_bus *i3cbus = dev_to_i3cbus(dev);
ssize_t ret;
i3c_bus_normaluse_lock(i3cbus);
ret = sprintf(buf, "%ld\n", i3cbus->scl_rate.i3c);
i3c_bus_normaluse_unlock(i3cbus);
return ret;
}
static DEVICE_ATTR_RO(i3c_scl_frequency);
static ssize_t i2c_scl_frequency_show(struct device *dev,
struct device_attribute *da,
char *buf)
{
struct i3c_bus *i3cbus = dev_to_i3cbus(dev);
ssize_t ret;
i3c_bus_normaluse_lock(i3cbus);
ret = sprintf(buf, "%ld\n", i3cbus->scl_rate.i2c);
i3c_bus_normaluse_unlock(i3cbus);
return ret;
}
static DEVICE_ATTR_RO(i2c_scl_frequency);
static struct attribute *i3c_masterdev_attrs[] = {
&dev_attr_mode.attr,
&dev_attr_current_master.attr,
&dev_attr_i3c_scl_frequency.attr,
&dev_attr_i2c_scl_frequency.attr,
&dev_attr_bcr.attr,
&dev_attr_dcr.attr,
&dev_attr_pid.attr,
&dev_attr_dynamic_address.attr,
&dev_attr_hdrcap.attr,
NULL,
};
ATTRIBUTE_GROUPS(i3c_masterdev);
static void i3c_masterdev_release(struct device *dev)
{
struct i3c_master_controller *master = dev_to_i3cmaster(dev);
struct i3c_bus *bus = dev_to_i3cbus(dev);
if (master->wq)
destroy_workqueue(master->wq);
WARN_ON(!list_empty(&bus->devs.i2c) || !list_empty(&bus->devs.i3c));
i3c_bus_cleanup(bus);
of_node_put(dev->of_node);
}
static const struct device_type i3c_masterdev_type = {
.groups = i3c_masterdev_groups,
};
static int i3c_bus_set_mode(struct i3c_bus *i3cbus, enum i3c_bus_mode mode,
unsigned long max_i2c_scl_rate)
{
struct i3c_master_controller *master = i3c_bus_to_i3c_master(i3cbus);
i3cbus->mode = mode;
switch (i3cbus->mode) {
case I3C_BUS_MODE_PURE:
if (!i3cbus->scl_rate.i3c)
i3cbus->scl_rate.i3c = I3C_BUS_TYP_I3C_SCL_RATE;
break;
case I3C_BUS_MODE_MIXED_FAST:
case I3C_BUS_MODE_MIXED_LIMITED:
if (!i3cbus->scl_rate.i3c)
i3cbus->scl_rate.i3c = I3C_BUS_TYP_I3C_SCL_RATE;
if (!i3cbus->scl_rate.i2c)
i3cbus->scl_rate.i2c = max_i2c_scl_rate;
break;
case I3C_BUS_MODE_MIXED_SLOW:
if (!i3cbus->scl_rate.i2c)
i3cbus->scl_rate.i2c = max_i2c_scl_rate;
if (!i3cbus->scl_rate.i3c ||
i3cbus->scl_rate.i3c > i3cbus->scl_rate.i2c)
i3cbus->scl_rate.i3c = i3cbus->scl_rate.i2c;
break;
default:
return -EINVAL;
}
dev_dbg(&master->dev, "i2c-scl = %ld Hz i3c-scl = %ld Hz\n",
i3cbus->scl_rate.i2c, i3cbus->scl_rate.i3c);
/*
* I3C/I2C frequency may have been overridden, check that user-provided
* values are not exceeding max possible frequency.
*/
if (i3cbus->scl_rate.i3c > I3C_BUS_MAX_I3C_SCL_RATE ||
i3cbus->scl_rate.i2c > I3C_BUS_I2C_FM_PLUS_SCL_RATE)
return -EINVAL;
return 0;
}
static struct i3c_master_controller *
i2c_adapter_to_i3c_master(struct i2c_adapter *adap)
{
return container_of(adap, struct i3c_master_controller, i2c);
}
static struct i2c_adapter *
i3c_master_to_i2c_adapter(struct i3c_master_controller *master)
{
return &master->i2c;
}
static void i3c_master_free_i2c_dev(struct i2c_dev_desc *dev)
{
kfree(dev);
}
static struct i2c_dev_desc *
i3c_master_alloc_i2c_dev(struct i3c_master_controller *master,
u16 addr, u8 lvr)
{
struct i2c_dev_desc *dev;
dev = kzalloc(sizeof(*dev), GFP_KERNEL);
if (!dev)
return ERR_PTR(-ENOMEM);
dev->common.master = master;
dev->addr = addr;
dev->lvr = lvr;
return dev;
}
static void *i3c_ccc_cmd_dest_init(struct i3c_ccc_cmd_dest *dest, u8 addr,
u16 payloadlen)
{
dest->addr = addr;
dest->payload.len = payloadlen;
if (payloadlen)
dest->payload.data = kzalloc(payloadlen, GFP_KERNEL);
else
dest->payload.data = NULL;
return dest->payload.data;
}
static void i3c_ccc_cmd_dest_cleanup(struct i3c_ccc_cmd_dest *dest)
{
kfree(dest->payload.data);
}
static void i3c_ccc_cmd_init(struct i3c_ccc_cmd *cmd, bool rnw, u8 id,
struct i3c_ccc_cmd_dest *dests,
unsigned int ndests)
{
cmd->rnw = rnw ? 1 : 0;
cmd->id = id;
cmd->dests = dests;
cmd->ndests = ndests;
cmd->err = I3C_ERROR_UNKNOWN;
}
static int i3c_master_send_ccc_cmd_locked(struct i3c_master_controller *master,
struct i3c_ccc_cmd *cmd)
{
int ret;
if (!cmd || !master)
return -EINVAL;
if (WARN_ON(master->init_done &&
!rwsem_is_locked(&master->bus.lock)))
return -EINVAL;
if (!master->ops->send_ccc_cmd)
return -ENOTSUPP;
if ((cmd->id & I3C_CCC_DIRECT) && (!cmd->dests || !cmd->ndests))
return -EINVAL;
if (master->ops->supports_ccc_cmd &&
!master->ops->supports_ccc_cmd(master, cmd))
return -ENOTSUPP;
ret = master->ops->send_ccc_cmd(master, cmd);
if (ret) {
if (cmd->err != I3C_ERROR_UNKNOWN)
return cmd->err;
return ret;
}
return 0;
}
static struct i2c_dev_desc *
i3c_master_find_i2c_dev_by_addr(const struct i3c_master_controller *master,
u16 addr)
{
struct i2c_dev_desc *dev;
i3c_bus_for_each_i2cdev(&master->bus, dev) {
if (dev->addr == addr)
return dev;
}
return NULL;
}
/**
* i3c_master_get_free_addr() - get a free address on the bus
* @master: I3C master object
* @start_addr: where to start searching
*
* This function must be called with the bus lock held in write mode.
*
* Return: the first free address starting at @start_addr (included) or -ENOMEM
 * if no free address is available.
*/
int i3c_master_get_free_addr(struct i3c_master_controller *master,
u8 start_addr)
{
return i3c_bus_get_free_addr(&master->bus, start_addr);
}
EXPORT_SYMBOL_GPL(i3c_master_get_free_addr);
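/*
 * Usage sketch (illustrative only): a controller driver that has to pick a
 * dynamic address itself (e.g. before ACKing a Hot-Join) can rely on
 * i3c_master_get_free_addr(). The my_drv_* context below is hypothetical.
 *
 *	static int my_drv_pick_dyn_addr(struct i3c_master_controller *m)
 *	{
 *		// Must be called with the bus lock held in write mode.
 *		int addr = i3c_master_get_free_addr(m, 0);
 *
 *		if (addr < 0)
 *			return addr;	// -ENOMEM: no free slot left
 *
 *		// Program 'addr' into the controller DAA/Hot-Join logic.
 *		return addr;
 *	}
 */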
static void i3c_device_release(struct device *dev)
{
struct i3c_device *i3cdev = dev_to_i3cdev(dev);
WARN_ON(i3cdev->desc);
of_node_put(i3cdev->dev.of_node);
kfree(i3cdev);
}
static void i3c_master_free_i3c_dev(struct i3c_dev_desc *dev)
{
kfree(dev);
}
static struct i3c_dev_desc *
i3c_master_alloc_i3c_dev(struct i3c_master_controller *master,
const struct i3c_device_info *info)
{
struct i3c_dev_desc *dev;
dev = kzalloc(sizeof(*dev), GFP_KERNEL);
if (!dev)
return ERR_PTR(-ENOMEM);
dev->common.master = master;
dev->info = *info;
mutex_init(&dev->ibi_lock);
return dev;
}
static int i3c_master_rstdaa_locked(struct i3c_master_controller *master,
u8 addr)
{
enum i3c_addr_slot_status addrstat;
struct i3c_ccc_cmd_dest dest;
struct i3c_ccc_cmd cmd;
int ret;
if (!master)
return -EINVAL;
addrstat = i3c_bus_get_addr_slot_status(&master->bus, addr);
if (addr != I3C_BROADCAST_ADDR && addrstat != I3C_ADDR_SLOT_I3C_DEV)
return -EINVAL;
i3c_ccc_cmd_dest_init(&dest, addr, 0);
i3c_ccc_cmd_init(&cmd, false,
I3C_CCC_RSTDAA(addr == I3C_BROADCAST_ADDR),
&dest, 1);
ret = i3c_master_send_ccc_cmd_locked(master, &cmd);
i3c_ccc_cmd_dest_cleanup(&dest);
return ret;
}
/**
* i3c_master_entdaa_locked() - start a DAA (Dynamic Address Assignment)
* procedure
* @master: master used to send frames on the bus
*
 * Send an ENTDAA CCC command to start a DAA procedure.
*
* Note that this function only sends the ENTDAA CCC command, all the logic
* behind dynamic address assignment has to be handled in the I3C master
* driver.
*
* This function must be called with the bus lock held in write mode.
*
* Return: 0 in case of success, a positive I3C error code if the error is
* one of the official Mx error codes, and a negative error code otherwise.
*/
int i3c_master_entdaa_locked(struct i3c_master_controller *master)
{
struct i3c_ccc_cmd_dest dest;
struct i3c_ccc_cmd cmd;
int ret;
i3c_ccc_cmd_dest_init(&dest, I3C_BROADCAST_ADDR, 0);
i3c_ccc_cmd_init(&cmd, false, I3C_CCC_ENTDAA, &dest, 1);
ret = i3c_master_send_ccc_cmd_locked(master, &cmd);
i3c_ccc_cmd_dest_cleanup(&dest);
return ret;
}
EXPORT_SYMBOL_GPL(i3c_master_entdaa_locked);
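/*
 * Usage sketch (illustrative only): a software-driven ->do_daa()
 * implementation built on this helper, assuming a controller that assigns
 * one address per ENTDAA transaction. my_drv_* names are hypothetical;
 * each assigned address is reported to the core with
 * i3c_master_add_i3c_dev_locked() (defined later in this file).
 *
 *	static int my_drv_do_daa(struct i3c_master_controller *m)
 *	{
 *		int ret, addr;
 *
 *		do {
 *			addr = i3c_master_get_free_addr(m, 0);
 *			if (addr < 0)
 *				return addr;
 *
 *			// Program 'addr' as the next address to assign,
 *			// then send ENTDAA on the bus.
 *			ret = i3c_master_entdaa_locked(m);
 *			if (!ret)
 *				ret = i3c_master_add_i3c_dev_locked(m, addr);
 *		} while (!ret);
 *
 *		// I3C_ERROR_M2 means no device left to assign.
 *		return ret == I3C_ERROR_M2 ? 0 : ret;
 *	}
 */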
static int i3c_master_enec_disec_locked(struct i3c_master_controller *master,
u8 addr, bool enable, u8 evts)
{
struct i3c_ccc_events *events;
struct i3c_ccc_cmd_dest dest;
struct i3c_ccc_cmd cmd;
int ret;
events = i3c_ccc_cmd_dest_init(&dest, addr, sizeof(*events));
if (!events)
return -ENOMEM;
events->events = evts;
i3c_ccc_cmd_init(&cmd, false,
enable ?
I3C_CCC_ENEC(addr == I3C_BROADCAST_ADDR) :
I3C_CCC_DISEC(addr == I3C_BROADCAST_ADDR),
&dest, 1);
ret = i3c_master_send_ccc_cmd_locked(master, &cmd);
i3c_ccc_cmd_dest_cleanup(&dest);
return ret;
}
/**
* i3c_master_disec_locked() - send a DISEC CCC command
* @master: master used to send frames on the bus
* @addr: a valid I3C slave address or %I3C_BROADCAST_ADDR
* @evts: events to disable
*
* Send a DISEC CCC command to disable some or all events coming from a
* specific slave, or all devices if @addr is %I3C_BROADCAST_ADDR.
*
* This function must be called with the bus lock held in write mode.
*
* Return: 0 in case of success, a positive I3C error code if the error is
* one of the official Mx error codes, and a negative error code otherwise.
*/
int i3c_master_disec_locked(struct i3c_master_controller *master, u8 addr,
u8 evts)
{
return i3c_master_enec_disec_locked(master, addr, false, evts);
}
EXPORT_SYMBOL_GPL(i3c_master_disec_locked);
/**
* i3c_master_enec_locked() - send an ENEC CCC command
* @master: master used to send frames on the bus
* @addr: a valid I3C slave address or %I3C_BROADCAST_ADDR
 * @evts: events to enable
*
* Sends an ENEC CCC command to enable some or all events coming from a
* specific slave, or all devices if @addr is %I3C_BROADCAST_ADDR.
*
* This function must be called with the bus lock held in write mode.
*
* Return: 0 in case of success, a positive I3C error code if the error is
* one of the official Mx error codes, and a negative error code otherwise.
*/
int i3c_master_enec_locked(struct i3c_master_controller *master, u8 addr,
u8 evts)
{
return i3c_master_enec_disec_locked(master, addr, true, evts);
}
EXPORT_SYMBOL_GPL(i3c_master_enec_locked);
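/*
 * Usage sketch (illustrative only): ENEC/DISEC typically back the IBI
 * enable/disable hooks of a controller driver, e.g.:
 *
 *	static int my_drv_enable_ibi(struct i3c_dev_desc *dev)
 *	{
 *		struct i3c_master_controller *m = i3c_dev_get_master(dev);
 *
 *		// Allow slave interrupt requests (IBIs) from this device.
 *		return i3c_master_enec_locked(m, dev->info.dyn_addr,
 *					      I3C_CCC_EVENT_SIR);
 *	}
 */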
/**
* i3c_master_defslvs_locked() - send a DEFSLVS CCC command
* @master: master used to send frames on the bus
*
* Send a DEFSLVS CCC command containing all the devices known to the @master.
* This is useful when you have secondary masters on the bus to propagate
* device information.
*
* This should be called after all I3C devices have been discovered (in other
* words, after the DAA procedure has finished) and instantiated in
* &i3c_master_controller_ops->bus_init().
 * It should also be called after the master has ACKed a Hot-Join request
 * and assigned a dynamic address to the device joining the bus.
*
* This function must be called with the bus lock held in write mode.
*
* Return: 0 in case of success, a positive I3C error code if the error is
* one of the official Mx error codes, and a negative error code otherwise.
*/
int i3c_master_defslvs_locked(struct i3c_master_controller *master)
{
struct i3c_ccc_defslvs *defslvs;
struct i3c_ccc_dev_desc *desc;
struct i3c_ccc_cmd_dest dest;
struct i3c_dev_desc *i3cdev;
struct i2c_dev_desc *i2cdev;
struct i3c_ccc_cmd cmd;
struct i3c_bus *bus;
bool send = false;
int ndevs = 0, ret;
if (!master)
return -EINVAL;
bus = i3c_master_get_bus(master);
i3c_bus_for_each_i3cdev(bus, i3cdev) {
ndevs++;
if (i3cdev == master->this)
continue;
if (I3C_BCR_DEVICE_ROLE(i3cdev->info.bcr) ==
I3C_BCR_I3C_MASTER)
send = true;
}
/* No other master on the bus, skip DEFSLVS. */
if (!send)
return 0;
i3c_bus_for_each_i2cdev(bus, i2cdev)
ndevs++;
defslvs = i3c_ccc_cmd_dest_init(&dest, I3C_BROADCAST_ADDR,
struct_size(defslvs, slaves,
ndevs - 1));
if (!defslvs)
return -ENOMEM;
defslvs->count = ndevs;
defslvs->master.bcr = master->this->info.bcr;
defslvs->master.dcr = master->this->info.dcr;
defslvs->master.dyn_addr = master->this->info.dyn_addr << 1;
defslvs->master.static_addr = I3C_BROADCAST_ADDR << 1;
desc = defslvs->slaves;
i3c_bus_for_each_i2cdev(bus, i2cdev) {
desc->lvr = i2cdev->lvr;
desc->static_addr = i2cdev->addr << 1;
desc++;
}
i3c_bus_for_each_i3cdev(bus, i3cdev) {
/* Skip the I3C dev representing this master. */
if (i3cdev == master->this)
continue;
desc->bcr = i3cdev->info.bcr;
desc->dcr = i3cdev->info.dcr;
desc->dyn_addr = i3cdev->info.dyn_addr << 1;
desc->static_addr = i3cdev->info.static_addr << 1;
desc++;
}
i3c_ccc_cmd_init(&cmd, false, I3C_CCC_DEFSLVS, &dest, 1);
ret = i3c_master_send_ccc_cmd_locked(master, &cmd);
i3c_ccc_cmd_dest_cleanup(&dest);
return ret;
}
EXPORT_SYMBOL_GPL(i3c_master_defslvs_locked);
static int i3c_master_setda_locked(struct i3c_master_controller *master,
u8 oldaddr, u8 newaddr, bool setdasa)
{
struct i3c_ccc_cmd_dest dest;
struct i3c_ccc_setda *setda;
struct i3c_ccc_cmd cmd;
int ret;
if (!oldaddr || !newaddr)
return -EINVAL;
setda = i3c_ccc_cmd_dest_init(&dest, oldaddr, sizeof(*setda));
if (!setda)
return -ENOMEM;
setda->addr = newaddr << 1;
i3c_ccc_cmd_init(&cmd, false,
setdasa ? I3C_CCC_SETDASA : I3C_CCC_SETNEWDA,
&dest, 1);
ret = i3c_master_send_ccc_cmd_locked(master, &cmd);
i3c_ccc_cmd_dest_cleanup(&dest);
return ret;
}
static int i3c_master_setdasa_locked(struct i3c_master_controller *master,
u8 static_addr, u8 dyn_addr)
{
return i3c_master_setda_locked(master, static_addr, dyn_addr, true);
}
static int i3c_master_setnewda_locked(struct i3c_master_controller *master,
u8 oldaddr, u8 newaddr)
{
return i3c_master_setda_locked(master, oldaddr, newaddr, false);
}
static int i3c_master_getmrl_locked(struct i3c_master_controller *master,
struct i3c_device_info *info)
{
struct i3c_ccc_cmd_dest dest;
struct i3c_ccc_mrl *mrl;
struct i3c_ccc_cmd cmd;
int ret;
mrl = i3c_ccc_cmd_dest_init(&dest, info->dyn_addr, sizeof(*mrl));
if (!mrl)
return -ENOMEM;
/*
	 * When the device does not have an IBI payload, GETMRL only returns
	 * 2 bytes of data.
*/
if (!(info->bcr & I3C_BCR_IBI_PAYLOAD))
dest.payload.len -= 1;
i3c_ccc_cmd_init(&cmd, true, I3C_CCC_GETMRL, &dest, 1);
ret = i3c_master_send_ccc_cmd_locked(master, &cmd);
if (ret)
goto out;
switch (dest.payload.len) {
case 3:
info->max_ibi_len = mrl->ibi_len;
fallthrough;
case 2:
info->max_read_len = be16_to_cpu(mrl->read_len);
break;
default:
ret = -EIO;
goto out;
}
out:
i3c_ccc_cmd_dest_cleanup(&dest);
return ret;
}
static int i3c_master_getmwl_locked(struct i3c_master_controller *master,
struct i3c_device_info *info)
{
struct i3c_ccc_cmd_dest dest;
struct i3c_ccc_mwl *mwl;
struct i3c_ccc_cmd cmd;
int ret;
mwl = i3c_ccc_cmd_dest_init(&dest, info->dyn_addr, sizeof(*mwl));
if (!mwl)
return -ENOMEM;
i3c_ccc_cmd_init(&cmd, true, I3C_CCC_GETMWL, &dest, 1);
ret = i3c_master_send_ccc_cmd_locked(master, &cmd);
if (ret)
goto out;
if (dest.payload.len != sizeof(*mwl)) {
ret = -EIO;
goto out;
}
info->max_write_len = be16_to_cpu(mwl->len);
out:
i3c_ccc_cmd_dest_cleanup(&dest);
return ret;
}
static int i3c_master_getmxds_locked(struct i3c_master_controller *master,
struct i3c_device_info *info)
{
struct i3c_ccc_getmxds *getmaxds;
struct i3c_ccc_cmd_dest dest;
struct i3c_ccc_cmd cmd;
int ret;
getmaxds = i3c_ccc_cmd_dest_init(&dest, info->dyn_addr,
sizeof(*getmaxds));
if (!getmaxds)
return -ENOMEM;
i3c_ccc_cmd_init(&cmd, true, I3C_CCC_GETMXDS, &dest, 1);
ret = i3c_master_send_ccc_cmd_locked(master, &cmd);
if (ret)
goto out;
if (dest.payload.len != 2 && dest.payload.len != 5) {
ret = -EIO;
goto out;
}
info->max_read_ds = getmaxds->maxrd;
info->max_write_ds = getmaxds->maxwr;
if (dest.payload.len == 5)
info->max_read_turnaround = getmaxds->maxrdturn[0] |
((u32)getmaxds->maxrdturn[1] << 8) |
((u32)getmaxds->maxrdturn[2] << 16);
out:
i3c_ccc_cmd_dest_cleanup(&dest);
return ret;
}
static int i3c_master_gethdrcap_locked(struct i3c_master_controller *master,
struct i3c_device_info *info)
{
struct i3c_ccc_gethdrcap *gethdrcap;
struct i3c_ccc_cmd_dest dest;
struct i3c_ccc_cmd cmd;
int ret;
gethdrcap = i3c_ccc_cmd_dest_init(&dest, info->dyn_addr,
sizeof(*gethdrcap));
if (!gethdrcap)
return -ENOMEM;
i3c_ccc_cmd_init(&cmd, true, I3C_CCC_GETHDRCAP, &dest, 1);
ret = i3c_master_send_ccc_cmd_locked(master, &cmd);
if (ret)
goto out;
if (dest.payload.len != 1) {
ret = -EIO;
goto out;
}
info->hdr_cap = gethdrcap->modes;
out:
i3c_ccc_cmd_dest_cleanup(&dest);
return ret;
}
static int i3c_master_getpid_locked(struct i3c_master_controller *master,
struct i3c_device_info *info)
{
struct i3c_ccc_getpid *getpid;
struct i3c_ccc_cmd_dest dest;
struct i3c_ccc_cmd cmd;
int ret, i;
getpid = i3c_ccc_cmd_dest_init(&dest, info->dyn_addr, sizeof(*getpid));
if (!getpid)
return -ENOMEM;
i3c_ccc_cmd_init(&cmd, true, I3C_CCC_GETPID, &dest, 1);
ret = i3c_master_send_ccc_cmd_locked(master, &cmd);
if (ret)
goto out;
info->pid = 0;
for (i = 0; i < sizeof(getpid->pid); i++) {
int sft = (sizeof(getpid->pid) - i - 1) * 8;
info->pid |= (u64)getpid->pid[i] << sft;
}
out:
i3c_ccc_cmd_dest_cleanup(&dest);
return ret;
}
static int i3c_master_getbcr_locked(struct i3c_master_controller *master,
struct i3c_device_info *info)
{
struct i3c_ccc_getbcr *getbcr;
struct i3c_ccc_cmd_dest dest;
struct i3c_ccc_cmd cmd;
int ret;
getbcr = i3c_ccc_cmd_dest_init(&dest, info->dyn_addr, sizeof(*getbcr));
if (!getbcr)
return -ENOMEM;
i3c_ccc_cmd_init(&cmd, true, I3C_CCC_GETBCR, &dest, 1);
ret = i3c_master_send_ccc_cmd_locked(master, &cmd);
if (ret)
goto out;
info->bcr = getbcr->bcr;
out:
i3c_ccc_cmd_dest_cleanup(&dest);
return ret;
}
static int i3c_master_getdcr_locked(struct i3c_master_controller *master,
struct i3c_device_info *info)
{
struct i3c_ccc_getdcr *getdcr;
struct i3c_ccc_cmd_dest dest;
struct i3c_ccc_cmd cmd;
int ret;
getdcr = i3c_ccc_cmd_dest_init(&dest, info->dyn_addr, sizeof(*getdcr));
if (!getdcr)
return -ENOMEM;
i3c_ccc_cmd_init(&cmd, true, I3C_CCC_GETDCR, &dest, 1);
ret = i3c_master_send_ccc_cmd_locked(master, &cmd);
if (ret)
goto out;
info->dcr = getdcr->dcr;
out:
i3c_ccc_cmd_dest_cleanup(&dest);
return ret;
}
static int i3c_master_retrieve_dev_info(struct i3c_dev_desc *dev)
{
struct i3c_master_controller *master = i3c_dev_get_master(dev);
enum i3c_addr_slot_status slot_status;
int ret;
if (!dev->info.dyn_addr)
return -EINVAL;
slot_status = i3c_bus_get_addr_slot_status(&master->bus,
dev->info.dyn_addr);
if (slot_status == I3C_ADDR_SLOT_RSVD ||
slot_status == I3C_ADDR_SLOT_I2C_DEV)
return -EINVAL;
ret = i3c_master_getpid_locked(master, &dev->info);
if (ret)
return ret;
ret = i3c_master_getbcr_locked(master, &dev->info);
if (ret)
return ret;
ret = i3c_master_getdcr_locked(master, &dev->info);
if (ret)
return ret;
if (dev->info.bcr & I3C_BCR_MAX_DATA_SPEED_LIM) {
ret = i3c_master_getmxds_locked(master, &dev->info);
if (ret)
return ret;
}
if (dev->info.bcr & I3C_BCR_IBI_PAYLOAD)
dev->info.max_ibi_len = 1;
i3c_master_getmrl_locked(master, &dev->info);
i3c_master_getmwl_locked(master, &dev->info);
if (dev->info.bcr & I3C_BCR_HDR_CAP) {
ret = i3c_master_gethdrcap_locked(master, &dev->info);
if (ret)
return ret;
}
return 0;
}
static void i3c_master_put_i3c_addrs(struct i3c_dev_desc *dev)
{
struct i3c_master_controller *master = i3c_dev_get_master(dev);
if (dev->info.static_addr)
i3c_bus_set_addr_slot_status(&master->bus,
dev->info.static_addr,
I3C_ADDR_SLOT_FREE);
if (dev->info.dyn_addr)
i3c_bus_set_addr_slot_status(&master->bus, dev->info.dyn_addr,
I3C_ADDR_SLOT_FREE);
if (dev->boardinfo && dev->boardinfo->init_dyn_addr)
i3c_bus_set_addr_slot_status(&master->bus, dev->info.dyn_addr,
I3C_ADDR_SLOT_FREE);
}
static int i3c_master_get_i3c_addrs(struct i3c_dev_desc *dev)
{
struct i3c_master_controller *master = i3c_dev_get_master(dev);
enum i3c_addr_slot_status status;
if (!dev->info.static_addr && !dev->info.dyn_addr)
return 0;
if (dev->info.static_addr) {
status = i3c_bus_get_addr_slot_status(&master->bus,
dev->info.static_addr);
/* Since static address and assigned dynamic address can be
* equal, allow this case to pass.
*/
if (status != I3C_ADDR_SLOT_FREE &&
dev->info.static_addr != dev->boardinfo->init_dyn_addr)
return -EBUSY;
i3c_bus_set_addr_slot_status(&master->bus,
dev->info.static_addr,
I3C_ADDR_SLOT_I3C_DEV);
}
/*
* ->init_dyn_addr should have been reserved before that, so, if we're
* trying to apply a pre-reserved dynamic address, we should not try
* to reserve the address slot a second time.
*/
if (dev->info.dyn_addr &&
(!dev->boardinfo ||
dev->boardinfo->init_dyn_addr != dev->info.dyn_addr)) {
status = i3c_bus_get_addr_slot_status(&master->bus,
dev->info.dyn_addr);
if (status != I3C_ADDR_SLOT_FREE)
goto err_release_static_addr;
i3c_bus_set_addr_slot_status(&master->bus, dev->info.dyn_addr,
I3C_ADDR_SLOT_I3C_DEV);
}
return 0;
err_release_static_addr:
if (dev->info.static_addr)
i3c_bus_set_addr_slot_status(&master->bus,
dev->info.static_addr,
I3C_ADDR_SLOT_FREE);
return -EBUSY;
}
static int i3c_master_attach_i3c_dev(struct i3c_master_controller *master,
struct i3c_dev_desc *dev)
{
int ret;
/*
* We don't attach devices to the controller until they are
* addressable on the bus.
*/
if (!dev->info.static_addr && !dev->info.dyn_addr)
return 0;
ret = i3c_master_get_i3c_addrs(dev);
if (ret)
return ret;
/* Do not attach the master device itself. */
if (master->this != dev && master->ops->attach_i3c_dev) {
ret = master->ops->attach_i3c_dev(dev);
if (ret) {
i3c_master_put_i3c_addrs(dev);
return ret;
}
}
list_add_tail(&dev->common.node, &master->bus.devs.i3c);
return 0;
}
static int i3c_master_reattach_i3c_dev(struct i3c_dev_desc *dev,
u8 old_dyn_addr)
{
struct i3c_master_controller *master = i3c_dev_get_master(dev);
enum i3c_addr_slot_status status;
int ret;
if (dev->info.dyn_addr != old_dyn_addr &&
(!dev->boardinfo ||
dev->info.dyn_addr != dev->boardinfo->init_dyn_addr)) {
status = i3c_bus_get_addr_slot_status(&master->bus,
dev->info.dyn_addr);
if (status != I3C_ADDR_SLOT_FREE)
return -EBUSY;
i3c_bus_set_addr_slot_status(&master->bus,
dev->info.dyn_addr,
I3C_ADDR_SLOT_I3C_DEV);
if (old_dyn_addr)
i3c_bus_set_addr_slot_status(&master->bus, old_dyn_addr,
I3C_ADDR_SLOT_FREE);
}
if (master->ops->reattach_i3c_dev) {
ret = master->ops->reattach_i3c_dev(dev, old_dyn_addr);
if (ret) {
i3c_master_put_i3c_addrs(dev);
return ret;
}
}
return 0;
}
static void i3c_master_detach_i3c_dev(struct i3c_dev_desc *dev)
{
struct i3c_master_controller *master = i3c_dev_get_master(dev);
/* Do not detach the master device itself. */
if (master->this != dev && master->ops->detach_i3c_dev)
master->ops->detach_i3c_dev(dev);
i3c_master_put_i3c_addrs(dev);
list_del(&dev->common.node);
}
static int i3c_master_attach_i2c_dev(struct i3c_master_controller *master,
struct i2c_dev_desc *dev)
{
int ret;
if (master->ops->attach_i2c_dev) {
ret = master->ops->attach_i2c_dev(dev);
if (ret)
return ret;
}
list_add_tail(&dev->common.node, &master->bus.devs.i2c);
return 0;
}
static void i3c_master_detach_i2c_dev(struct i2c_dev_desc *dev)
{
struct i3c_master_controller *master = i2c_dev_get_master(dev);
list_del(&dev->common.node);
if (master->ops->detach_i2c_dev)
master->ops->detach_i2c_dev(dev);
}
static int i3c_master_early_i3c_dev_add(struct i3c_master_controller *master,
struct i3c_dev_boardinfo *boardinfo)
{
struct i3c_device_info info = {
.static_addr = boardinfo->static_addr,
.pid = boardinfo->pid,
};
struct i3c_dev_desc *i3cdev;
int ret;
i3cdev = i3c_master_alloc_i3c_dev(master, &info);
if (IS_ERR(i3cdev))
		return PTR_ERR(i3cdev);
i3cdev->boardinfo = boardinfo;
ret = i3c_master_attach_i3c_dev(master, i3cdev);
if (ret)
goto err_free_dev;
ret = i3c_master_setdasa_locked(master, i3cdev->info.static_addr,
i3cdev->boardinfo->init_dyn_addr);
if (ret)
goto err_detach_dev;
i3cdev->info.dyn_addr = i3cdev->boardinfo->init_dyn_addr;
ret = i3c_master_reattach_i3c_dev(i3cdev, 0);
if (ret)
goto err_rstdaa;
ret = i3c_master_retrieve_dev_info(i3cdev);
if (ret)
goto err_rstdaa;
return 0;
err_rstdaa:
i3c_master_rstdaa_locked(master, i3cdev->boardinfo->init_dyn_addr);
err_detach_dev:
i3c_master_detach_i3c_dev(i3cdev);
err_free_dev:
i3c_master_free_i3c_dev(i3cdev);
return ret;
}
static void
i3c_master_register_new_i3c_devs(struct i3c_master_controller *master)
{
struct i3c_dev_desc *desc;
int ret;
if (!master->init_done)
return;
i3c_bus_for_each_i3cdev(&master->bus, desc) {
if (desc->dev || !desc->info.dyn_addr || desc == master->this)
continue;
desc->dev = kzalloc(sizeof(*desc->dev), GFP_KERNEL);
if (!desc->dev)
continue;
desc->dev->bus = &master->bus;
desc->dev->desc = desc;
desc->dev->dev.parent = &master->dev;
desc->dev->dev.type = &i3c_device_type;
desc->dev->dev.bus = &i3c_bus_type;
desc->dev->dev.release = i3c_device_release;
dev_set_name(&desc->dev->dev, "%d-%llx", master->bus.id,
desc->info.pid);
if (desc->boardinfo)
desc->dev->dev.of_node = desc->boardinfo->of_node;
ret = device_register(&desc->dev->dev);
if (ret)
dev_err(&master->dev,
"Failed to add I3C device (err = %d)\n", ret);
}
}
/**
* i3c_master_do_daa() - do a DAA (Dynamic Address Assignment)
* @master: master doing the DAA
*
 * This function triggers the DAA procedure implemented by the master
 * controller driver (&i3c_master_controller_ops->do_daa()) and then
 * registers the I3C devices discovered during the procedure. All device
 * information is automatically retrieved using standard CCC commands.
 *
 * This function takes the bus lock itself, so it must not be called with
 * the bus lock already held.
 *
 * Return: 0 in case of success, a negative error code otherwise.
*/
int i3c_master_do_daa(struct i3c_master_controller *master)
{
int ret;
i3c_bus_maintenance_lock(&master->bus);
ret = master->ops->do_daa(master);
i3c_bus_maintenance_unlock(&master->bus);
if (ret)
return ret;
i3c_bus_normaluse_lock(&master->bus);
i3c_master_register_new_i3c_devs(master);
i3c_bus_normaluse_unlock(&master->bus);
return 0;
}
EXPORT_SYMBOL_GPL(i3c_master_do_daa);
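/*
 * Usage sketch (illustrative only): since i3c_master_do_daa() takes the bus
 * lock itself, drivers usually call it from a workqueue after ACKing a
 * Hot-Join rather than from the IRQ path. struct my_drv is hypothetical.
 *
 *	static void my_drv_hj_work(struct work_struct *work)
 *	{
 *		struct my_drv *priv = container_of(work, struct my_drv,
 *						   hj_work);
 *		int ret;
 *
 *		ret = i3c_master_do_daa(&priv->base);
 *		if (ret)
 *			dev_err(&priv->base.dev,
 *				"post-Hot-Join DAA failed: %d\n", ret);
 *	}
 */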
/**
* i3c_master_set_info() - set master device information
* @master: master used to send frames on the bus
* @info: I3C device information
*
* Set master device info. This should be called from
* &i3c_master_controller_ops->bus_init().
*
* Not all &i3c_device_info fields are meaningful for a master device.
* Here is a list of fields that should be properly filled:
*
* - &i3c_device_info->dyn_addr
* - &i3c_device_info->bcr
* - &i3c_device_info->dcr
* - &i3c_device_info->pid
* - &i3c_device_info->hdr_cap if %I3C_BCR_HDR_CAP bit is set in
* &i3c_device_info->bcr
*
* This function must be called with the bus lock held in maintenance mode.
*
* Return: 0 if @info contains valid information (not every piece of
* information can be checked, but we can at least make sure @info->dyn_addr
* and @info->bcr are correct), -EINVAL otherwise.
*/
int i3c_master_set_info(struct i3c_master_controller *master,
const struct i3c_device_info *info)
{
struct i3c_dev_desc *i3cdev;
int ret;
if (!i3c_bus_dev_addr_is_avail(&master->bus, info->dyn_addr))
return -EINVAL;
if (I3C_BCR_DEVICE_ROLE(info->bcr) == I3C_BCR_I3C_MASTER &&
master->secondary)
return -EINVAL;
if (master->this)
return -EINVAL;
i3cdev = i3c_master_alloc_i3c_dev(master, info);
if (IS_ERR(i3cdev))
return PTR_ERR(i3cdev);
master->this = i3cdev;
master->bus.cur_master = master->this;
ret = i3c_master_attach_i3c_dev(master, i3cdev);
if (ret)
goto err_free_dev;
return 0;
err_free_dev:
i3c_master_free_i3c_dev(i3cdev);
return ret;
}
EXPORT_SYMBOL_GPL(i3c_master_set_info);
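/*
 * Usage sketch (illustrative only): the i3c_master_set_info() call expected
 * from a ->bus_init() implementation. MY_DRV_PID and the register
 * programming step are hypothetical.
 *
 *	static int my_drv_bus_init(struct i3c_master_controller *m)
 *	{
 *		struct i3c_device_info info = { };
 *		int ret;
 *
 *		ret = i3c_master_get_free_addr(m, 0);
 *		if (ret < 0)
 *			return ret;
 *
 *		info.dyn_addr = ret;
 *		info.bcr = I3C_BCR_I3C_MASTER;
 *		info.dcr = 0;
 *		info.pid = MY_DRV_PID;	// hypothetical constant
 *
 *		// Program info.dyn_addr into the controller, then:
 *		return i3c_master_set_info(m, &info);
 *	}
 */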
static void i3c_master_detach_free_devs(struct i3c_master_controller *master)
{
struct i3c_dev_desc *i3cdev, *i3ctmp;
struct i2c_dev_desc *i2cdev, *i2ctmp;
list_for_each_entry_safe(i3cdev, i3ctmp, &master->bus.devs.i3c,
common.node) {
i3c_master_detach_i3c_dev(i3cdev);
if (i3cdev->boardinfo && i3cdev->boardinfo->init_dyn_addr)
i3c_bus_set_addr_slot_status(&master->bus,
i3cdev->boardinfo->init_dyn_addr,
I3C_ADDR_SLOT_FREE);
i3c_master_free_i3c_dev(i3cdev);
}
list_for_each_entry_safe(i2cdev, i2ctmp, &master->bus.devs.i2c,
common.node) {
i3c_master_detach_i2c_dev(i2cdev);
i3c_bus_set_addr_slot_status(&master->bus,
i2cdev->addr,
I3C_ADDR_SLOT_FREE);
i3c_master_free_i2c_dev(i2cdev);
}
}
/**
* i3c_master_bus_init() - initialize an I3C bus
* @master: main master initializing the bus
*
 * This function follows the initialization steps described in the I3C
 * specification:
*
* 1. Attach I2C devs to the master so that the master can fill its internal
* device table appropriately
*
* 2. Call &i3c_master_controller_ops->bus_init() method to initialize
* the master controller. That's usually where the bus mode is selected
* (pure bus or mixed fast/slow bus)
*
* 3. Instruct all devices on the bus to drop their dynamic address. This is
* particularly important when the bus was previously configured by someone
* else (for example the bootloader)
*
* 4. Disable all slave events.
*
 * 5. Reserve address slots for I3C devices with init_dyn_addr. If those
 *    devices also have a static_addr, try to pre-assign the dynamic
 *    addresses requested by the FW with SETDASA and attach the
 *    corresponding statically defined I3C devices to the master.
*
* 6. Do a DAA (Dynamic Address Assignment) to assign dynamic addresses to all
* remaining I3C devices
*
* Once this is done, all I3C and I2C devices should be usable.
*
 * Return: 0 in case of success, a negative error code otherwise.
*/
static int i3c_master_bus_init(struct i3c_master_controller *master)
{
enum i3c_addr_slot_status status;
struct i2c_dev_boardinfo *i2cboardinfo;
struct i3c_dev_boardinfo *i3cboardinfo;
struct i2c_dev_desc *i2cdev;
int ret;
/*
* First attach all devices with static definitions provided by the
* FW.
*/
list_for_each_entry(i2cboardinfo, &master->boardinfo.i2c, node) {
status = i3c_bus_get_addr_slot_status(&master->bus,
i2cboardinfo->base.addr);
if (status != I3C_ADDR_SLOT_FREE) {
ret = -EBUSY;
goto err_detach_devs;
}
i3c_bus_set_addr_slot_status(&master->bus,
i2cboardinfo->base.addr,
I3C_ADDR_SLOT_I2C_DEV);
i2cdev = i3c_master_alloc_i2c_dev(master,
i2cboardinfo->base.addr,
i2cboardinfo->lvr);
if (IS_ERR(i2cdev)) {
ret = PTR_ERR(i2cdev);
goto err_detach_devs;
}
ret = i3c_master_attach_i2c_dev(master, i2cdev);
if (ret) {
i3c_master_free_i2c_dev(i2cdev);
goto err_detach_devs;
}
}
/*
* Now execute the controller specific ->bus_init() routine, which
* might configure its internal logic to match the bus limitations.
*/
ret = master->ops->bus_init(master);
if (ret)
goto err_detach_devs;
/*
* The master device should have been instantiated in ->bus_init(),
* complain if this was not the case.
*/
if (!master->this) {
dev_err(&master->dev,
"master_set_info() was not called in ->bus_init()\n");
ret = -EINVAL;
goto err_bus_cleanup;
}
/*
	 * Reset all dynamic addresses that may have been assigned before
	 * (by the bootloader, for example).
*/
ret = i3c_master_rstdaa_locked(master, I3C_BROADCAST_ADDR);
if (ret && ret != I3C_ERROR_M2)
goto err_bus_cleanup;
/* Disable all slave events before starting DAA. */
ret = i3c_master_disec_locked(master, I3C_BROADCAST_ADDR,
I3C_CCC_EVENT_SIR | I3C_CCC_EVENT_MR |
I3C_CCC_EVENT_HJ);
if (ret && ret != I3C_ERROR_M2)
goto err_bus_cleanup;
/*
	 * Reserve init_dyn_addr first, and then try to pre-assign the
	 * dynamic address and retrieve device information if needed.
	 * If pre-assigning the dynamic address fails, setting the dynamic
	 * address to the requested init_dyn_addr is retried after DAA is
	 * done in i3c_master_add_i3c_dev_locked().
*/
list_for_each_entry(i3cboardinfo, &master->boardinfo.i3c, node) {
/*
* We don't reserve a dynamic address for devices that
* don't explicitly request one.
*/
if (!i3cboardinfo->init_dyn_addr)
continue;
ret = i3c_bus_get_addr_slot_status(&master->bus,
i3cboardinfo->init_dyn_addr);
if (ret != I3C_ADDR_SLOT_FREE) {
ret = -EBUSY;
goto err_rstdaa;
}
i3c_bus_set_addr_slot_status(&master->bus,
i3cboardinfo->init_dyn_addr,
I3C_ADDR_SLOT_I3C_DEV);
/*
* Only try to create/attach devices that have a static
* address. Other devices will be created/attached when
* DAA happens, and the requested dynamic address will
* be set using SETNEWDA once those devices become
* addressable.
*/
if (i3cboardinfo->static_addr)
i3c_master_early_i3c_dev_add(master, i3cboardinfo);
}
ret = i3c_master_do_daa(master);
if (ret)
goto err_rstdaa;
return 0;
err_rstdaa:
i3c_master_rstdaa_locked(master, I3C_BROADCAST_ADDR);
err_bus_cleanup:
if (master->ops->bus_cleanup)
master->ops->bus_cleanup(master);
err_detach_devs:
i3c_master_detach_free_devs(master);
return ret;
}
static void i3c_master_bus_cleanup(struct i3c_master_controller *master)
{
if (master->ops->bus_cleanup)
master->ops->bus_cleanup(master);
i3c_master_detach_free_devs(master);
}
static void i3c_master_attach_boardinfo(struct i3c_dev_desc *i3cdev)
{
struct i3c_master_controller *master = i3cdev->common.master;
struct i3c_dev_boardinfo *i3cboardinfo;
list_for_each_entry(i3cboardinfo, &master->boardinfo.i3c, node) {
if (i3cdev->info.pid != i3cboardinfo->pid)
continue;
i3cdev->boardinfo = i3cboardinfo;
i3cdev->info.static_addr = i3cboardinfo->static_addr;
return;
}
}
static struct i3c_dev_desc *
i3c_master_search_i3c_dev_duplicate(struct i3c_dev_desc *refdev)
{
struct i3c_master_controller *master = i3c_dev_get_master(refdev);
struct i3c_dev_desc *i3cdev;
i3c_bus_for_each_i3cdev(&master->bus, i3cdev) {
if (i3cdev != refdev && i3cdev->info.pid == refdev->info.pid)
return i3cdev;
}
return NULL;
}
/**
* i3c_master_add_i3c_dev_locked() - add an I3C slave to the bus
* @master: master used to send frames on the bus
* @addr: I3C slave dynamic address assigned to the device
*
 * This function instantiates an I3C device object and adds it to the
 * I3C device list. All device information is automatically retrieved using
 * standard CCC commands.
 *
 * This function must be called with the bus lock held in write mode.
 *
 * Return: 0 in case of success, a negative error code otherwise.
*/
int i3c_master_add_i3c_dev_locked(struct i3c_master_controller *master,
u8 addr)
{
struct i3c_device_info info = { .dyn_addr = addr };
struct i3c_dev_desc *newdev, *olddev;
u8 old_dyn_addr = addr, expected_dyn_addr;
struct i3c_ibi_setup ibireq = { };
bool enable_ibi = false;
int ret;
if (!master)
return -EINVAL;
newdev = i3c_master_alloc_i3c_dev(master, &info);
if (IS_ERR(newdev))
return PTR_ERR(newdev);
ret = i3c_master_attach_i3c_dev(master, newdev);
if (ret)
goto err_free_dev;
ret = i3c_master_retrieve_dev_info(newdev);
if (ret)
goto err_detach_dev;
i3c_master_attach_boardinfo(newdev);
olddev = i3c_master_search_i3c_dev_duplicate(newdev);
if (olddev) {
newdev->dev = olddev->dev;
if (newdev->dev)
newdev->dev->desc = newdev;
/*
* We need to restore the IBI state too, so let's save the
* IBI information and try to restore them after olddev has
* been detached+released and its IBI has been stopped and
* the associated resources have been freed.
*/
mutex_lock(&olddev->ibi_lock);
if (olddev->ibi) {
ibireq.handler = olddev->ibi->handler;
ibireq.max_payload_len = olddev->ibi->max_payload_len;
ibireq.num_slots = olddev->ibi->num_slots;
if (olddev->ibi->enabled) {
enable_ibi = true;
i3c_dev_disable_ibi_locked(olddev);
}
i3c_dev_free_ibi_locked(olddev);
}
mutex_unlock(&olddev->ibi_lock);
old_dyn_addr = olddev->info.dyn_addr;
i3c_master_detach_i3c_dev(olddev);
i3c_master_free_i3c_dev(olddev);
}
/*
* Depending on our previous state, the expected dynamic address might
* differ:
* - if the device already had a dynamic address assigned, let's try to
* re-apply this one
* - if the device did not have a dynamic address and the firmware
* requested a specific address, pick this one
* - in any other case, keep the address automatically assigned by the
* master
*/
if (old_dyn_addr && old_dyn_addr != newdev->info.dyn_addr)
expected_dyn_addr = old_dyn_addr;
else if (newdev->boardinfo && newdev->boardinfo->init_dyn_addr)
expected_dyn_addr = newdev->boardinfo->init_dyn_addr;
else
expected_dyn_addr = newdev->info.dyn_addr;
if (newdev->info.dyn_addr != expected_dyn_addr) {
/*
* Try to apply the expected dynamic address. If it fails, keep
* the address assigned by the master.
*/
ret = i3c_master_setnewda_locked(master,
newdev->info.dyn_addr,
expected_dyn_addr);
if (!ret) {
old_dyn_addr = newdev->info.dyn_addr;
newdev->info.dyn_addr = expected_dyn_addr;
i3c_master_reattach_i3c_dev(newdev, old_dyn_addr);
} else {
			dev_err(&master->dev,
				"Failed to assign reserved/old address to device %d-%llx\n",
				master->bus.id, newdev->info.pid);
}
}
/*
	 * Now it is time to try to restore the IBI setup. If we're lucky,
	 * everything works as before; otherwise, all we can do is complain.
	 * FIXME: maybe we should add a callback to inform the driver that it
	 * should request the IBI again instead of trying to hide that from
	 * it.
*/
if (ibireq.handler) {
mutex_lock(&newdev->ibi_lock);
ret = i3c_dev_request_ibi_locked(newdev, &ibireq);
if (ret) {
dev_err(&master->dev,
"Failed to request IBI on device %d-%llx",
master->bus.id, newdev->info.pid);
} else if (enable_ibi) {
ret = i3c_dev_enable_ibi_locked(newdev);
if (ret)
dev_err(&master->dev,
"Failed to re-enable IBI on device %d-%llx",
master->bus.id, newdev->info.pid);
}
mutex_unlock(&newdev->ibi_lock);
}
return 0;
err_detach_dev:
if (newdev->dev && newdev->dev->desc)
newdev->dev->desc = NULL;
i3c_master_detach_i3c_dev(newdev);
err_free_dev:
i3c_master_free_i3c_dev(newdev);
return ret;
}
EXPORT_SYMBOL_GPL(i3c_master_add_i3c_dev_locked);
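/*
 * Usage sketch (illustrative only): once the hardware reports a newly
 * assigned dynamic address (at the end of DAA or after a Hot-Join), the
 * driver hands it over to the core. The iterator below is hypothetical.
 *
 *	// Called with the bus lock held in write mode (DAA context).
 *	for_each_assigned_addr(priv, addr) {
 *		ret = i3c_master_add_i3c_dev_locked(&priv->base, addr);
 *		if (ret)
 *			dev_err(&priv->base.dev,
 *				"failed to add dev at %#x: %d\n", addr, ret);
 *	}
 */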
#define OF_I3C_REG1_IS_I2C_DEV BIT(31)
static int
of_i3c_master_add_i2c_boardinfo(struct i3c_master_controller *master,
struct device_node *node, u32 *reg)
{
struct i2c_dev_boardinfo *boardinfo;
struct device *dev = &master->dev;
int ret;
boardinfo = devm_kzalloc(dev, sizeof(*boardinfo), GFP_KERNEL);
if (!boardinfo)
return -ENOMEM;
ret = of_i2c_get_board_info(dev, node, &boardinfo->base);
if (ret)
return ret;
/*
	 * The I3C Specification does not clearly say I2C devices with 10-bit
	 * addresses are supported. These devices can't be passed properly
	 * through the DEFSLVS command.
	 */
	if (boardinfo->base.flags & I2C_CLIENT_TEN) {
		dev_err(dev, "I2C device with 10-bit address not supported\n");
return -ENOTSUPP;
}
/* LVR is encoded in reg[2]. */
boardinfo->lvr = reg[2];
list_add_tail(&boardinfo->node, &master->boardinfo.i2c);
of_node_get(node);
return 0;
}
static int
of_i3c_master_add_i3c_boardinfo(struct i3c_master_controller *master,
struct device_node *node, u32 *reg)
{
struct i3c_dev_boardinfo *boardinfo;
struct device *dev = &master->dev;
enum i3c_addr_slot_status addrstatus;
u32 init_dyn_addr = 0;
boardinfo = devm_kzalloc(dev, sizeof(*boardinfo), GFP_KERNEL);
if (!boardinfo)
return -ENOMEM;
if (reg[0]) {
if (reg[0] > I3C_MAX_ADDR)
return -EINVAL;
addrstatus = i3c_bus_get_addr_slot_status(&master->bus,
reg[0]);
if (addrstatus != I3C_ADDR_SLOT_FREE)
return -EINVAL;
}
boardinfo->static_addr = reg[0];
if (!of_property_read_u32(node, "assigned-address", &init_dyn_addr)) {
if (init_dyn_addr > I3C_MAX_ADDR)
return -EINVAL;
addrstatus = i3c_bus_get_addr_slot_status(&master->bus,
init_dyn_addr);
if (addrstatus != I3C_ADDR_SLOT_FREE)
return -EINVAL;
}
boardinfo->pid = ((u64)reg[1] << 32) | reg[2];
if ((boardinfo->pid & GENMASK_ULL(63, 48)) ||
I3C_PID_RND_LOWER_32BITS(boardinfo->pid))
return -EINVAL;
boardinfo->init_dyn_addr = init_dyn_addr;
boardinfo->of_node = of_node_get(node);
list_add_tail(&boardinfo->node, &master->boardinfo.i3c);
return 0;
}
static int of_i3c_master_add_dev(struct i3c_master_controller *master,
struct device_node *node)
{
u32 reg[3];
int ret;
if (!master || !node)
return -EINVAL;
ret = of_property_read_u32_array(node, "reg", reg, ARRAY_SIZE(reg));
if (ret)
return ret;
/*
* The manufacturer ID can't be 0. If reg[1] == 0 that means we're
* dealing with an I2C device.
*/
if (!reg[1])
ret = of_i3c_master_add_i2c_boardinfo(master, node, reg);
else
ret = of_i3c_master_add_i3c_boardinfo(master, node, reg);
return ret;
}
static int of_populate_i3c_bus(struct i3c_master_controller *master)
{
struct device *dev = &master->dev;
struct device_node *i3cbus_np = dev->of_node;
struct device_node *node;
int ret;
u32 val;
if (!i3cbus_np)
return 0;
for_each_available_child_of_node(i3cbus_np, node) {
ret = of_i3c_master_add_dev(master, node);
if (ret) {
of_node_put(node);
return ret;
}
}
/*
	 * The user might want to limit I2C and I3C speed in case some devices
	 * on the bus do not support typical rates, or if the bus topology
	 * prevents the use of the maximum possible rate.
*/
if (!of_property_read_u32(i3cbus_np, "i2c-scl-hz", &val))
master->bus.scl_rate.i2c = val;
if (!of_property_read_u32(i3cbus_np, "i3c-scl-hz", &val))
master->bus.scl_rate.i3c = val;
return 0;
}
static int i3c_master_i2c_adapter_xfer(struct i2c_adapter *adap,
struct i2c_msg *xfers, int nxfers)
{
struct i3c_master_controller *master = i2c_adapter_to_i3c_master(adap);
struct i2c_dev_desc *dev;
int i, ret;
u16 addr;
if (!xfers || !master || nxfers <= 0)
return -EINVAL;
if (!master->ops->i2c_xfers)
return -ENOTSUPP;
/* Doing transfers to different devices is not supported. */
addr = xfers[0].addr;
for (i = 1; i < nxfers; i++) {
if (addr != xfers[i].addr)
return -ENOTSUPP;
}
i3c_bus_normaluse_lock(&master->bus);
dev = i3c_master_find_i2c_dev_by_addr(master, addr);
if (!dev)
ret = -ENOENT;
else
ret = master->ops->i2c_xfers(dev, xfers, nxfers);
i3c_bus_normaluse_unlock(&master->bus);
return ret ? ret : nxfers;
}
static u32 i3c_master_i2c_funcs(struct i2c_adapter *adapter)
{
return I2C_FUNC_SMBUS_EMUL | I2C_FUNC_I2C;
}
static u8 i3c_master_i2c_get_lvr(struct i2c_client *client)
{
/* Fall back to no spike filters and FM bus mode. */
u8 lvr = I3C_LVR_I2C_INDEX(2) | I3C_LVR_I2C_FM_MODE;
if (client->dev.of_node) {
u32 reg[3];
if (!of_property_read_u32_array(client->dev.of_node, "reg",
reg, ARRAY_SIZE(reg)))
lvr = reg[2];
}
return lvr;
}
static int i3c_master_i2c_attach(struct i2c_adapter *adap, struct i2c_client *client)
{
struct i3c_master_controller *master = i2c_adapter_to_i3c_master(adap);
enum i3c_addr_slot_status status;
struct i2c_dev_desc *i2cdev;
int ret;
/* Already added by board info? */
if (i3c_master_find_i2c_dev_by_addr(master, client->addr))
return 0;
status = i3c_bus_get_addr_slot_status(&master->bus, client->addr);
if (status != I3C_ADDR_SLOT_FREE)
return -EBUSY;
i3c_bus_set_addr_slot_status(&master->bus, client->addr,
I3C_ADDR_SLOT_I2C_DEV);
i2cdev = i3c_master_alloc_i2c_dev(master, client->addr,
i3c_master_i2c_get_lvr(client));
if (IS_ERR(i2cdev)) {
ret = PTR_ERR(i2cdev);
goto out_clear_status;
}
ret = i3c_master_attach_i2c_dev(master, i2cdev);
if (ret)
goto out_free_dev;
return 0;
out_free_dev:
i3c_master_free_i2c_dev(i2cdev);
out_clear_status:
i3c_bus_set_addr_slot_status(&master->bus, client->addr,
I3C_ADDR_SLOT_FREE);
return ret;
}
static int i3c_master_i2c_detach(struct i2c_adapter *adap, struct i2c_client *client)
{
struct i3c_master_controller *master = i2c_adapter_to_i3c_master(adap);
struct i2c_dev_desc *dev;
dev = i3c_master_find_i2c_dev_by_addr(master, client->addr);
if (!dev)
return -ENODEV;
i3c_master_detach_i2c_dev(dev);
i3c_bus_set_addr_slot_status(&master->bus, dev->addr,
I3C_ADDR_SLOT_FREE);
i3c_master_free_i2c_dev(dev);
return 0;
}
static const struct i2c_algorithm i3c_master_i2c_algo = {
.master_xfer = i3c_master_i2c_adapter_xfer,
.functionality = i3c_master_i2c_funcs,
};
static int i3c_i2c_notifier_call(struct notifier_block *nb, unsigned long action,
void *data)
{
struct i2c_adapter *adap;
struct i2c_client *client;
struct device *dev = data;
struct i3c_master_controller *master;
	/* Default to 0 so unhandled notifier actions are simply ignored. */
	int ret = 0;
if (dev->type != &i2c_client_type)
return 0;
client = to_i2c_client(dev);
adap = client->adapter;
if (adap->algo != &i3c_master_i2c_algo)
return 0;
master = i2c_adapter_to_i3c_master(adap);
i3c_bus_maintenance_lock(&master->bus);
switch (action) {
case BUS_NOTIFY_ADD_DEVICE:
ret = i3c_master_i2c_attach(adap, client);
break;
case BUS_NOTIFY_DEL_DEVICE:
ret = i3c_master_i2c_detach(adap, client);
break;
}
i3c_bus_maintenance_unlock(&master->bus);
return ret;
}
static struct notifier_block i2cdev_notifier = {
.notifier_call = i3c_i2c_notifier_call,
};
static int i3c_master_i2c_adapter_init(struct i3c_master_controller *master)
{
struct i2c_adapter *adap = i3c_master_to_i2c_adapter(master);
struct i2c_dev_desc *i2cdev;
struct i2c_dev_boardinfo *i2cboardinfo;
int ret;
adap->dev.parent = master->dev.parent;
adap->owner = master->dev.parent->driver->owner;
adap->algo = &i3c_master_i2c_algo;
	strscpy(adap->name, dev_name(master->dev.parent), sizeof(adap->name));
/* FIXME: Should we allow i3c masters to override these values? */
adap->timeout = 1000;
adap->retries = 3;
ret = i2c_add_adapter(adap);
if (ret)
return ret;
/*
* We silently ignore failures here. The bus should keep working
* correctly even if one or more i2c devices are not registered.
*/
list_for_each_entry(i2cboardinfo, &master->boardinfo.i2c, node) {
i2cdev = i3c_master_find_i2c_dev_by_addr(master,
i2cboardinfo->base.addr);
if (WARN_ON(!i2cdev))
continue;
i2cdev->dev = i2c_new_client_device(adap, &i2cboardinfo->base);
}
return 0;
}
static void i3c_master_i2c_adapter_cleanup(struct i3c_master_controller *master)
{
struct i2c_dev_desc *i2cdev;
i2c_del_adapter(&master->i2c);
i3c_bus_for_each_i2cdev(&master->bus, i2cdev)
i2cdev->dev = NULL;
}
static void i3c_master_unregister_i3c_devs(struct i3c_master_controller *master)
{
struct i3c_dev_desc *i3cdev;
i3c_bus_for_each_i3cdev(&master->bus, i3cdev) {
if (!i3cdev->dev)
continue;
i3cdev->dev->desc = NULL;
if (device_is_registered(&i3cdev->dev->dev))
device_unregister(&i3cdev->dev->dev);
else
put_device(&i3cdev->dev->dev);
i3cdev->dev = NULL;
}
}
/**
* i3c_master_queue_ibi() - Queue an IBI
* @dev: the device this IBI is coming from
* @slot: the IBI slot used to store the payload
*
* Queue an IBI to the controller workqueue. The IBI handler attached to
* the dev will be called from a workqueue context.
*/
void i3c_master_queue_ibi(struct i3c_dev_desc *dev, struct i3c_ibi_slot *slot)
{
atomic_inc(&dev->ibi->pending_ibis);
queue_work(dev->common.master->wq, &slot->work);
}
EXPORT_SYMBOL_GPL(i3c_master_queue_ibi);
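/*
 * Usage sketch (illustrative only): the IRQ path of a controller driver
 * using the generic IBI pool helpers below. Hardware FIFO access and the
 * my_drv_* names are hypothetical.
 *
 *	static void my_drv_handle_ibi_irq(struct my_drv *priv,
 *					  struct i3c_dev_desc *dev, u8 len)
 *	{
 *		struct i3c_ibi_slot *slot;
 *
 *		slot = i3c_generic_ibi_get_free_slot(priv->ibi_pool);
 *		if (!slot)
 *			return;	// no free slot: this IBI payload is lost
 *
 *		// Copy the payload out of the hardware FIFO.
 *		my_drv_read_ibi_fifo(priv, slot->data, len);
 *		slot->len = len;
 *
 *		// Hand the slot to the core: the registered handler runs
 *		// from a workqueue, not from this IRQ context.
 *		i3c_master_queue_ibi(dev, slot);
 *	}
 */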
static void i3c_master_handle_ibi(struct work_struct *work)
{
struct i3c_ibi_slot *slot = container_of(work, struct i3c_ibi_slot,
work);
struct i3c_dev_desc *dev = slot->dev;
struct i3c_master_controller *master = i3c_dev_get_master(dev);
struct i3c_ibi_payload payload;
payload.data = slot->data;
payload.len = slot->len;
if (dev->dev)
dev->ibi->handler(dev->dev, &payload);
master->ops->recycle_ibi_slot(dev, slot);
if (atomic_dec_and_test(&dev->ibi->pending_ibis))
complete(&dev->ibi->all_ibis_handled);
}
static void i3c_master_init_ibi_slot(struct i3c_dev_desc *dev,
struct i3c_ibi_slot *slot)
{
slot->dev = dev;
INIT_WORK(&slot->work, i3c_master_handle_ibi);
}
struct i3c_generic_ibi_slot {
struct list_head node;
struct i3c_ibi_slot base;
};
struct i3c_generic_ibi_pool {
spinlock_t lock;
unsigned int num_slots;
struct i3c_generic_ibi_slot *slots;
void *payload_buf;
struct list_head free_slots;
struct list_head pending;
};
/**
* i3c_generic_ibi_free_pool() - Free a generic IBI pool
* @pool: the IBI pool to free
*
 * Free all IBI slots allocated by a generic IBI pool.
*/
void i3c_generic_ibi_free_pool(struct i3c_generic_ibi_pool *pool)
{
struct i3c_generic_ibi_slot *slot;
unsigned int nslots = 0;
while (!list_empty(&pool->free_slots)) {
slot = list_first_entry(&pool->free_slots,
struct i3c_generic_ibi_slot, node);
list_del(&slot->node);
nslots++;
}
/*
* If the number of freed slots is not equal to the number of allocated
* slots we have a leak somewhere.
*/
WARN_ON(nslots != pool->num_slots);
kfree(pool->payload_buf);
kfree(pool->slots);
kfree(pool);
}
EXPORT_SYMBOL_GPL(i3c_generic_ibi_free_pool);
/**
* i3c_generic_ibi_alloc_pool() - Create a generic IBI pool
* @dev: the device this pool will be used for
* @req: IBI setup request describing what the device driver expects
*
* Create a generic IBI pool based on the information provided in @req.
*
* Return: a valid IBI pool in case of success, an ERR_PTR() otherwise.
*/
struct i3c_generic_ibi_pool *
i3c_generic_ibi_alloc_pool(struct i3c_dev_desc *dev,
const struct i3c_ibi_setup *req)
{
struct i3c_generic_ibi_pool *pool;
struct i3c_generic_ibi_slot *slot;
unsigned int i;
int ret;
pool = kzalloc(sizeof(*pool), GFP_KERNEL);
if (!pool)
return ERR_PTR(-ENOMEM);
spin_lock_init(&pool->lock);
INIT_LIST_HEAD(&pool->free_slots);
INIT_LIST_HEAD(&pool->pending);
pool->slots = kcalloc(req->num_slots, sizeof(*slot), GFP_KERNEL);
if (!pool->slots) {
ret = -ENOMEM;
goto err_free_pool;
}
if (req->max_payload_len) {
pool->payload_buf = kcalloc(req->num_slots,
req->max_payload_len, GFP_KERNEL);
if (!pool->payload_buf) {
ret = -ENOMEM;
goto err_free_pool;
}
}
for (i = 0; i < req->num_slots; i++) {
slot = &pool->slots[i];
i3c_master_init_ibi_slot(dev, &slot->base);
if (req->max_payload_len)
slot->base.data = pool->payload_buf +
(i * req->max_payload_len);
list_add_tail(&slot->node, &pool->free_slots);
pool->num_slots++;
}
return pool;
err_free_pool:
i3c_generic_ibi_free_pool(pool);
return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(i3c_generic_ibi_alloc_pool);
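/*
 * Usage sketch (illustrative only): ->request_ibi()/->free_ibi() hooks
 * backed by a generic IBI pool. The my_dev_data layout and its pairing with
 * i3c_dev_set_master_data() are assumptions.
 *
 *	static int my_drv_request_ibi(struct i3c_dev_desc *dev,
 *				      const struct i3c_ibi_setup *req)
 *	{
 *		struct my_dev_data *data = i3c_dev_get_master_data(dev);
 *
 *		data->ibi_pool = i3c_generic_ibi_alloc_pool(dev, req);
 *		if (IS_ERR(data->ibi_pool))
 *			return PTR_ERR(data->ibi_pool);
 *
 *		// Also program the controller to accept IBIs coming from
 *		// dev->info.dyn_addr here.
 *		return 0;
 *	}
 *
 *	static void my_drv_free_ibi(struct i3c_dev_desc *dev)
 *	{
 *		struct my_dev_data *data = i3c_dev_get_master_data(dev);
 *
 *		i3c_generic_ibi_free_pool(data->ibi_pool);
 *		data->ibi_pool = NULL;
 *	}
 */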
/**
* i3c_generic_ibi_get_free_slot() - Get a free slot from a generic IBI pool
* @pool: the pool to query an IBI slot on
*
* Search for a free slot in a generic IBI pool.
* The slot should be returned to the pool using i3c_generic_ibi_recycle_slot()
* when it's no longer needed.
*
* Return: a pointer to a free slot, or NULL if there's no free slot available.
*/
struct i3c_ibi_slot *
i3c_generic_ibi_get_free_slot(struct i3c_generic_ibi_pool *pool)
{
struct i3c_generic_ibi_slot *slot;
unsigned long flags;
spin_lock_irqsave(&pool->lock, flags);
slot = list_first_entry_or_null(&pool->free_slots,
struct i3c_generic_ibi_slot, node);
if (slot)
list_del(&slot->node);
spin_unlock_irqrestore(&pool->lock, flags);
return slot ? &slot->base : NULL;
}
EXPORT_SYMBOL_GPL(i3c_generic_ibi_get_free_slot);
/**
* i3c_generic_ibi_recycle_slot() - Return a slot to a generic IBI pool
* @pool: the pool to return the IBI slot to
* @s: IBI slot to recycle
*
 * Add an IBI slot back to its generic IBI pool. Should be called from the
 * master driver's &i3c_master_controller_ops->recycle_ibi_slot() method.
*/
void i3c_generic_ibi_recycle_slot(struct i3c_generic_ibi_pool *pool,
struct i3c_ibi_slot *s)
{
struct i3c_generic_ibi_slot *slot;
unsigned long flags;
if (!s)
return;
slot = container_of(s, struct i3c_generic_ibi_slot, base);
spin_lock_irqsave(&pool->lock, flags);
list_add_tail(&slot->node, &pool->free_slots);
spin_unlock_irqrestore(&pool->lock, flags);
}
EXPORT_SYMBOL_GPL(i3c_generic_ibi_recycle_slot);
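/*
 * Usage sketch (illustrative only): the matching ->recycle_ibi_slot() hook
 * is usually a one-liner (my_dev_data is hypothetical):
 *
 *	static void my_drv_recycle_ibi_slot(struct i3c_dev_desc *dev,
 *					    struct i3c_ibi_slot *slot)
 *	{
 *		struct my_dev_data *data = i3c_dev_get_master_data(dev);
 *
 *		i3c_generic_ibi_recycle_slot(data->ibi_pool, slot);
 *	}
 */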
static int i3c_master_check_ops(const struct i3c_master_controller_ops *ops)
{
if (!ops || !ops->bus_init || !ops->priv_xfers ||
!ops->send_ccc_cmd || !ops->do_daa || !ops->i2c_xfers)
return -EINVAL;
if (ops->request_ibi &&
(!ops->enable_ibi || !ops->disable_ibi || !ops->free_ibi ||
!ops->recycle_ibi_slot))
return -EINVAL;
return 0;
}
/**
* i3c_master_register() - register an I3C master
* @master: master used to send frames on the bus
* @parent: the parent device (the one that provides this I3C master
* controller)
* @ops: the master controller operations
* @secondary: true if you are registering a secondary master. Will return
* -ENOTSUPP if set to true since secondary masters are not yet
* supported
*
* This function takes care of everything for you:
*
* - creates and initializes the I3C bus
* - populates the bus with static I2C devs if @parent->of_node is not
* NULL
* - registers all I3C devices added by the controller during bus
* initialization
* - registers the I2C adapter and all I2C devices
*
* Return: 0 in case of success, a negative error code otherwise.
*/
int i3c_master_register(struct i3c_master_controller *master,
struct device *parent,
const struct i3c_master_controller_ops *ops,
bool secondary)
{
unsigned long i2c_scl_rate = I3C_BUS_I2C_FM_PLUS_SCL_RATE;
struct i3c_bus *i3cbus = i3c_master_get_bus(master);
enum i3c_bus_mode mode = I3C_BUS_MODE_PURE;
struct i2c_dev_boardinfo *i2cbi;
int ret;
/* We do not support secondary masters yet. */
if (secondary)
return -ENOTSUPP;
ret = i3c_master_check_ops(ops);
if (ret)
return ret;
master->dev.parent = parent;
master->dev.of_node = of_node_get(parent->of_node);
master->dev.bus = &i3c_bus_type;
master->dev.type = &i3c_masterdev_type;
master->dev.release = i3c_masterdev_release;
master->ops = ops;
master->secondary = secondary;
INIT_LIST_HEAD(&master->boardinfo.i2c);
INIT_LIST_HEAD(&master->boardinfo.i3c);
ret = i3c_bus_init(i3cbus, master->dev.of_node);
if (ret)
return ret;
device_initialize(&master->dev);
dev_set_name(&master->dev, "i3c-%d", i3cbus->id);
ret = of_populate_i3c_bus(master);
if (ret)
goto err_put_dev;
list_for_each_entry(i2cbi, &master->boardinfo.i2c, node) {
switch (i2cbi->lvr & I3C_LVR_I2C_INDEX_MASK) {
case I3C_LVR_I2C_INDEX(0):
if (mode < I3C_BUS_MODE_MIXED_FAST)
mode = I3C_BUS_MODE_MIXED_FAST;
break;
case I3C_LVR_I2C_INDEX(1):
if (mode < I3C_BUS_MODE_MIXED_LIMITED)
mode = I3C_BUS_MODE_MIXED_LIMITED;
break;
case I3C_LVR_I2C_INDEX(2):
if (mode < I3C_BUS_MODE_MIXED_SLOW)
mode = I3C_BUS_MODE_MIXED_SLOW;
break;
default:
ret = -EINVAL;
goto err_put_dev;
}
if (i2cbi->lvr & I3C_LVR_I2C_FM_MODE)
i2c_scl_rate = I3C_BUS_I2C_FM_SCL_RATE;
}
ret = i3c_bus_set_mode(i3cbus, mode, i2c_scl_rate);
if (ret)
goto err_put_dev;
master->wq = alloc_workqueue("%s", 0, 0, dev_name(parent));
if (!master->wq) {
ret = -ENOMEM;
goto err_put_dev;
}
ret = i3c_master_bus_init(master);
if (ret)
goto err_put_dev;
ret = device_add(&master->dev);
if (ret)
goto err_cleanup_bus;
/*
* Expose our I3C bus as an I2C adapter so that I2C devices are exposed
* through the I2C subsystem.
*/
ret = i3c_master_i2c_adapter_init(master);
if (ret)
goto err_del_dev;
/*
* We're done initializing the bus and the controller, we can now
* register I3C devices discovered during the initial DAA.
*/
master->init_done = true;
i3c_bus_normaluse_lock(&master->bus);
i3c_master_register_new_i3c_devs(master);
i3c_bus_normaluse_unlock(&master->bus);
return 0;
err_del_dev:
device_del(&master->dev);
err_cleanup_bus:
i3c_master_bus_cleanup(master);
err_put_dev:
put_device(&master->dev);
return ret;
}
EXPORT_SYMBOL_GPL(i3c_master_register);
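/*
 * Usage sketch (illustrative only): a platform driver probe registering an
 * I3C master. Resource setup is elided; my_drv_* names are hypothetical.
 *
 *	static const struct i3c_master_controller_ops my_drv_ops = {
 *		.bus_init = my_drv_bus_init,
 *		.do_daa = my_drv_do_daa,
 *		.send_ccc_cmd = my_drv_send_ccc_cmd,
 *		.priv_xfers = my_drv_priv_xfers,
 *		.i2c_xfers = my_drv_i2c_xfers,
 *	};
 *
 *	static int my_drv_probe(struct platform_device *pdev)
 *	{
 *		struct my_drv *priv;
 *
 *		priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
 *		if (!priv)
 *			return -ENOMEM;
 *
 *		// ioremap, clocks, IRQ wiring, ... elided
 *		return i3c_master_register(&priv->base, &pdev->dev,
 *					   &my_drv_ops, false);
 *	}
 */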
/**
* i3c_master_unregister() - unregister an I3C master
* @master: master used to send frames on the bus
*
* Basically undo everything done in i3c_master_register().
*/
void i3c_master_unregister(struct i3c_master_controller *master)
{
i3c_master_i2c_adapter_cleanup(master);
i3c_master_unregister_i3c_devs(master);
i3c_master_bus_cleanup(master);
device_unregister(&master->dev);
}
EXPORT_SYMBOL_GPL(i3c_master_unregister);
int i3c_dev_setdasa_locked(struct i3c_dev_desc *dev)
{
struct i3c_master_controller *master;
if (!dev)
return -ENOENT;
master = i3c_dev_get_master(dev);
if (!master)
return -EINVAL;
if (!dev->boardinfo || !dev->boardinfo->init_dyn_addr ||
!dev->boardinfo->static_addr)
return -EINVAL;
return i3c_master_setdasa_locked(master, dev->info.static_addr,
dev->boardinfo->init_dyn_addr);
}
int i3c_dev_do_priv_xfers_locked(struct i3c_dev_desc *dev,
struct i3c_priv_xfer *xfers,
int nxfers)
{
struct i3c_master_controller *master;
if (!dev)
return -ENOENT;
master = i3c_dev_get_master(dev);
if (!master || !xfers)
return -EINVAL;
if (!master->ops->priv_xfers)
return -ENOTSUPP;
return master->ops->priv_xfers(dev, xfers, nxfers);
}
int i3c_dev_disable_ibi_locked(struct i3c_dev_desc *dev)
{
struct i3c_master_controller *master;
int ret;
if (!dev->ibi)
return -EINVAL;
master = i3c_dev_get_master(dev);
ret = master->ops->disable_ibi(dev);
if (ret)
return ret;
reinit_completion(&dev->ibi->all_ibis_handled);
if (atomic_read(&dev->ibi->pending_ibis))
wait_for_completion(&dev->ibi->all_ibis_handled);
dev->ibi->enabled = false;
return 0;
}
int i3c_dev_enable_ibi_locked(struct i3c_dev_desc *dev)
{
struct i3c_master_controller *master = i3c_dev_get_master(dev);
int ret;
if (!dev->ibi)
return -EINVAL;
ret = master->ops->enable_ibi(dev);
if (!ret)
dev->ibi->enabled = true;
return ret;
}
int i3c_dev_request_ibi_locked(struct i3c_dev_desc *dev,
const struct i3c_ibi_setup *req)
{
struct i3c_master_controller *master = i3c_dev_get_master(dev);
struct i3c_device_ibi_info *ibi;
int ret;
if (!master->ops->request_ibi)
return -ENOTSUPP;
if (dev->ibi)
return -EBUSY;
ibi = kzalloc(sizeof(*ibi), GFP_KERNEL);
if (!ibi)
return -ENOMEM;
atomic_set(&ibi->pending_ibis, 0);
init_completion(&ibi->all_ibis_handled);
ibi->handler = req->handler;
ibi->max_payload_len = req->max_payload_len;
ibi->num_slots = req->num_slots;
dev->ibi = ibi;
ret = master->ops->request_ibi(dev, req);
if (ret) {
kfree(ibi);
dev->ibi = NULL;
}
return ret;
}
void i3c_dev_free_ibi_locked(struct i3c_dev_desc *dev)
{
struct i3c_master_controller *master = i3c_dev_get_master(dev);
if (!dev->ibi)
return;
if (WARN_ON(dev->ibi->enabled))
WARN_ON(i3c_dev_disable_ibi_locked(dev));
master->ops->free_ibi(dev);
kfree(dev->ibi);
dev->ibi = NULL;
}
static int __init i3c_init(void)
{
int res;
res = of_alias_get_highest_id("i3c");
if (res >= 0) {
mutex_lock(&i3c_core_lock);
__i3c_first_dynamic_bus_num = res + 1;
mutex_unlock(&i3c_core_lock);
}
res = bus_register_notifier(&i2c_bus_type, &i2cdev_notifier);
if (res)
return res;
res = bus_register(&i3c_bus_type);
if (res)
goto out_unreg_notifier;
return 0;
out_unreg_notifier:
bus_unregister_notifier(&i2c_bus_type, &i2cdev_notifier);
return res;
}
subsys_initcall(i3c_init);
static void __exit i3c_exit(void)
{
bus_unregister_notifier(&i2c_bus_type, &i2cdev_notifier);
idr_destroy(&i3c_bus_idr);
bus_unregister(&i3c_bus_type);
}
module_exit(i3c_exit);
MODULE_AUTHOR("Boris Brezillon <[email protected]>");
MODULE_DESCRIPTION("I3C core");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/i3c/master.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2018 Cadence Design Systems Inc.
*
* Author: Boris Brezillon <[email protected]>
*/
#include <linux/atomic.h>
#include <linux/bug.h>
#include <linux/completion.h>
#include <linux/device.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include "internals.h"
/**
* i3c_device_do_priv_xfers() - do I3C SDR private transfers directed to a
* specific device
*
* @dev: device with which the transfers should be done
* @xfers: array of transfers
* @nxfers: number of transfers
*
* Initiate one or several private SDR transfers with @dev.
*
* This function can sleep and thus cannot be called in atomic context.
*
 * Return: 0 in case of success, a negative error code otherwise.
*/
int i3c_device_do_priv_xfers(struct i3c_device *dev,
struct i3c_priv_xfer *xfers,
int nxfers)
{
int ret, i;
if (nxfers < 1)
return 0;
for (i = 0; i < nxfers; i++) {
if (!xfers[i].len || !xfers[i].data.in)
return -EINVAL;
}
i3c_bus_normaluse_lock(dev->bus);
ret = i3c_dev_do_priv_xfers_locked(dev->desc, xfers, nxfers);
i3c_bus_normaluse_unlock(dev->bus);
return ret;
}
EXPORT_SYMBOL_GPL(i3c_device_do_priv_xfers);
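/*
 * Illustrative sketch, not part of the core: how a client driver might use
 * i3c_device_do_priv_xfers() for a common write-then-read register access.
 * The helper name and transfer layout are hypothetical; the i3c_priv_xfer
 * fields (rnw, len, data.in/data.out) are the ones used throughout the core.
 */
static int __maybe_unused i3c_example_read_reg(struct i3c_device *i3cdev,
					       u8 reg, u8 *val)
{
	struct i3c_priv_xfer xfers[2] = {
		{
			/* First message: write the register address */
			.rnw = false,
			.len = 1,
			.data.out = &reg,
		},
		{
			/* Second message: read back one byte */
			.rnw = true,
			.len = 1,
			.data.in = val,
		},
	};
	return i3c_device_do_priv_xfers(i3cdev, xfers, 2);
}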
/**
 * i3c_device_do_setdasa() - do I3C dynamic address assignment with
* static address
*
* @dev: device with which the DAA should be done
*
 * Return: 0 in case of success, a negative error code otherwise.
*/
int i3c_device_do_setdasa(struct i3c_device *dev)
{
int ret;
i3c_bus_normaluse_lock(dev->bus);
ret = i3c_dev_setdasa_locked(dev->desc);
i3c_bus_normaluse_unlock(dev->bus);
return ret;
}
EXPORT_SYMBOL_GPL(i3c_device_do_setdasa);
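/*
 * Illustrative sketch, not part of the core: a driver bound to a device
 * declared with a static address (e.g. in the device tree) might promote
 * that address with SETDASA early in its probe path. The helper name is
 * hypothetical.
 */
static int __maybe_unused i3c_example_probe_setdasa(struct i3c_device *dev)
{
	/* Assign the dynamic address from the static one before any xfer */
	return i3c_device_do_setdasa(dev);
}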
/**
* i3c_device_get_info() - get I3C device information
*
* @dev: device we want information on
* @info: the information object to fill in
*
* Retrieve I3C dev info.
*/
void i3c_device_get_info(const struct i3c_device *dev,
struct i3c_device_info *info)
{
if (!info)
return;
i3c_bus_normaluse_lock(dev->bus);
if (dev->desc)
*info = dev->desc->info;
i3c_bus_normaluse_unlock(dev->bus);
}
EXPORT_SYMBOL_GPL(i3c_device_get_info);
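/*
 * Illustrative sketch, not part of the core: dumping a few fields retrieved
 * with i3c_device_get_info(). The helper name is hypothetical; pid, dyn_addr
 * and bcr are fields of struct i3c_device_info.
 */
static void __maybe_unused i3c_example_dump_info(struct i3c_device *dev)
{
	struct i3c_device_info info;
	i3c_device_get_info(dev, &info);
	dev_info(i3cdev_to_dev(dev), "PID %llx, dyn addr %#x, BCR %#x\n",
		 info.pid, info.dyn_addr, info.bcr);
}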
/**
* i3c_device_disable_ibi() - Disable IBIs coming from a specific device
* @dev: device on which IBIs should be disabled
*
 * This function disables IBIs coming from a specific device and waits for
 * all pending IBIs to be processed.
 *
 * Return: 0 in case of success, a negative error code otherwise.
*/
int i3c_device_disable_ibi(struct i3c_device *dev)
{
int ret = -ENOENT;
i3c_bus_normaluse_lock(dev->bus);
if (dev->desc) {
mutex_lock(&dev->desc->ibi_lock);
ret = i3c_dev_disable_ibi_locked(dev->desc);
mutex_unlock(&dev->desc->ibi_lock);
}
i3c_bus_normaluse_unlock(dev->bus);
return ret;
}
EXPORT_SYMBOL_GPL(i3c_device_disable_ibi);
/**
* i3c_device_enable_ibi() - Enable IBIs coming from a specific device
* @dev: device on which IBIs should be enabled
*
 * This function enables IBIs coming from a specific device. It should be
 * called on a device where i3c_device_request_ibi() has succeeded.
*
* Note that IBIs from this device might be received before this function
* returns to its caller.
*
 * Return: 0 in case of success, a negative error code otherwise.
*/
int i3c_device_enable_ibi(struct i3c_device *dev)
{
int ret = -ENOENT;
i3c_bus_normaluse_lock(dev->bus);
if (dev->desc) {
mutex_lock(&dev->desc->ibi_lock);
ret = i3c_dev_enable_ibi_locked(dev->desc);
mutex_unlock(&dev->desc->ibi_lock);
}
i3c_bus_normaluse_unlock(dev->bus);
return ret;
}
EXPORT_SYMBOL_GPL(i3c_device_enable_ibi);
/**
* i3c_device_request_ibi() - Request an IBI
* @dev: device for which we should enable IBIs
* @req: setup requested for this IBI
*
* This function is responsible for pre-allocating all resources needed to
 * process IBIs coming from @dev. IBIs remain disabled when this function
 * returns; enable them with i3c_device_enable_ibi().
*
 * Return: 0 in case of success, a negative error code otherwise.
*/
int i3c_device_request_ibi(struct i3c_device *dev,
const struct i3c_ibi_setup *req)
{
int ret = -ENOENT;
if (!req->handler || !req->num_slots)
return -EINVAL;
i3c_bus_normaluse_lock(dev->bus);
if (dev->desc) {
mutex_lock(&dev->desc->ibi_lock);
ret = i3c_dev_request_ibi_locked(dev->desc, req);
mutex_unlock(&dev->desc->ibi_lock);
}
i3c_bus_normaluse_unlock(dev->bus);
return ret;
}
EXPORT_SYMBOL_GPL(i3c_device_request_ibi);
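/*
 * Illustrative sketch, not part of the core: requesting then enabling IBIs.
 * The handler and helper names plus the payload/slot sizes are hypothetical;
 * handlers are typically invoked from the master's workqueue context.
 */
static void __maybe_unused i3c_example_ibi_handler(struct i3c_device *dev,
						   const struct i3c_ibi_payload *payload)
{
	dev_info(i3cdev_to_dev(dev), "IBI received, %u bytes\n", payload->len);
}
static int __maybe_unused i3c_example_setup_ibi(struct i3c_device *dev)
{
	struct i3c_ibi_setup req = {
		.max_payload_len = 2,	/* hypothetical payload size */
		.num_slots = 4,		/* hypothetical pre-allocated slots */
		.handler = i3c_example_ibi_handler,
	};
	int ret;
	ret = i3c_device_request_ibi(dev, &req);
	if (ret)
		return ret;
	/* IBIs may be delivered as soon as this call succeeds */
	ret = i3c_device_enable_ibi(dev);
	if (ret)
		i3c_device_free_ibi(dev);
	return ret;
}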
/**
* i3c_device_free_ibi() - Free all resources needed for IBI handling
* @dev: device on which you want to release IBI resources
*
* This function is responsible for de-allocating resources previously
* allocated by i3c_device_request_ibi(). It should be called after disabling
* IBIs with i3c_device_disable_ibi().
*/
void i3c_device_free_ibi(struct i3c_device *dev)
{
i3c_bus_normaluse_lock(dev->bus);
if (dev->desc) {
mutex_lock(&dev->desc->ibi_lock);
i3c_dev_free_ibi_locked(dev->desc);
mutex_unlock(&dev->desc->ibi_lock);
}
i3c_bus_normaluse_unlock(dev->bus);
}
EXPORT_SYMBOL_GPL(i3c_device_free_ibi);
/**
* i3cdev_to_dev() - Returns the device embedded in @i3cdev
* @i3cdev: I3C device
*
* Return: a pointer to a device object.
*/
struct device *i3cdev_to_dev(struct i3c_device *i3cdev)
{
return &i3cdev->dev;
}
EXPORT_SYMBOL_GPL(i3cdev_to_dev);
/**
* i3c_device_match_id() - Returns the i3c_device_id entry matching @i3cdev
* @i3cdev: I3C device
* @id_table: I3C device match table
*
* Return: a pointer to an i3c_device_id object or NULL if there's no match.
*/
const struct i3c_device_id *
i3c_device_match_id(struct i3c_device *i3cdev,
const struct i3c_device_id *id_table)
{
struct i3c_device_info devinfo;
const struct i3c_device_id *id;
u16 manuf, part, ext_info;
bool rndpid;
i3c_device_get_info(i3cdev, &devinfo);
manuf = I3C_PID_MANUF_ID(devinfo.pid);
part = I3C_PID_PART_ID(devinfo.pid);
ext_info = I3C_PID_EXTRA_INFO(devinfo.pid);
rndpid = I3C_PID_RND_LOWER_32BITS(devinfo.pid);
for (id = id_table; id->match_flags != 0; id++) {
if ((id->match_flags & I3C_MATCH_DCR) &&
id->dcr != devinfo.dcr)
continue;
if ((id->match_flags & I3C_MATCH_MANUF) &&
id->manuf_id != manuf)
continue;
if ((id->match_flags & I3C_MATCH_PART) &&
(rndpid || id->part_id != part))
continue;
if ((id->match_flags & I3C_MATCH_EXTRA_INFO) &&
(rndpid || id->extra_info != ext_info))
continue;
return id;
}
return NULL;
}
EXPORT_SYMBOL_GPL(i3c_device_match_id);
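/*
 * Illustrative sketch, not part of the core: a match table as consumed by
 * i3c_device_match_id(), built with the I3C_DEVICE() and I3C_CLASS()
 * helpers from <linux/i3c/device.h>. All IDs below are made up.
 */
static const struct i3c_device_id __maybe_unused i3c_example_ids[] = {
	I3C_DEVICE(0x123, 0x456, NULL),	/* hypothetical manuf/part pair */
	I3C_CLASS(0x42, NULL),		/* hypothetical DCR-based match */
	{ /* sentinel */ },
};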
/**
* i3c_driver_register_with_owner() - register an I3C device driver
*
* @drv: driver to register
* @owner: module that owns this driver
*
* Register @drv to the core.
*
 * Return: 0 in case of success, a negative error code otherwise.
*/
int i3c_driver_register_with_owner(struct i3c_driver *drv, struct module *owner)
{
drv->driver.owner = owner;
drv->driver.bus = &i3c_bus_type;
if (!drv->probe) {
pr_err("Trying to register an i3c driver without probe callback\n");
return -EINVAL;
}
return driver_register(&drv->driver);
}
EXPORT_SYMBOL_GPL(i3c_driver_register_with_owner);
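/*
 * Illustrative sketch, not part of the core: minimal driver registration.
 * The probe callback and driver name are hypothetical; most drivers use the
 * module_i3c_driver() helper, and i3c_driver_register() expands to
 * i3c_driver_register_with_owner(drv, THIS_MODULE).
 */
static int __maybe_unused i3c_example_probe(struct i3c_device *dev)
{
	return 0;
}
static struct i3c_driver __maybe_unused i3c_example_driver = {
	.driver.name = "i3c-example",
	.probe = i3c_example_probe,
};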
/**
* i3c_driver_unregister() - unregister an I3C device driver
*
* @drv: driver to unregister
*
* Unregister @drv.
*/
void i3c_driver_unregister(struct i3c_driver *drv)
{
driver_unregister(&drv->driver);
}
EXPORT_SYMBOL_GPL(i3c_driver_unregister);
| linux-master | drivers/i3c/device.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Silvaco dual-role I3C master driver
*
* Copyright (C) 2020 Silvaco
* Author: Miquel RAYNAL <[email protected]>
* Based on a work from: Conor Culhane <[email protected]>
*/
#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/i3c/master.h>
#include <linux/interrupt.h>
#include <linux/iopoll.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
/* Master Mode Registers */
#define SVC_I3C_MCONFIG 0x000
#define SVC_I3C_MCONFIG_MASTER_EN BIT(0)
#define SVC_I3C_MCONFIG_DISTO(x) FIELD_PREP(BIT(3), (x))
#define SVC_I3C_MCONFIG_HKEEP(x) FIELD_PREP(GENMASK(5, 4), (x))
#define SVC_I3C_MCONFIG_ODSTOP(x) FIELD_PREP(BIT(6), (x))
#define SVC_I3C_MCONFIG_PPBAUD(x) FIELD_PREP(GENMASK(11, 8), (x))
#define SVC_I3C_MCONFIG_PPLOW(x) FIELD_PREP(GENMASK(15, 12), (x))
#define SVC_I3C_MCONFIG_ODBAUD(x) FIELD_PREP(GENMASK(23, 16), (x))
#define SVC_I3C_MCONFIG_ODHPP(x) FIELD_PREP(BIT(24), (x))
#define SVC_I3C_MCONFIG_SKEW(x) FIELD_PREP(GENMASK(27, 25), (x))
#define SVC_I3C_MCONFIG_I2CBAUD(x) FIELD_PREP(GENMASK(31, 28), (x))
#define SVC_I3C_MCTRL 0x084
#define SVC_I3C_MCTRL_REQUEST_MASK GENMASK(2, 0)
#define SVC_I3C_MCTRL_REQUEST_NONE 0
#define SVC_I3C_MCTRL_REQUEST_START_ADDR 1
#define SVC_I3C_MCTRL_REQUEST_STOP 2
#define SVC_I3C_MCTRL_REQUEST_IBI_ACKNACK 3
#define SVC_I3C_MCTRL_REQUEST_PROC_DAA 4
#define SVC_I3C_MCTRL_REQUEST_AUTO_IBI 7
#define SVC_I3C_MCTRL_TYPE_I3C 0
#define SVC_I3C_MCTRL_TYPE_I2C BIT(4)
#define SVC_I3C_MCTRL_IBIRESP_AUTO 0
#define SVC_I3C_MCTRL_IBIRESP_ACK_WITHOUT_BYTE 0
#define SVC_I3C_MCTRL_IBIRESP_ACK_WITH_BYTE BIT(7)
#define SVC_I3C_MCTRL_IBIRESP_NACK BIT(6)
#define SVC_I3C_MCTRL_IBIRESP_MANUAL GENMASK(7, 6)
#define SVC_I3C_MCTRL_DIR(x) FIELD_PREP(BIT(8), (x))
#define SVC_I3C_MCTRL_DIR_WRITE 0
#define SVC_I3C_MCTRL_DIR_READ 1
#define SVC_I3C_MCTRL_ADDR(x) FIELD_PREP(GENMASK(15, 9), (x))
#define SVC_I3C_MCTRL_RDTERM(x) FIELD_PREP(GENMASK(23, 16), (x))
#define SVC_I3C_MSTATUS 0x088
#define SVC_I3C_MSTATUS_STATE(x) FIELD_GET(GENMASK(2, 0), (x))
#define SVC_I3C_MSTATUS_STATE_DAA(x) (SVC_I3C_MSTATUS_STATE(x) == 5)
#define SVC_I3C_MSTATUS_STATE_IDLE(x) (SVC_I3C_MSTATUS_STATE(x) == 0)
#define SVC_I3C_MSTATUS_BETWEEN(x) FIELD_GET(BIT(4), (x))
#define SVC_I3C_MSTATUS_NACKED(x) FIELD_GET(BIT(5), (x))
#define SVC_I3C_MSTATUS_IBITYPE(x) FIELD_GET(GENMASK(7, 6), (x))
#define SVC_I3C_MSTATUS_IBITYPE_IBI 1
#define SVC_I3C_MSTATUS_IBITYPE_MASTER_REQUEST 2
#define SVC_I3C_MSTATUS_IBITYPE_HOT_JOIN 3
#define SVC_I3C_MINT_SLVSTART BIT(8)
#define SVC_I3C_MINT_MCTRLDONE BIT(9)
#define SVC_I3C_MINT_COMPLETE BIT(10)
#define SVC_I3C_MINT_RXPEND BIT(11)
#define SVC_I3C_MINT_TXNOTFULL BIT(12)
#define SVC_I3C_MINT_IBIWON BIT(13)
#define SVC_I3C_MINT_ERRWARN BIT(15)
#define SVC_I3C_MSTATUS_SLVSTART(x) FIELD_GET(SVC_I3C_MINT_SLVSTART, (x))
#define SVC_I3C_MSTATUS_MCTRLDONE(x) FIELD_GET(SVC_I3C_MINT_MCTRLDONE, (x))
#define SVC_I3C_MSTATUS_COMPLETE(x) FIELD_GET(SVC_I3C_MINT_COMPLETE, (x))
#define SVC_I3C_MSTATUS_RXPEND(x) FIELD_GET(SVC_I3C_MINT_RXPEND, (x))
#define SVC_I3C_MSTATUS_TXNOTFULL(x) FIELD_GET(SVC_I3C_MINT_TXNOTFULL, (x))
#define SVC_I3C_MSTATUS_IBIWON(x) FIELD_GET(SVC_I3C_MINT_IBIWON, (x))
#define SVC_I3C_MSTATUS_ERRWARN(x) FIELD_GET(SVC_I3C_MINT_ERRWARN, (x))
#define SVC_I3C_MSTATUS_IBIADDR(x) FIELD_GET(GENMASK(30, 24), (x))
#define SVC_I3C_IBIRULES 0x08C
#define SVC_I3C_IBIRULES_ADDR(slot, addr) FIELD_PREP(GENMASK(29, 0), \
((addr) & 0x3F) << ((slot) * 6))
#define SVC_I3C_IBIRULES_ADDRS 5
#define SVC_I3C_IBIRULES_MSB0 BIT(30)
#define SVC_I3C_IBIRULES_NOBYTE BIT(31)
#define SVC_I3C_IBIRULES_MANDBYTE 0
#define SVC_I3C_MINTSET 0x090
#define SVC_I3C_MINTCLR 0x094
#define SVC_I3C_MINTMASKED 0x098
#define SVC_I3C_MERRWARN 0x09C
#define SVC_I3C_MERRWARN_NACK BIT(2)
#define SVC_I3C_MDMACTRL 0x0A0
#define SVC_I3C_MDATACTRL 0x0AC
#define SVC_I3C_MDATACTRL_FLUSHTB BIT(0)
#define SVC_I3C_MDATACTRL_FLUSHRB BIT(1)
#define SVC_I3C_MDATACTRL_UNLOCK_TRIG BIT(3)
#define SVC_I3C_MDATACTRL_TXTRIG_FIFO_NOT_FULL GENMASK(5, 4)
#define SVC_I3C_MDATACTRL_RXTRIG_FIFO_NOT_EMPTY 0
#define SVC_I3C_MDATACTRL_RXCOUNT(x) FIELD_GET(GENMASK(28, 24), (x))
#define SVC_I3C_MDATACTRL_TXFULL BIT(30)
#define SVC_I3C_MDATACTRL_RXEMPTY BIT(31)
#define SVC_I3C_MWDATAB 0x0B0
#define SVC_I3C_MWDATAB_END BIT(8)
#define SVC_I3C_MWDATABE 0x0B4
#define SVC_I3C_MWDATAH 0x0B8
#define SVC_I3C_MWDATAHE 0x0BC
#define SVC_I3C_MRDATAB 0x0C0
#define SVC_I3C_MRDATAH 0x0C8
#define SVC_I3C_MWMSG_SDR 0x0D0
#define SVC_I3C_MRMSG_SDR 0x0D4
#define SVC_I3C_MWMSG_DDR 0x0D8
#define SVC_I3C_MRMSG_DDR 0x0DC
#define SVC_I3C_MDYNADDR 0x0E4
#define SVC_MDYNADDR_VALID BIT(0)
#define SVC_MDYNADDR_ADDR(x) FIELD_PREP(GENMASK(7, 1), (x))
#define SVC_I3C_MAX_DEVS 32
#define SVC_I3C_PM_TIMEOUT_MS 1000
/* This parameter depends on the implementation and may be tuned */
#define SVC_I3C_FIFO_SIZE 16
struct svc_i3c_cmd {
u8 addr;
bool rnw;
u8 *in;
const void *out;
unsigned int len;
unsigned int read_len;
bool continued;
};
struct svc_i3c_xfer {
struct list_head node;
struct completion comp;
int ret;
unsigned int type;
unsigned int ncmds;
struct svc_i3c_cmd cmds[];
};
struct svc_i3c_regs_save {
u32 mconfig;
u32 mdynaddr;
};
/**
* struct svc_i3c_master - Silvaco I3C Master structure
* @base: I3C master controller
* @dev: Corresponding device
* @regs: Memory mapping
* @saved_regs: Volatile values for PM operations
* @free_slots: Bit array of available slots
* @addrs: Array containing the dynamic addresses of each attached device
* @descs: Array of descriptors, one per attached device
* @hj_work: Hot-join work
* @ibi_work: IBI work
* @irq: Main interrupt
* @pclk: System clock
* @fclk: Fast clock (bus)
* @sclk: Slow clock (other events)
* @xferqueue: Transfer queue structure
* @xferqueue.list: List member
* @xferqueue.cur: Current ongoing transfer
* @xferqueue.lock: Queue lock
* @ibi: IBI structure
* @ibi.num_slots: Number of slots available in @ibi.slots
* @ibi.slots: Available IBI slots
* @ibi.tbq_slot: To be queued IBI slot
* @ibi.lock: IBI lock
*/
struct svc_i3c_master {
struct i3c_master_controller base;
struct device *dev;
void __iomem *regs;
struct svc_i3c_regs_save saved_regs;
u32 free_slots;
u8 addrs[SVC_I3C_MAX_DEVS];
struct i3c_dev_desc *descs[SVC_I3C_MAX_DEVS];
struct work_struct hj_work;
struct work_struct ibi_work;
int irq;
struct clk *pclk;
struct clk *fclk;
struct clk *sclk;
struct {
struct list_head list;
struct svc_i3c_xfer *cur;
/* Prevent races between transfers */
spinlock_t lock;
} xferqueue;
struct {
unsigned int num_slots;
struct i3c_dev_desc **slots;
struct i3c_ibi_slot *tbq_slot;
/* Prevent races within IBI handlers */
spinlock_t lock;
} ibi;
};
/**
* struct svc_i3c_i2c_dev_data - Device specific data
* @index: Index in the master tables corresponding to this device
* @ibi: IBI slot index in the master structure
* @ibi_pool: IBI pool associated to this device
*/
struct svc_i3c_i2c_dev_data {
u8 index;
int ibi;
struct i3c_generic_ibi_pool *ibi_pool;
};
static bool svc_i3c_master_error(struct svc_i3c_master *master)
{
u32 mstatus, merrwarn;
mstatus = readl(master->regs + SVC_I3C_MSTATUS);
if (SVC_I3C_MSTATUS_ERRWARN(mstatus)) {
merrwarn = readl(master->regs + SVC_I3C_MERRWARN);
writel(merrwarn, master->regs + SVC_I3C_MERRWARN);
dev_err(master->dev,
"Error condition: MSTATUS 0x%08x, MERRWARN 0x%08x\n",
mstatus, merrwarn);
return true;
}
return false;
}
static void svc_i3c_master_enable_interrupts(struct svc_i3c_master *master, u32 mask)
{
writel(mask, master->regs + SVC_I3C_MINTSET);
}
static void svc_i3c_master_disable_interrupts(struct svc_i3c_master *master)
{
u32 mask = readl(master->regs + SVC_I3C_MINTSET);
writel(mask, master->regs + SVC_I3C_MINTCLR);
}
static void svc_i3c_master_clear_merrwarn(struct svc_i3c_master *master)
{
/* Clear pending warnings */
writel(readl(master->regs + SVC_I3C_MERRWARN),
master->regs + SVC_I3C_MERRWARN);
}
static void svc_i3c_master_flush_fifo(struct svc_i3c_master *master)
{
/* Flush FIFOs */
writel(SVC_I3C_MDATACTRL_FLUSHTB | SVC_I3C_MDATACTRL_FLUSHRB,
master->regs + SVC_I3C_MDATACTRL);
}
static void svc_i3c_master_reset_fifo_trigger(struct svc_i3c_master *master)
{
u32 reg;
	/* Set RX and TX trigger levels, flush FIFOs */
reg = SVC_I3C_MDATACTRL_FLUSHTB |
SVC_I3C_MDATACTRL_FLUSHRB |
SVC_I3C_MDATACTRL_UNLOCK_TRIG |
SVC_I3C_MDATACTRL_TXTRIG_FIFO_NOT_FULL |
SVC_I3C_MDATACTRL_RXTRIG_FIFO_NOT_EMPTY;
writel(reg, master->regs + SVC_I3C_MDATACTRL);
}
static void svc_i3c_master_reset(struct svc_i3c_master *master)
{
svc_i3c_master_clear_merrwarn(master);
svc_i3c_master_reset_fifo_trigger(master);
svc_i3c_master_disable_interrupts(master);
}
static inline struct svc_i3c_master *
to_svc_i3c_master(struct i3c_master_controller *master)
{
return container_of(master, struct svc_i3c_master, base);
}
static void svc_i3c_master_hj_work(struct work_struct *work)
{
struct svc_i3c_master *master;
master = container_of(work, struct svc_i3c_master, hj_work);
i3c_master_do_daa(&master->base);
}
static struct i3c_dev_desc *
svc_i3c_master_dev_from_addr(struct svc_i3c_master *master,
unsigned int ibiaddr)
{
int i;
for (i = 0; i < SVC_I3C_MAX_DEVS; i++)
if (master->addrs[i] == ibiaddr)
break;
if (i == SVC_I3C_MAX_DEVS)
return NULL;
return master->descs[i];
}
static void svc_i3c_master_emit_stop(struct svc_i3c_master *master)
{
writel(SVC_I3C_MCTRL_REQUEST_STOP, master->regs + SVC_I3C_MCTRL);
	/*
	 * This delay is necessary after the emission of a stop, otherwise
	 * e.g. repeated IBIs do not get detected. There is a note in the
	 * manual about it, stating that the stop condition might not be
	 * settled correctly if a start condition follows too rapidly.
	 */
udelay(1);
}
static int svc_i3c_master_handle_ibi(struct svc_i3c_master *master,
struct i3c_dev_desc *dev)
{
struct svc_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
struct i3c_ibi_slot *slot;
unsigned int count;
u32 mdatactrl;
u8 *buf;
slot = i3c_generic_ibi_get_free_slot(data->ibi_pool);
if (!slot)
return -ENOSPC;
slot->len = 0;
buf = slot->data;
while (SVC_I3C_MSTATUS_RXPEND(readl(master->regs + SVC_I3C_MSTATUS)) &&
slot->len < SVC_I3C_FIFO_SIZE) {
mdatactrl = readl(master->regs + SVC_I3C_MDATACTRL);
count = SVC_I3C_MDATACTRL_RXCOUNT(mdatactrl);
readsl(master->regs + SVC_I3C_MRDATAB, buf, count);
slot->len += count;
buf += count;
}
master->ibi.tbq_slot = slot;
return 0;
}
static void svc_i3c_master_ack_ibi(struct svc_i3c_master *master,
bool mandatory_byte)
{
unsigned int ibi_ack_nack;
ibi_ack_nack = SVC_I3C_MCTRL_REQUEST_IBI_ACKNACK;
if (mandatory_byte)
ibi_ack_nack |= SVC_I3C_MCTRL_IBIRESP_ACK_WITH_BYTE;
else
ibi_ack_nack |= SVC_I3C_MCTRL_IBIRESP_ACK_WITHOUT_BYTE;
writel(ibi_ack_nack, master->regs + SVC_I3C_MCTRL);
}
static void svc_i3c_master_nack_ibi(struct svc_i3c_master *master)
{
writel(SVC_I3C_MCTRL_REQUEST_IBI_ACKNACK |
SVC_I3C_MCTRL_IBIRESP_NACK,
master->regs + SVC_I3C_MCTRL);
}
static void svc_i3c_master_ibi_work(struct work_struct *work)
{
struct svc_i3c_master *master = container_of(work, struct svc_i3c_master, ibi_work);
struct svc_i3c_i2c_dev_data *data;
unsigned int ibitype, ibiaddr;
struct i3c_dev_desc *dev;
u32 status, val;
int ret;
/* Acknowledge the incoming interrupt with the AUTOIBI mechanism */
writel(SVC_I3C_MCTRL_REQUEST_AUTO_IBI |
SVC_I3C_MCTRL_IBIRESP_AUTO,
master->regs + SVC_I3C_MCTRL);
/* Wait for IBIWON, should take approximately 100us */
ret = readl_relaxed_poll_timeout(master->regs + SVC_I3C_MSTATUS, val,
SVC_I3C_MSTATUS_IBIWON(val), 0, 1000);
if (ret) {
dev_err(master->dev, "Timeout when polling for IBIWON\n");
goto reenable_ibis;
}
/* Clear the interrupt status */
writel(SVC_I3C_MINT_IBIWON, master->regs + SVC_I3C_MSTATUS);
status = readl(master->regs + SVC_I3C_MSTATUS);
ibitype = SVC_I3C_MSTATUS_IBITYPE(status);
ibiaddr = SVC_I3C_MSTATUS_IBIADDR(status);
	/* Handle the critical responses to IBIs */
switch (ibitype) {
case SVC_I3C_MSTATUS_IBITYPE_IBI:
dev = svc_i3c_master_dev_from_addr(master, ibiaddr);
if (!dev)
svc_i3c_master_nack_ibi(master);
else
svc_i3c_master_handle_ibi(master, dev);
break;
case SVC_I3C_MSTATUS_IBITYPE_HOT_JOIN:
svc_i3c_master_ack_ibi(master, false);
break;
case SVC_I3C_MSTATUS_IBITYPE_MASTER_REQUEST:
svc_i3c_master_nack_ibi(master);
break;
default:
break;
}
	/*
	 * If an error happened, we probably got interrupted and the exchange
	 * timed out. In this case we just drop everything, emit a stop and
	 * wait for the slave to interrupt again.
	 */
if (svc_i3c_master_error(master)) {
if (master->ibi.tbq_slot) {
data = i3c_dev_get_master_data(dev);
i3c_generic_ibi_recycle_slot(data->ibi_pool,
master->ibi.tbq_slot);
master->ibi.tbq_slot = NULL;
}
svc_i3c_master_emit_stop(master);
goto reenable_ibis;
}
/* Handle the non critical tasks */
switch (ibitype) {
case SVC_I3C_MSTATUS_IBITYPE_IBI:
if (dev) {
i3c_master_queue_ibi(dev, master->ibi.tbq_slot);
master->ibi.tbq_slot = NULL;
}
svc_i3c_master_emit_stop(master);
break;
case SVC_I3C_MSTATUS_IBITYPE_HOT_JOIN:
queue_work(master->base.wq, &master->hj_work);
break;
case SVC_I3C_MSTATUS_IBITYPE_MASTER_REQUEST:
default:
break;
}
reenable_ibis:
svc_i3c_master_enable_interrupts(master, SVC_I3C_MINT_SLVSTART);
}
static irqreturn_t svc_i3c_master_irq_handler(int irq, void *dev_id)
{
struct svc_i3c_master *master = (struct svc_i3c_master *)dev_id;
u32 active = readl(master->regs + SVC_I3C_MINTMASKED);
if (!SVC_I3C_MSTATUS_SLVSTART(active))
return IRQ_NONE;
/* Clear the interrupt status */
writel(SVC_I3C_MINT_SLVSTART, master->regs + SVC_I3C_MSTATUS);
svc_i3c_master_disable_interrupts(master);
/* Handle the interrupt in a non atomic context */
queue_work(master->base.wq, &master->ibi_work);
return IRQ_HANDLED;
}
static int svc_i3c_master_bus_init(struct i3c_master_controller *m)
{
struct svc_i3c_master *master = to_svc_i3c_master(m);
struct i3c_bus *bus = i3c_master_get_bus(m);
struct i3c_device_info info = {};
unsigned long fclk_rate, fclk_period_ns;
unsigned int high_period_ns, od_low_period_ns;
u32 ppbaud, pplow, odhpp, odbaud, odstop, i2cbaud, reg;
int ret;
ret = pm_runtime_resume_and_get(master->dev);
if (ret < 0) {
dev_err(master->dev,
"<%s> cannot resume i3c bus master, err: %d\n",
__func__, ret);
return ret;
}
/* Timings derivation */
fclk_rate = clk_get_rate(master->fclk);
if (!fclk_rate) {
ret = -EINVAL;
goto rpm_out;
}
fclk_period_ns = DIV_ROUND_UP(1000000000, fclk_rate);
/*
* Using I3C Push-Pull mode, target is 12.5MHz/80ns period.
* Simplest configuration is using a 50% duty-cycle of 40ns.
*/
ppbaud = DIV_ROUND_UP(40, fclk_period_ns) - 1;
pplow = 0;
/*
* Using I3C Open-Drain mode, target is 4.17MHz/240ns with a
	 * duty-cycle tuned so that high levels are filtered out by
* the 50ns filter (target being 40ns).
*/
odhpp = 1;
high_period_ns = (ppbaud + 1) * fclk_period_ns;
odbaud = DIV_ROUND_UP(240 - high_period_ns, high_period_ns) - 1;
od_low_period_ns = (odbaud + 1) * high_period_ns;
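	/*
	 * Worked example (illustrative): with fclk = 100 MHz, i.e. a 10 ns
	 * period, ppbaud = DIV_ROUND_UP(40, 10) - 1 = 3, giving a 40 ns
	 * push-pull half-period (80 ns / 12.5 MHz cycle). Then
	 * odbaud = DIV_ROUND_UP(240 - 40, 40) - 1 = 4, so the open-drain low
	 * time is (4 + 1) * 40 = 200 ns and, with odhpp = 1 keeping the high
	 * time at 40 ns, the open-drain cycle is 240 ns, i.e. ~4.17 MHz.
	 */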
switch (bus->mode) {
case I3C_BUS_MODE_PURE:
i2cbaud = 0;
odstop = 0;
break;
case I3C_BUS_MODE_MIXED_FAST:
case I3C_BUS_MODE_MIXED_LIMITED:
/*
* Using I2C Fm+ mode, target is 1MHz/1000ns, the difference
* between the high and low period does not really matter.
*/
i2cbaud = DIV_ROUND_UP(1000, od_low_period_ns) - 2;
odstop = 1;
break;
case I3C_BUS_MODE_MIXED_SLOW:
/*
* Using I2C Fm mode, target is 0.4MHz/2500ns, with the same
* constraints as the FM+ mode.
*/
i2cbaud = DIV_ROUND_UP(2500, od_low_period_ns) - 2;
odstop = 1;
break;
	default:
		ret = -EINVAL;
		goto rpm_out;
}
reg = SVC_I3C_MCONFIG_MASTER_EN |
SVC_I3C_MCONFIG_DISTO(0) |
SVC_I3C_MCONFIG_HKEEP(0) |
SVC_I3C_MCONFIG_ODSTOP(odstop) |
SVC_I3C_MCONFIG_PPBAUD(ppbaud) |
SVC_I3C_MCONFIG_PPLOW(pplow) |
SVC_I3C_MCONFIG_ODBAUD(odbaud) |
SVC_I3C_MCONFIG_ODHPP(odhpp) |
SVC_I3C_MCONFIG_SKEW(0) |
SVC_I3C_MCONFIG_I2CBAUD(i2cbaud);
writel(reg, master->regs + SVC_I3C_MCONFIG);
/* Master core's registration */
ret = i3c_master_get_free_addr(m, 0);
if (ret < 0)
goto rpm_out;
info.dyn_addr = ret;
writel(SVC_MDYNADDR_VALID | SVC_MDYNADDR_ADDR(info.dyn_addr),
master->regs + SVC_I3C_MDYNADDR);
ret = i3c_master_set_info(&master->base, &info);
if (ret)
goto rpm_out;
rpm_out:
pm_runtime_mark_last_busy(master->dev);
pm_runtime_put_autosuspend(master->dev);
return ret;
}
static void svc_i3c_master_bus_cleanup(struct i3c_master_controller *m)
{
struct svc_i3c_master *master = to_svc_i3c_master(m);
int ret;
ret = pm_runtime_resume_and_get(master->dev);
if (ret < 0) {
dev_err(master->dev, "<%s> Cannot get runtime PM.\n", __func__);
return;
}
svc_i3c_master_disable_interrupts(master);
/* Disable master */
writel(0, master->regs + SVC_I3C_MCONFIG);
pm_runtime_mark_last_busy(master->dev);
pm_runtime_put_autosuspend(master->dev);
}
static int svc_i3c_master_reserve_slot(struct svc_i3c_master *master)
{
unsigned int slot;
if (!(master->free_slots & GENMASK(SVC_I3C_MAX_DEVS - 1, 0)))
return -ENOSPC;
slot = ffs(master->free_slots) - 1;
master->free_slots &= ~BIT(slot);
return slot;
}
static void svc_i3c_master_release_slot(struct svc_i3c_master *master,
unsigned int slot)
{
master->free_slots |= BIT(slot);
}
static int svc_i3c_master_attach_i3c_dev(struct i3c_dev_desc *dev)
{
struct i3c_master_controller *m = i3c_dev_get_master(dev);
struct svc_i3c_master *master = to_svc_i3c_master(m);
struct svc_i3c_i2c_dev_data *data;
int slot;
slot = svc_i3c_master_reserve_slot(master);
if (slot < 0)
return slot;
data = kzalloc(sizeof(*data), GFP_KERNEL);
if (!data) {
svc_i3c_master_release_slot(master, slot);
return -ENOMEM;
}
data->ibi = -1;
data->index = slot;
master->addrs[slot] = dev->info.dyn_addr ? dev->info.dyn_addr :
dev->info.static_addr;
master->descs[slot] = dev;
i3c_dev_set_master_data(dev, data);
return 0;
}
static int svc_i3c_master_reattach_i3c_dev(struct i3c_dev_desc *dev,
u8 old_dyn_addr)
{
struct i3c_master_controller *m = i3c_dev_get_master(dev);
struct svc_i3c_master *master = to_svc_i3c_master(m);
struct svc_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
master->addrs[data->index] = dev->info.dyn_addr ? dev->info.dyn_addr :
dev->info.static_addr;
return 0;
}
static void svc_i3c_master_detach_i3c_dev(struct i3c_dev_desc *dev)
{
struct svc_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
struct i3c_master_controller *m = i3c_dev_get_master(dev);
struct svc_i3c_master *master = to_svc_i3c_master(m);
master->addrs[data->index] = 0;
svc_i3c_master_release_slot(master, data->index);
kfree(data);
}
static int svc_i3c_master_attach_i2c_dev(struct i2c_dev_desc *dev)
{
struct i3c_master_controller *m = i2c_dev_get_master(dev);
struct svc_i3c_master *master = to_svc_i3c_master(m);
struct svc_i3c_i2c_dev_data *data;
int slot;
slot = svc_i3c_master_reserve_slot(master);
if (slot < 0)
return slot;
data = kzalloc(sizeof(*data), GFP_KERNEL);
if (!data) {
svc_i3c_master_release_slot(master, slot);
return -ENOMEM;
}
data->index = slot;
master->addrs[slot] = dev->addr;
i2c_dev_set_master_data(dev, data);
return 0;
}
static void svc_i3c_master_detach_i2c_dev(struct i2c_dev_desc *dev)
{
struct svc_i3c_i2c_dev_data *data = i2c_dev_get_master_data(dev);
struct i3c_master_controller *m = i2c_dev_get_master(dev);
struct svc_i3c_master *master = to_svc_i3c_master(m);
svc_i3c_master_release_slot(master, data->index);
kfree(data);
}
static int svc_i3c_master_readb(struct svc_i3c_master *master, u8 *dst,
unsigned int len)
{
int ret, i;
u32 reg;
for (i = 0; i < len; i++) {
ret = readl_poll_timeout_atomic(master->regs + SVC_I3C_MSTATUS,
reg,
SVC_I3C_MSTATUS_RXPEND(reg),
0, 1000);
if (ret)
return ret;
dst[i] = readl(master->regs + SVC_I3C_MRDATAB);
}
return 0;
}
static int svc_i3c_master_do_daa_locked(struct svc_i3c_master *master,
u8 *addrs, unsigned int *count)
{
u64 prov_id[SVC_I3C_MAX_DEVS] = {}, nacking_prov_id = 0;
unsigned int dev_nb = 0, last_addr = 0;
u32 reg;
int ret, i;
while (true) {
/* Enter/proceed with DAA */
writel(SVC_I3C_MCTRL_REQUEST_PROC_DAA |
SVC_I3C_MCTRL_TYPE_I3C |
SVC_I3C_MCTRL_IBIRESP_NACK |
SVC_I3C_MCTRL_DIR(SVC_I3C_MCTRL_DIR_WRITE),
master->regs + SVC_I3C_MCTRL);
/*
* Either one slave will send its ID, or the assignment process
* is done.
*/
ret = readl_poll_timeout_atomic(master->regs + SVC_I3C_MSTATUS,
reg,
SVC_I3C_MSTATUS_RXPEND(reg) |
SVC_I3C_MSTATUS_MCTRLDONE(reg),
1, 1000);
if (ret)
return ret;
if (SVC_I3C_MSTATUS_RXPEND(reg)) {
u8 data[6];
			/*
			 * We only care about the 48-bit provisional ID here,
			 * so that we can detect a device nacking an address
			 * twice. Otherwise, we would just need to flush the
			 * RX FIFO.
			 */
ret = svc_i3c_master_readb(master, data, 6);
if (ret)
return ret;
for (i = 0; i < 6; i++)
prov_id[dev_nb] |= (u64)(data[i]) << (8 * (5 - i));
/* We do not care about the BCR and DCR yet */
ret = svc_i3c_master_readb(master, data, 2);
if (ret)
return ret;
} else if (SVC_I3C_MSTATUS_MCTRLDONE(reg)) {
if (SVC_I3C_MSTATUS_STATE_IDLE(reg) &&
SVC_I3C_MSTATUS_COMPLETE(reg)) {
				/*
				 * All devices received and acked their
				 * dynamic address; this is the natural end of
				 * the DAA procedure.
				 */
break;
} else if (SVC_I3C_MSTATUS_NACKED(reg)) {
/* No I3C devices attached */
if (dev_nb == 0)
break;
				/*
				 * A slave device nacked the address: this is
				 * allowed only once, so DAA is stopped and
				 * then resumed. The same device is supposed
				 * to answer again immediately and shall ack
				 * the address this time.
				 */
if (prov_id[dev_nb] == nacking_prov_id)
return -EIO;
dev_nb--;
nacking_prov_id = prov_id[dev_nb];
svc_i3c_master_emit_stop(master);
continue;
} else {
return -EIO;
}
}
/* Wait for the slave to be ready to receive its address */
ret = readl_poll_timeout_atomic(master->regs + SVC_I3C_MSTATUS,
reg,
SVC_I3C_MSTATUS_MCTRLDONE(reg) &&
SVC_I3C_MSTATUS_STATE_DAA(reg) &&
SVC_I3C_MSTATUS_BETWEEN(reg),
0, 1000);
if (ret)
return ret;
/* Give the slave device a suitable dynamic address */
ret = i3c_master_get_free_addr(&master->base, last_addr + 1);
if (ret < 0)
return ret;
addrs[dev_nb] = ret;
dev_dbg(master->dev, "DAA: device %d assigned to 0x%02x\n",
dev_nb, addrs[dev_nb]);
writel(addrs[dev_nb], master->regs + SVC_I3C_MWDATAB);
last_addr = addrs[dev_nb++];
}
*count = dev_nb;
return 0;
}
static int svc_i3c_update_ibirules(struct svc_i3c_master *master)
{
struct i3c_dev_desc *dev;
u32 reg_mbyte = 0, reg_nobyte = SVC_I3C_IBIRULES_NOBYTE;
unsigned int mbyte_addr_ok = 0, mbyte_addr_ko = 0, nobyte_addr_ok = 0,
nobyte_addr_ko = 0;
bool list_mbyte = false, list_nobyte = false;
/* Create the IBIRULES register for both cases */
i3c_bus_for_each_i3cdev(&master->base.bus, dev) {
if (I3C_BCR_DEVICE_ROLE(dev->info.bcr) == I3C_BCR_I3C_MASTER)
continue;
if (dev->info.bcr & I3C_BCR_IBI_PAYLOAD) {
reg_mbyte |= SVC_I3C_IBIRULES_ADDR(mbyte_addr_ok,
dev->info.dyn_addr);
/* IBI rules cannot be applied to devices with MSb=1 */
if (dev->info.dyn_addr & BIT(7))
mbyte_addr_ko++;
else
mbyte_addr_ok++;
} else {
reg_nobyte |= SVC_I3C_IBIRULES_ADDR(nobyte_addr_ok,
dev->info.dyn_addr);
/* IBI rules cannot be applied to devices with MSb=1 */
if (dev->info.dyn_addr & BIT(7))
nobyte_addr_ko++;
else
nobyte_addr_ok++;
}
}
	/* Check whether the device lists can be handled by the hardware */
if (!mbyte_addr_ko && mbyte_addr_ok <= SVC_I3C_IBIRULES_ADDRS)
list_mbyte = true;
if (!nobyte_addr_ko && nobyte_addr_ok <= SVC_I3C_IBIRULES_ADDRS)
list_nobyte = true;
/* No list can be properly handled, return an error */
if (!list_mbyte && !list_nobyte)
return -ERANGE;
	/* Pick the first list that the hardware can handle; the choice is arbitrary */
if (list_mbyte)
writel(reg_mbyte, master->regs + SVC_I3C_IBIRULES);
else
writel(reg_nobyte, master->regs + SVC_I3C_IBIRULES);
return 0;
}
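/*
 * Illustrative note on the packing above: SVC_I3C_IBIRULES_ADDR() stores up
 * to five 6-bit address slots in IBIRULES[29:0], so only the low 6 bits of
 * each dynamic address can be matched, which is why addresses with their
 * most significant bit set are counted as "ko" and force a software
 * fallback. For example (hypothetical addresses), 0x09 in slot 0 and 0x0a
 * in slot 1 contribute 0x09 | (0x0a << 6) to the register value.
 */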
static int svc_i3c_master_do_daa(struct i3c_master_controller *m)
{
struct svc_i3c_master *master = to_svc_i3c_master(m);
u8 addrs[SVC_I3C_MAX_DEVS];
unsigned long flags;
unsigned int dev_nb;
int ret, i;
ret = pm_runtime_resume_and_get(master->dev);
if (ret < 0) {
dev_err(master->dev, "<%s> Cannot get runtime PM.\n", __func__);
return ret;
}
spin_lock_irqsave(&master->xferqueue.lock, flags);
ret = svc_i3c_master_do_daa_locked(master, addrs, &dev_nb);
spin_unlock_irqrestore(&master->xferqueue.lock, flags);
if (ret) {
svc_i3c_master_emit_stop(master);
svc_i3c_master_clear_merrwarn(master);
goto rpm_out;
}
	/* Register with the core all devices that took part in the DAA */
for (i = 0; i < dev_nb; i++) {
ret = i3c_master_add_i3c_dev_locked(m, addrs[i]);
if (ret)
goto rpm_out;
}
/* Configure IBI auto-rules */
ret = svc_i3c_update_ibirules(master);
if (ret)
dev_err(master->dev, "Cannot handle such a list of devices");
rpm_out:
pm_runtime_mark_last_busy(master->dev);
pm_runtime_put_autosuspend(master->dev);
return ret;
}
static int svc_i3c_master_read(struct svc_i3c_master *master,
u8 *in, unsigned int len)
{
int offset = 0, i;
u32 mdctrl, mstatus;
bool completed = false;
unsigned int count;
unsigned long start = jiffies;
while (!completed) {
mstatus = readl(master->regs + SVC_I3C_MSTATUS);
if (SVC_I3C_MSTATUS_COMPLETE(mstatus) != 0)
completed = true;
if (time_after(jiffies, start + msecs_to_jiffies(1000))) {
dev_dbg(master->dev, "I3C read timeout\n");
return -ETIMEDOUT;
}
mdctrl = readl(master->regs + SVC_I3C_MDATACTRL);
count = SVC_I3C_MDATACTRL_RXCOUNT(mdctrl);
if (offset + count > len) {
dev_err(master->dev, "I3C receive length too long!\n");
return -EINVAL;
}
for (i = 0; i < count; i++)
in[offset + i] = readl(master->regs + SVC_I3C_MRDATAB);
offset += count;
}
return offset;
}
static int svc_i3c_master_write(struct svc_i3c_master *master,
const u8 *out, unsigned int len)
{
int offset = 0, ret;
u32 mdctrl;
while (offset < len) {
ret = readl_poll_timeout(master->regs + SVC_I3C_MDATACTRL,
mdctrl,
!(mdctrl & SVC_I3C_MDATACTRL_TXFULL),
0, 1000);
if (ret)
return ret;
/*
* The last byte to be sent over the bus must either have the
* "end" bit set or be written in MWDATABE.
*/
if (likely(offset < (len - 1)))
writel(out[offset++], master->regs + SVC_I3C_MWDATAB);
else
writel(out[offset++], master->regs + SVC_I3C_MWDATABE);
}
return 0;
}
static int svc_i3c_master_xfer(struct svc_i3c_master *master,
bool rnw, unsigned int xfer_type, u8 addr,
u8 *in, const u8 *out, unsigned int xfer_len,
unsigned int *read_len, bool continued)
{
u32 reg;
int ret;
writel(SVC_I3C_MCTRL_REQUEST_START_ADDR |
xfer_type |
SVC_I3C_MCTRL_IBIRESP_NACK |
SVC_I3C_MCTRL_DIR(rnw) |
SVC_I3C_MCTRL_ADDR(addr) |
SVC_I3C_MCTRL_RDTERM(*read_len),
master->regs + SVC_I3C_MCTRL);
ret = readl_poll_timeout(master->regs + SVC_I3C_MSTATUS, reg,
SVC_I3C_MSTATUS_MCTRLDONE(reg), 0, 1000);
if (ret)
goto emit_stop;
if (readl(master->regs + SVC_I3C_MERRWARN) & SVC_I3C_MERRWARN_NACK) {
ret = -ENXIO;
goto emit_stop;
}
if (rnw)
ret = svc_i3c_master_read(master, in, xfer_len);
else
ret = svc_i3c_master_write(master, out, xfer_len);
if (ret < 0)
goto emit_stop;
if (rnw)
*read_len = ret;
ret = readl_poll_timeout(master->regs + SVC_I3C_MSTATUS, reg,
SVC_I3C_MSTATUS_COMPLETE(reg), 0, 1000);
if (ret)
goto emit_stop;
writel(SVC_I3C_MINT_COMPLETE, master->regs + SVC_I3C_MSTATUS);
if (!continued) {
svc_i3c_master_emit_stop(master);
/* Wait idle if stop is sent. */
readl_poll_timeout(master->regs + SVC_I3C_MSTATUS, reg,
SVC_I3C_MSTATUS_STATE_IDLE(reg), 0, 1000);
}
return 0;
emit_stop:
svc_i3c_master_emit_stop(master);
svc_i3c_master_clear_merrwarn(master);
return ret;
}
static struct svc_i3c_xfer *
svc_i3c_master_alloc_xfer(struct svc_i3c_master *master, unsigned int ncmds)
{
struct svc_i3c_xfer *xfer;
xfer = kzalloc(struct_size(xfer, cmds, ncmds), GFP_KERNEL);
if (!xfer)
return NULL;
INIT_LIST_HEAD(&xfer->node);
xfer->ncmds = ncmds;
xfer->ret = -ETIMEDOUT;
return xfer;
}
static void svc_i3c_master_free_xfer(struct svc_i3c_xfer *xfer)
{
kfree(xfer);
}
static void svc_i3c_master_dequeue_xfer_locked(struct svc_i3c_master *master,
struct svc_i3c_xfer *xfer)
{
if (master->xferqueue.cur == xfer)
master->xferqueue.cur = NULL;
else
list_del_init(&xfer->node);
}
static void svc_i3c_master_dequeue_xfer(struct svc_i3c_master *master,
struct svc_i3c_xfer *xfer)
{
unsigned long flags;
spin_lock_irqsave(&master->xferqueue.lock, flags);
svc_i3c_master_dequeue_xfer_locked(master, xfer);
spin_unlock_irqrestore(&master->xferqueue.lock, flags);
}
static void svc_i3c_master_start_xfer_locked(struct svc_i3c_master *master)
{
struct svc_i3c_xfer *xfer = master->xferqueue.cur;
int ret, i;
if (!xfer)
return;
svc_i3c_master_clear_merrwarn(master);
svc_i3c_master_flush_fifo(master);
for (i = 0; i < xfer->ncmds; i++) {
struct svc_i3c_cmd *cmd = &xfer->cmds[i];
ret = svc_i3c_master_xfer(master, cmd->rnw, xfer->type,
cmd->addr, cmd->in, cmd->out,
cmd->len, &cmd->read_len,
cmd->continued);
if (ret)
break;
}
xfer->ret = ret;
complete(&xfer->comp);
if (ret < 0)
svc_i3c_master_dequeue_xfer_locked(master, xfer);
xfer = list_first_entry_or_null(&master->xferqueue.list,
struct svc_i3c_xfer,
node);
if (xfer)
list_del_init(&xfer->node);
master->xferqueue.cur = xfer;
svc_i3c_master_start_xfer_locked(master);
}
static void svc_i3c_master_enqueue_xfer(struct svc_i3c_master *master,
struct svc_i3c_xfer *xfer)
{
unsigned long flags;
int ret;
ret = pm_runtime_resume_and_get(master->dev);
if (ret < 0) {
dev_err(master->dev, "<%s> Cannot get runtime PM.\n", __func__);
return;
}
init_completion(&xfer->comp);
spin_lock_irqsave(&master->xferqueue.lock, flags);
if (master->xferqueue.cur) {
list_add_tail(&xfer->node, &master->xferqueue.list);
} else {
master->xferqueue.cur = xfer;
svc_i3c_master_start_xfer_locked(master);
}
spin_unlock_irqrestore(&master->xferqueue.lock, flags);
pm_runtime_mark_last_busy(master->dev);
pm_runtime_put_autosuspend(master->dev);
}
static bool
svc_i3c_master_supports_ccc_cmd(struct i3c_master_controller *master,
const struct i3c_ccc_cmd *cmd)
{
/* No software support for CCC commands targeting more than one slave */
return (cmd->ndests == 1);
}
static int svc_i3c_master_send_bdcast_ccc_cmd(struct svc_i3c_master *master,
struct i3c_ccc_cmd *ccc)
{
unsigned int xfer_len = ccc->dests[0].payload.len + 1;
struct svc_i3c_xfer *xfer;
struct svc_i3c_cmd *cmd;
u8 *buf;
int ret;
xfer = svc_i3c_master_alloc_xfer(master, 1);
if (!xfer)
return -ENOMEM;
buf = kmalloc(xfer_len, GFP_KERNEL);
if (!buf) {
svc_i3c_master_free_xfer(xfer);
return -ENOMEM;
}
buf[0] = ccc->id;
memcpy(&buf[1], ccc->dests[0].payload.data, ccc->dests[0].payload.len);
xfer->type = SVC_I3C_MCTRL_TYPE_I3C;
cmd = &xfer->cmds[0];
cmd->addr = ccc->dests[0].addr;
cmd->rnw = ccc->rnw;
cmd->in = NULL;
cmd->out = buf;
cmd->len = xfer_len;
cmd->read_len = 0;
cmd->continued = false;
svc_i3c_master_enqueue_xfer(master, xfer);
if (!wait_for_completion_timeout(&xfer->comp, msecs_to_jiffies(1000)))
svc_i3c_master_dequeue_xfer(master, xfer);
ret = xfer->ret;
kfree(buf);
svc_i3c_master_free_xfer(xfer);
return ret;
}
static int svc_i3c_master_send_direct_ccc_cmd(struct svc_i3c_master *master,
struct i3c_ccc_cmd *ccc)
{
unsigned int xfer_len = ccc->dests[0].payload.len;
unsigned int read_len = ccc->rnw ? xfer_len : 0;
struct svc_i3c_xfer *xfer;
struct svc_i3c_cmd *cmd;
int ret;
xfer = svc_i3c_master_alloc_xfer(master, 2);
if (!xfer)
return -ENOMEM;
xfer->type = SVC_I3C_MCTRL_TYPE_I3C;
	/* Broadcast message */
cmd = &xfer->cmds[0];
cmd->addr = I3C_BROADCAST_ADDR;
cmd->rnw = 0;
cmd->in = NULL;
cmd->out = &ccc->id;
cmd->len = 1;
cmd->read_len = 0;
cmd->continued = true;
/* Directed message */
cmd = &xfer->cmds[1];
cmd->addr = ccc->dests[0].addr;
cmd->rnw = ccc->rnw;
cmd->in = ccc->rnw ? ccc->dests[0].payload.data : NULL;
	cmd->out = ccc->rnw ? NULL : ccc->dests[0].payload.data;
cmd->len = xfer_len;
cmd->read_len = read_len;
cmd->continued = false;
svc_i3c_master_enqueue_xfer(master, xfer);
if (!wait_for_completion_timeout(&xfer->comp, msecs_to_jiffies(1000)))
svc_i3c_master_dequeue_xfer(master, xfer);
if (cmd->read_len != xfer_len)
ccc->dests[0].payload.len = cmd->read_len;
ret = xfer->ret;
svc_i3c_master_free_xfer(xfer);
return ret;
}
static int svc_i3c_master_send_ccc_cmd(struct i3c_master_controller *m,
struct i3c_ccc_cmd *cmd)
{
struct svc_i3c_master *master = to_svc_i3c_master(m);
bool broadcast = cmd->id < 0x80;
int ret;
if (broadcast)
ret = svc_i3c_master_send_bdcast_ccc_cmd(master, cmd);
else
ret = svc_i3c_master_send_direct_ccc_cmd(master, cmd);
if (ret)
cmd->err = I3C_ERROR_M2;
return ret;
}
static int svc_i3c_master_priv_xfers(struct i3c_dev_desc *dev,
struct i3c_priv_xfer *xfers,
int nxfers)
{
struct i3c_master_controller *m = i3c_dev_get_master(dev);
struct svc_i3c_master *master = to_svc_i3c_master(m);
struct svc_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
struct svc_i3c_xfer *xfer;
int ret, i;
xfer = svc_i3c_master_alloc_xfer(master, nxfers);
if (!xfer)
return -ENOMEM;
xfer->type = SVC_I3C_MCTRL_TYPE_I3C;
for (i = 0; i < nxfers; i++) {
struct svc_i3c_cmd *cmd = &xfer->cmds[i];
cmd->addr = master->addrs[data->index];
cmd->rnw = xfers[i].rnw;
cmd->in = xfers[i].rnw ? xfers[i].data.in : NULL;
cmd->out = xfers[i].rnw ? NULL : xfers[i].data.out;
cmd->len = xfers[i].len;
cmd->read_len = xfers[i].rnw ? xfers[i].len : 0;
cmd->continued = (i + 1) < nxfers;
}
svc_i3c_master_enqueue_xfer(master, xfer);
if (!wait_for_completion_timeout(&xfer->comp, msecs_to_jiffies(1000)))
svc_i3c_master_dequeue_xfer(master, xfer);
ret = xfer->ret;
svc_i3c_master_free_xfer(xfer);
return ret;
}
static int svc_i3c_master_i2c_xfers(struct i2c_dev_desc *dev,
const struct i2c_msg *xfers,
int nxfers)
{
struct i3c_master_controller *m = i2c_dev_get_master(dev);
struct svc_i3c_master *master = to_svc_i3c_master(m);
struct svc_i3c_i2c_dev_data *data = i2c_dev_get_master_data(dev);
struct svc_i3c_xfer *xfer;
int ret, i;
xfer = svc_i3c_master_alloc_xfer(master, nxfers);
if (!xfer)
return -ENOMEM;
xfer->type = SVC_I3C_MCTRL_TYPE_I2C;
for (i = 0; i < nxfers; i++) {
struct svc_i3c_cmd *cmd = &xfer->cmds[i];
cmd->addr = master->addrs[data->index];
cmd->rnw = xfers[i].flags & I2C_M_RD;
cmd->in = cmd->rnw ? xfers[i].buf : NULL;
cmd->out = cmd->rnw ? NULL : xfers[i].buf;
cmd->len = xfers[i].len;
cmd->read_len = cmd->rnw ? xfers[i].len : 0;
cmd->continued = (i + 1 < nxfers);
}
svc_i3c_master_enqueue_xfer(master, xfer);
if (!wait_for_completion_timeout(&xfer->comp, msecs_to_jiffies(1000)))
svc_i3c_master_dequeue_xfer(master, xfer);
ret = xfer->ret;
svc_i3c_master_free_xfer(xfer);
return ret;
}
static int svc_i3c_master_request_ibi(struct i3c_dev_desc *dev,
const struct i3c_ibi_setup *req)
{
struct i3c_master_controller *m = i3c_dev_get_master(dev);
struct svc_i3c_master *master = to_svc_i3c_master(m);
struct svc_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
unsigned long flags;
unsigned int i;
if (dev->ibi->max_payload_len > SVC_I3C_FIFO_SIZE) {
dev_err(master->dev, "IBI max payload %d should be < %d\n",
dev->ibi->max_payload_len, SVC_I3C_FIFO_SIZE);
return -ERANGE;
}
data->ibi_pool = i3c_generic_ibi_alloc_pool(dev, req);
if (IS_ERR(data->ibi_pool))
return PTR_ERR(data->ibi_pool);
spin_lock_irqsave(&master->ibi.lock, flags);
for (i = 0; i < master->ibi.num_slots; i++) {
if (!master->ibi.slots[i]) {
data->ibi = i;
master->ibi.slots[i] = dev;
break;
}
}
spin_unlock_irqrestore(&master->ibi.lock, flags);
if (i < master->ibi.num_slots)
return 0;
i3c_generic_ibi_free_pool(data->ibi_pool);
data->ibi_pool = NULL;
return -ENOSPC;
}
static void svc_i3c_master_free_ibi(struct i3c_dev_desc *dev)
{
struct i3c_master_controller *m = i3c_dev_get_master(dev);
struct svc_i3c_master *master = to_svc_i3c_master(m);
struct svc_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
unsigned long flags;
spin_lock_irqsave(&master->ibi.lock, flags);
master->ibi.slots[data->ibi] = NULL;
data->ibi = -1;
spin_unlock_irqrestore(&master->ibi.lock, flags);
i3c_generic_ibi_free_pool(data->ibi_pool);
}
static int svc_i3c_master_enable_ibi(struct i3c_dev_desc *dev)
{
struct i3c_master_controller *m = i3c_dev_get_master(dev);
struct svc_i3c_master *master = to_svc_i3c_master(m);
int ret;
ret = pm_runtime_resume_and_get(master->dev);
if (ret < 0) {
dev_err(master->dev, "<%s> Cannot get runtime PM.\n", __func__);
return ret;
}
svc_i3c_master_enable_interrupts(master, SVC_I3C_MINT_SLVSTART);
return i3c_master_enec_locked(m, dev->info.dyn_addr, I3C_CCC_EVENT_SIR);
}
static int svc_i3c_master_disable_ibi(struct i3c_dev_desc *dev)
{
struct i3c_master_controller *m = i3c_dev_get_master(dev);
struct svc_i3c_master *master = to_svc_i3c_master(m);
int ret;
svc_i3c_master_disable_interrupts(master);
ret = i3c_master_disec_locked(m, dev->info.dyn_addr, I3C_CCC_EVENT_SIR);
pm_runtime_mark_last_busy(master->dev);
pm_runtime_put_autosuspend(master->dev);
return ret;
}
static void svc_i3c_master_recycle_ibi_slot(struct i3c_dev_desc *dev,
struct i3c_ibi_slot *slot)
{
struct svc_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
i3c_generic_ibi_recycle_slot(data->ibi_pool, slot);
}
static const struct i3c_master_controller_ops svc_i3c_master_ops = {
.bus_init = svc_i3c_master_bus_init,
.bus_cleanup = svc_i3c_master_bus_cleanup,
.attach_i3c_dev = svc_i3c_master_attach_i3c_dev,
.detach_i3c_dev = svc_i3c_master_detach_i3c_dev,
.reattach_i3c_dev = svc_i3c_master_reattach_i3c_dev,
.attach_i2c_dev = svc_i3c_master_attach_i2c_dev,
.detach_i2c_dev = svc_i3c_master_detach_i2c_dev,
.do_daa = svc_i3c_master_do_daa,
.supports_ccc_cmd = svc_i3c_master_supports_ccc_cmd,
.send_ccc_cmd = svc_i3c_master_send_ccc_cmd,
.priv_xfers = svc_i3c_master_priv_xfers,
.i2c_xfers = svc_i3c_master_i2c_xfers,
.request_ibi = svc_i3c_master_request_ibi,
.free_ibi = svc_i3c_master_free_ibi,
.recycle_ibi_slot = svc_i3c_master_recycle_ibi_slot,
.enable_ibi = svc_i3c_master_enable_ibi,
.disable_ibi = svc_i3c_master_disable_ibi,
};
static int svc_i3c_master_prepare_clks(struct svc_i3c_master *master)
{
int ret = 0;
ret = clk_prepare_enable(master->pclk);
if (ret)
return ret;
ret = clk_prepare_enable(master->fclk);
if (ret) {
clk_disable_unprepare(master->pclk);
return ret;
}
ret = clk_prepare_enable(master->sclk);
if (ret) {
clk_disable_unprepare(master->pclk);
clk_disable_unprepare(master->fclk);
return ret;
}
return 0;
}
static void svc_i3c_master_unprepare_clks(struct svc_i3c_master *master)
{
clk_disable_unprepare(master->pclk);
clk_disable_unprepare(master->fclk);
clk_disable_unprepare(master->sclk);
}
static int svc_i3c_master_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct svc_i3c_master *master;
int ret;
master = devm_kzalloc(dev, sizeof(*master), GFP_KERNEL);
if (!master)
return -ENOMEM;
master->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(master->regs))
return PTR_ERR(master->regs);
master->pclk = devm_clk_get(dev, "pclk");
if (IS_ERR(master->pclk))
return PTR_ERR(master->pclk);
master->fclk = devm_clk_get(dev, "fast_clk");
if (IS_ERR(master->fclk))
return PTR_ERR(master->fclk);
master->sclk = devm_clk_get(dev, "slow_clk");
if (IS_ERR(master->sclk))
return PTR_ERR(master->sclk);
master->irq = platform_get_irq(pdev, 0);
if (master->irq < 0)
return master->irq;
master->dev = dev;
ret = svc_i3c_master_prepare_clks(master);
if (ret)
return ret;
INIT_WORK(&master->hj_work, svc_i3c_master_hj_work);
INIT_WORK(&master->ibi_work, svc_i3c_master_ibi_work);
ret = devm_request_irq(dev, master->irq, svc_i3c_master_irq_handler,
IRQF_NO_SUSPEND, "svc-i3c-irq", master);
if (ret)
goto err_disable_clks;
master->free_slots = GENMASK(SVC_I3C_MAX_DEVS - 1, 0);
spin_lock_init(&master->xferqueue.lock);
INIT_LIST_HEAD(&master->xferqueue.list);
spin_lock_init(&master->ibi.lock);
master->ibi.num_slots = SVC_I3C_MAX_DEVS;
master->ibi.slots = devm_kcalloc(&pdev->dev, master->ibi.num_slots,
sizeof(*master->ibi.slots),
GFP_KERNEL);
if (!master->ibi.slots) {
ret = -ENOMEM;
goto err_disable_clks;
}
platform_set_drvdata(pdev, master);
pm_runtime_set_autosuspend_delay(&pdev->dev, SVC_I3C_PM_TIMEOUT_MS);
pm_runtime_use_autosuspend(&pdev->dev);
pm_runtime_get_noresume(&pdev->dev);
pm_runtime_set_active(&pdev->dev);
pm_runtime_enable(&pdev->dev);
svc_i3c_master_reset(master);
/* Register the master */
ret = i3c_master_register(&master->base, &pdev->dev,
&svc_i3c_master_ops, false);
if (ret)
goto rpm_disable;
pm_runtime_mark_last_busy(&pdev->dev);
pm_runtime_put_autosuspend(&pdev->dev);
return 0;
rpm_disable:
pm_runtime_dont_use_autosuspend(&pdev->dev);
pm_runtime_put_noidle(&pdev->dev);
pm_runtime_set_suspended(&pdev->dev);
pm_runtime_disable(&pdev->dev);
err_disable_clks:
svc_i3c_master_unprepare_clks(master);
return ret;
}
static void svc_i3c_master_remove(struct platform_device *pdev)
{
struct svc_i3c_master *master = platform_get_drvdata(pdev);
i3c_master_unregister(&master->base);
pm_runtime_dont_use_autosuspend(&pdev->dev);
pm_runtime_disable(&pdev->dev);
}
static void svc_i3c_save_regs(struct svc_i3c_master *master)
{
master->saved_regs.mconfig = readl(master->regs + SVC_I3C_MCONFIG);
master->saved_regs.mdynaddr = readl(master->regs + SVC_I3C_MDYNADDR);
}
static void svc_i3c_restore_regs(struct svc_i3c_master *master)
{
if (readl(master->regs + SVC_I3C_MDYNADDR) !=
master->saved_regs.mdynaddr) {
writel(master->saved_regs.mconfig,
master->regs + SVC_I3C_MCONFIG);
writel(master->saved_regs.mdynaddr,
master->regs + SVC_I3C_MDYNADDR);
}
}
static int __maybe_unused svc_i3c_runtime_suspend(struct device *dev)
{
struct svc_i3c_master *master = dev_get_drvdata(dev);
svc_i3c_save_regs(master);
svc_i3c_master_unprepare_clks(master);
pinctrl_pm_select_sleep_state(dev);
return 0;
}
static int __maybe_unused svc_i3c_runtime_resume(struct device *dev)
{
struct svc_i3c_master *master = dev_get_drvdata(dev);
pinctrl_pm_select_default_state(dev);
svc_i3c_master_prepare_clks(master);
svc_i3c_restore_regs(master);
return 0;
}
static const struct dev_pm_ops svc_i3c_pm_ops = {
SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
pm_runtime_force_resume)
SET_RUNTIME_PM_OPS(svc_i3c_runtime_suspend,
svc_i3c_runtime_resume, NULL)
};
static const struct of_device_id svc_i3c_master_of_match_tbl[] = {
{ .compatible = "silvaco,i3c-master" },
{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, svc_i3c_master_of_match_tbl);
static struct platform_driver svc_i3c_master = {
.probe = svc_i3c_master_probe,
.remove_new = svc_i3c_master_remove,
.driver = {
.name = "silvaco-i3c-master",
.of_match_table = svc_i3c_master_of_match_tbl,
.pm = &svc_i3c_pm_ops,
},
};
module_platform_driver(svc_i3c_master);
MODULE_AUTHOR("Conor Culhane <[email protected]>");
MODULE_AUTHOR("Miquel Raynal <[email protected]>");
MODULE_DESCRIPTION("Silvaco dual-role I3C master driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/i3c/master/svc-i3c-master.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2018 Cadence Design Systems Inc.
*
* Author: Boris Brezillon <[email protected]>
*/
#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/i3c/master.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#define DEV_ID 0x0
#define DEV_ID_I3C_MASTER 0x5034
#define CONF_STATUS0 0x4
#define CONF_STATUS0_CMDR_DEPTH(x) (4 << (((x) & GENMASK(31, 29)) >> 29))
#define CONF_STATUS0_ECC_CHK BIT(28)
#define CONF_STATUS0_INTEG_CHK BIT(27)
#define CONF_STATUS0_CSR_DAP_CHK BIT(26)
#define CONF_STATUS0_TRANS_TOUT_CHK BIT(25)
#define CONF_STATUS0_PROT_FAULTS_CHK BIT(24)
#define CONF_STATUS0_GPO_NUM(x) (((x) & GENMASK(23, 16)) >> 16)
#define CONF_STATUS0_GPI_NUM(x) (((x) & GENMASK(15, 8)) >> 8)
#define CONF_STATUS0_IBIR_DEPTH(x)	(4 << (((x) & GENMASK(7, 6)) >> 6))
#define CONF_STATUS0_SUPPORTS_DDR BIT(5)
#define CONF_STATUS0_SEC_MASTER BIT(4)
#define CONF_STATUS0_DEVS_NUM(x) ((x) & GENMASK(3, 0))
#define CONF_STATUS1 0x8
#define CONF_STATUS1_IBI_HW_RES(x) ((((x) & GENMASK(31, 28)) >> 28) + 1)
#define CONF_STATUS1_CMD_DEPTH(x) (4 << (((x) & GENMASK(27, 26)) >> 26))
#define CONF_STATUS1_SLVDDR_RX_DEPTH(x) (8 << (((x) & GENMASK(25, 21)) >> 21))
#define CONF_STATUS1_SLVDDR_TX_DEPTH(x) (8 << (((x) & GENMASK(20, 16)) >> 16))
#define CONF_STATUS1_IBI_DEPTH(x) (2 << (((x) & GENMASK(12, 10)) >> 10))
#define CONF_STATUS1_RX_DEPTH(x) (8 << (((x) & GENMASK(9, 5)) >> 5))
#define CONF_STATUS1_TX_DEPTH(x) (8 << ((x) & GENMASK(4, 0)))
#define REV_ID 0xc
#define REV_ID_VID(id) (((id) & GENMASK(31, 20)) >> 20)
#define REV_ID_PID(id) (((id) & GENMASK(19, 8)) >> 8)
#define REV_ID_REV_MAJOR(id) (((id) & GENMASK(7, 4)) >> 4)
#define REV_ID_REV_MINOR(id) ((id) & GENMASK(3, 0))
#define CTRL 0x10
#define CTRL_DEV_EN BIT(31)
#define CTRL_HALT_EN BIT(30)
#define CTRL_MCS BIT(29)
#define CTRL_MCS_EN BIT(28)
#define CTRL_THD_DELAY(x) (((x) << 24) & GENMASK(25, 24))
#define CTRL_HJ_DISEC BIT(8)
#define CTRL_MST_ACK BIT(7)
#define CTRL_HJ_ACK BIT(6)
#define CTRL_HJ_INIT BIT(5)
#define CTRL_MST_INIT BIT(4)
#define CTRL_AHDR_OPT BIT(3)
#define CTRL_PURE_BUS_MODE 0
#define CTRL_MIXED_FAST_BUS_MODE 2
#define CTRL_MIXED_SLOW_BUS_MODE 3
#define CTRL_BUS_MODE_MASK GENMASK(1, 0)
#define THD_DELAY_MAX 3
#define PRESCL_CTRL0 0x14
#define PRESCL_CTRL0_I2C(x) ((x) << 16)
#define PRESCL_CTRL0_I3C(x) (x)
#define PRESCL_CTRL0_MAX GENMASK(9, 0)
#define PRESCL_CTRL1 0x18
#define PRESCL_CTRL1_PP_LOW_MASK GENMASK(15, 8)
#define PRESCL_CTRL1_PP_LOW(x) ((x) << 8)
#define PRESCL_CTRL1_OD_LOW_MASK GENMASK(7, 0)
#define PRESCL_CTRL1_OD_LOW(x) (x)
#define MST_IER 0x20
#define MST_IDR 0x24
#define MST_IMR 0x28
#define MST_ICR 0x2c
#define MST_ISR 0x30
#define MST_INT_HALTED BIT(18)
#define MST_INT_MR_DONE BIT(17)
#define MST_INT_IMM_COMP BIT(16)
#define MST_INT_TX_THR BIT(15)
#define MST_INT_TX_OVF BIT(14)
#define MST_INT_IBID_THR BIT(12)
#define MST_INT_IBID_UNF BIT(11)
#define MST_INT_IBIR_THR BIT(10)
#define MST_INT_IBIR_UNF BIT(9)
#define MST_INT_IBIR_OVF BIT(8)
#define MST_INT_RX_THR BIT(7)
#define MST_INT_RX_UNF BIT(6)
#define MST_INT_CMDD_EMP BIT(5)
#define MST_INT_CMDD_THR BIT(4)
#define MST_INT_CMDD_OVF BIT(3)
#define MST_INT_CMDR_THR BIT(2)
#define MST_INT_CMDR_UNF BIT(1)
#define MST_INT_CMDR_OVF BIT(0)
#define MST_STATUS0 0x34
#define MST_STATUS0_IDLE BIT(18)
#define MST_STATUS0_HALTED BIT(17)
#define MST_STATUS0_MASTER_MODE BIT(16)
#define MST_STATUS0_TX_FULL BIT(13)
#define MST_STATUS0_IBID_FULL BIT(12)
#define MST_STATUS0_IBIR_FULL BIT(11)
#define MST_STATUS0_RX_FULL BIT(10)
#define MST_STATUS0_CMDD_FULL BIT(9)
#define MST_STATUS0_CMDR_FULL BIT(8)
#define MST_STATUS0_TX_EMP BIT(5)
#define MST_STATUS0_IBID_EMP BIT(4)
#define MST_STATUS0_IBIR_EMP BIT(3)
#define MST_STATUS0_RX_EMP BIT(2)
#define MST_STATUS0_CMDD_EMP BIT(1)
#define MST_STATUS0_CMDR_EMP BIT(0)
#define CMDR 0x38
#define CMDR_NO_ERROR 0
#define CMDR_DDR_PREAMBLE_ERROR 1
#define CMDR_DDR_PARITY_ERROR 2
#define CMDR_DDR_RX_FIFO_OVF 3
#define CMDR_DDR_TX_FIFO_UNF 4
#define CMDR_M0_ERROR 5
#define CMDR_M1_ERROR 6
#define CMDR_M2_ERROR 7
#define CMDR_MST_ABORT 8
#define CMDR_NACK_RESP 9
#define CMDR_INVALID_DA 10
#define CMDR_DDR_DROPPED 11
#define CMDR_ERROR(x) (((x) & GENMASK(27, 24)) >> 24)
#define CMDR_XFER_BYTES(x) (((x) & GENMASK(19, 8)) >> 8)
#define CMDR_CMDID_HJACK_DISEC 0xfe
#define CMDR_CMDID_HJACK_ENTDAA 0xff
#define CMDR_CMDID(x) ((x) & GENMASK(7, 0))
#define IBIR 0x3c
#define IBIR_ACKED BIT(12)
#define IBIR_SLVID(x) (((x) & GENMASK(11, 8)) >> 8)
#define IBIR_ERROR BIT(7)
#define IBIR_XFER_BYTES(x) (((x) & GENMASK(6, 2)) >> 2)
#define IBIR_TYPE_IBI 0
#define IBIR_TYPE_HJ 1
#define IBIR_TYPE_MR 2
#define IBIR_TYPE(x) ((x) & GENMASK(1, 0))
#define SLV_IER 0x40
#define SLV_IDR 0x44
#define SLV_IMR 0x48
#define SLV_ICR 0x4c
#define SLV_ISR 0x50
#define SLV_INT_TM BIT(20)
#define SLV_INT_ERROR BIT(19)
#define SLV_INT_EVENT_UP BIT(18)
#define SLV_INT_HJ_DONE BIT(17)
#define SLV_INT_MR_DONE BIT(16)
#define SLV_INT_DA_UPD BIT(15)
#define SLV_INT_SDR_FAIL BIT(14)
#define SLV_INT_DDR_FAIL BIT(13)
#define SLV_INT_M_RD_ABORT BIT(12)
#define SLV_INT_DDR_RX_THR BIT(11)
#define SLV_INT_DDR_TX_THR BIT(10)
#define SLV_INT_SDR_RX_THR BIT(9)
#define SLV_INT_SDR_TX_THR BIT(8)
#define SLV_INT_DDR_RX_UNF BIT(7)
#define SLV_INT_DDR_TX_OVF BIT(6)
#define SLV_INT_SDR_RX_UNF BIT(5)
#define SLV_INT_SDR_TX_OVF BIT(4)
#define SLV_INT_DDR_RD_COMP BIT(3)
#define SLV_INT_DDR_WR_COMP BIT(2)
#define SLV_INT_SDR_RD_COMP BIT(1)
#define SLV_INT_SDR_WR_COMP BIT(0)
#define SLV_STATUS0 0x54
#define SLV_STATUS0_REG_ADDR(s) (((s) & GENMASK(23, 16)) >> 16)
#define SLV_STATUS0_XFRD_BYTES(s) ((s) & GENMASK(15, 0))
#define SLV_STATUS1 0x58
#define SLV_STATUS1_AS(s) (((s) & GENMASK(21, 20)) >> 20)
#define SLV_STATUS1_VEN_TM BIT(19)
#define SLV_STATUS1_HJ_DIS BIT(18)
#define SLV_STATUS1_MR_DIS BIT(17)
#define SLV_STATUS1_PROT_ERR BIT(16)
#define SLV_STATUS1_DA(s)		(((s) & GENMASK(15, 9)) >> 9)
#define SLV_STATUS1_HAS_DA BIT(8)
#define SLV_STATUS1_DDR_RX_FULL BIT(7)
#define SLV_STATUS1_DDR_TX_FULL BIT(6)
#define SLV_STATUS1_DDR_RX_EMPTY BIT(5)
#define SLV_STATUS1_DDR_TX_EMPTY BIT(4)
#define SLV_STATUS1_SDR_RX_FULL BIT(3)
#define SLV_STATUS1_SDR_TX_FULL BIT(2)
#define SLV_STATUS1_SDR_RX_EMPTY BIT(1)
#define SLV_STATUS1_SDR_TX_EMPTY BIT(0)
#define CMD0_FIFO 0x60
#define CMD0_FIFO_IS_DDR BIT(31)
#define CMD0_FIFO_IS_CCC BIT(30)
#define CMD0_FIFO_BCH BIT(29)
#define XMIT_BURST_STATIC_SUBADDR 0
#define XMIT_SINGLE_INC_SUBADDR 1
#define XMIT_SINGLE_STATIC_SUBADDR 2
#define XMIT_BURST_WITHOUT_SUBADDR 3
#define CMD0_FIFO_PRIV_XMIT_MODE(m) ((m) << 27)
#define CMD0_FIFO_SBCA BIT(26)
#define CMD0_FIFO_RSBC BIT(25)
#define CMD0_FIFO_IS_10B BIT(24)
#define CMD0_FIFO_PL_LEN(l) ((l) << 12)
#define CMD0_FIFO_PL_LEN_MAX 4095
#define CMD0_FIFO_DEV_ADDR(a) ((a) << 1)
#define CMD0_FIFO_RNW BIT(0)
#define CMD1_FIFO 0x64
#define CMD1_FIFO_CMDID(id) ((id) << 24)
#define CMD1_FIFO_CSRADDR(a) (a)
#define CMD1_FIFO_CCC(id) (id)
#define TX_FIFO 0x68
#define IMD_CMD0 0x70
#define IMD_CMD0_PL_LEN(l) ((l) << 12)
#define IMD_CMD0_DEV_ADDR(a) ((a) << 1)
#define IMD_CMD0_RNW BIT(0)
#define IMD_CMD1 0x74
#define IMD_CMD1_CCC(id) (id)
#define IMD_DATA 0x78
#define RX_FIFO 0x80
#define IBI_DATA_FIFO 0x84
#define SLV_DDR_TX_FIFO 0x88
#define SLV_DDR_RX_FIFO 0x8c
#define CMD_IBI_THR_CTRL 0x90
#define IBIR_THR(t) ((t) << 24)
#define CMDR_THR(t) ((t) << 16)
#define IBI_THR(t) ((t) << 8)
#define CMD_THR(t) (t)
#define TX_RX_THR_CTRL 0x94
#define RX_THR(t) ((t) << 16)
#define TX_THR(t) (t)
#define SLV_DDR_TX_RX_THR_CTRL 0x98
#define SLV_DDR_RX_THR(t) ((t) << 16)
#define SLV_DDR_TX_THR(t) (t)
#define FLUSH_CTRL 0x9c
#define FLUSH_IBI_RESP BIT(23)
#define FLUSH_CMD_RESP BIT(22)
#define FLUSH_SLV_DDR_RX_FIFO BIT(22)
#define FLUSH_SLV_DDR_TX_FIFO BIT(21)
#define FLUSH_IMM_FIFO BIT(20)
#define FLUSH_IBI_FIFO BIT(19)
#define FLUSH_RX_FIFO BIT(18)
#define FLUSH_TX_FIFO BIT(17)
#define FLUSH_CMD_FIFO BIT(16)
#define TTO_PRESCL_CTRL0 0xb0
#define TTO_PRESCL_CTRL0_DIVB(x) ((x) << 16)
#define TTO_PRESCL_CTRL0_DIVA(x) (x)
#define TTO_PRESCL_CTRL1 0xb4
#define TTO_PRESCL_CTRL1_DIVB(x) ((x) << 16)
#define TTO_PRESCL_CTRL1_DIVA(x) (x)
#define DEVS_CTRL 0xb8
#define DEVS_CTRL_DEV_CLR_SHIFT 16
#define DEVS_CTRL_DEV_CLR_ALL GENMASK(31, 16)
#define DEVS_CTRL_DEV_CLR(dev) BIT(16 + (dev))
#define DEVS_CTRL_DEV_ACTIVE(dev) BIT(dev)
#define DEVS_CTRL_DEVS_ACTIVE_MASK GENMASK(15, 0)
#define MAX_DEVS 16
#define DEV_ID_RR0(d) (0xc0 + ((d) * 0x10))
#define DEV_ID_RR0_LVR_EXT_ADDR BIT(11)
#define DEV_ID_RR0_HDR_CAP BIT(10)
#define DEV_ID_RR0_IS_I3C BIT(9)
#define DEV_ID_RR0_DEV_ADDR_MASK (GENMASK(6, 0) | GENMASK(15, 13))
#define DEV_ID_RR0_SET_DEV_ADDR(a) (((a) & GENMASK(6, 0)) | \
(((a) & GENMASK(9, 7)) << 6))
#define DEV_ID_RR0_GET_DEV_ADDR(x) ((((x) >> 1) & GENMASK(6, 0)) | \
(((x) >> 6) & GENMASK(9, 7)))
#define DEV_ID_RR1(d) (0xc4 + ((d) * 0x10))
#define DEV_ID_RR1_PID_MSB(pid) (pid)
#define DEV_ID_RR2(d) (0xc8 + ((d) * 0x10))
#define DEV_ID_RR2_PID_LSB(pid) ((pid) << 16)
#define DEV_ID_RR2_BCR(bcr) ((bcr) << 8)
#define DEV_ID_RR2_DCR(dcr) (dcr)
#define DEV_ID_RR2_LVR(lvr) (lvr)
#define SIR_MAP(x) (0x180 + ((x) * 4))
#define SIR_MAP_DEV_REG(d) SIR_MAP((d) / 2)
#define SIR_MAP_DEV_SHIFT(d, fs) ((fs) + (((d) % 2) ? 16 : 0))
#define SIR_MAP_DEV_CONF_MASK(d) (GENMASK(15, 0) << (((d) % 2) ? 16 : 0))
#define SIR_MAP_DEV_CONF(d, c) ((c) << (((d) % 2) ? 16 : 0))
#define DEV_ROLE_SLAVE 0
#define DEV_ROLE_MASTER 1
#define SIR_MAP_DEV_ROLE(role) ((role) << 14)
#define SIR_MAP_DEV_SLOW BIT(13)
#define SIR_MAP_DEV_PL(l) ((l) << 8)
#define SIR_MAP_PL_MAX GENMASK(4, 0)
#define SIR_MAP_DEV_DA(a) ((a) << 1)
#define SIR_MAP_DEV_ACK BIT(0)
#define GPIR_WORD(x) (0x200 + ((x) * 4))
#define GPI_REG(val, id) \
(((val) >> (((id) % 4) * 8)) & GENMASK(7, 0))
#define GPOR_WORD(x) (0x220 + ((x) * 4))
#define GPO_REG(val, id) \
(((val) >> (((id) % 4) * 8)) & GENMASK(7, 0))
#define ASF_INT_STATUS 0x300
#define ASF_INT_RAW_STATUS 0x304
#define ASF_INT_MASK 0x308
#define ASF_INT_TEST 0x30c
#define ASF_INT_FATAL_SELECT 0x310
#define ASF_INTEGRITY_ERR BIT(6)
#define ASF_PROTOCOL_ERR BIT(5)
#define ASF_TRANS_TIMEOUT_ERR BIT(4)
#define ASF_CSR_ERR BIT(3)
#define ASF_DAP_ERR BIT(2)
#define ASF_SRAM_UNCORR_ERR BIT(1)
#define ASF_SRAM_CORR_ERR BIT(0)
#define ASF_SRAM_CORR_FAULT_STATUS 0x320
#define ASF_SRAM_UNCORR_FAULT_STATUS 0x324
#define ASF_SRAM_CORR_FAULT_INSTANCE(x) ((x) >> 24)
#define ASF_SRAM_CORR_FAULT_ADDR(x) ((x) & GENMASK(23, 0))
#define ASF_SRAM_FAULT_STATS 0x328
#define ASF_SRAM_FAULT_UNCORR_STATS(x) ((x) >> 16)
#define ASF_SRAM_FAULT_CORR_STATS(x) ((x) & GENMASK(15, 0))
#define ASF_TRANS_TOUT_CTRL 0x330
#define ASF_TRANS_TOUT_EN BIT(31)
#define ASF_TRANS_TOUT_VAL(x) (x)
#define ASF_TRANS_TOUT_FAULT_MASK 0x334
#define ASF_TRANS_TOUT_FAULT_STATUS 0x338
#define ASF_TRANS_TOUT_FAULT_APB BIT(3)
#define ASF_TRANS_TOUT_FAULT_SCL_LOW BIT(2)
#define ASF_TRANS_TOUT_FAULT_SCL_HIGH BIT(1)
#define ASF_TRANS_TOUT_FAULT_FSCL_HIGH BIT(0)
#define ASF_PROTO_FAULT_MASK 0x340
#define ASF_PROTO_FAULT_STATUS 0x344
#define ASF_PROTO_FAULT_SLVSDR_RD_ABORT BIT(31)
#define ASF_PROTO_FAULT_SLVDDR_FAIL BIT(30)
#define ASF_PROTO_FAULT_S(x) BIT(16 + (x))
#define ASF_PROTO_FAULT_MSTSDR_RD_ABORT BIT(15)
#define ASF_PROTO_FAULT_MSTDDR_FAIL BIT(14)
#define ASF_PROTO_FAULT_M(x) BIT(x)
struct cdns_i3c_master_caps {
u32 cmdfifodepth;
u32 cmdrfifodepth;
u32 txfifodepth;
u32 rxfifodepth;
u32 ibirfifodepth;
};
struct cdns_i3c_cmd {
u32 cmd0;
u32 cmd1;
u32 tx_len;
const void *tx_buf;
u32 rx_len;
void *rx_buf;
u32 error;
};
struct cdns_i3c_xfer {
struct list_head node;
struct completion comp;
int ret;
unsigned int ncmds;
struct cdns_i3c_cmd cmds[];
};
struct cdns_i3c_data {
u8 thd_delay_ns;
};
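/*
 * Driver-private controller state. @xferqueue serializes access to the
 * command FIFOs: @xferqueue.cur is the transfer currently occupying them
 * and @xferqueue.list holds the transfers queued behind it. @ibi maps
 * hardware IBI slots back to their owning i3c_dev_desc. @free_rr_slots is
 * a bitmap of unused retaining registers; bit 0 is never free, since RR0
 * describes this master itself.
 */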
struct cdns_i3c_master {
struct work_struct hj_work;
struct i3c_master_controller base;
u32 free_rr_slots;
unsigned int maxdevs;
struct {
unsigned int num_slots;
struct i3c_dev_desc **slots;
spinlock_t lock;
} ibi;
struct {
struct list_head list;
struct cdns_i3c_xfer *cur;
spinlock_t lock;
} xferqueue;
void __iomem *regs;
struct clk *sysclk;
struct clk *pclk;
struct cdns_i3c_master_caps caps;
unsigned long i3c_scl_lim;
const struct cdns_i3c_data *devdata;
};
static inline struct cdns_i3c_master *
to_cdns_i3c_master(struct i3c_master_controller *master)
{
return container_of(master, struct cdns_i3c_master, base);
}
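/*
 * The TX FIFO is accessed through a 32-bit port: payloads are pushed one
 * word at a time, and a trailing 1-3 byte remainder is zero-padded into a
 * final word. A 6-byte payload, for example, takes two writes: one full
 * word, then a word carrying only the last two bytes.
 */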
static void cdns_i3c_master_wr_to_tx_fifo(struct cdns_i3c_master *master,
const u8 *bytes, int nbytes)
{
writesl(master->regs + TX_FIFO, bytes, nbytes / 4);
if (nbytes & 3) {
u32 tmp = 0;
memcpy(&tmp, bytes + (nbytes & ~3), nbytes & 3);
writesl(master->regs + TX_FIFO, &tmp, 1);
}
}
static void cdns_i3c_master_rd_from_rx_fifo(struct cdns_i3c_master *master,
u8 *bytes, int nbytes)
{
readsl(master->regs + RX_FIFO, bytes, nbytes / 4);
if (nbytes & 3) {
u32 tmp;
readsl(master->regs + RX_FIFO, &tmp, 1);
memcpy(bytes + (nbytes & ~3), &tmp, nbytes & 3);
}
}
static bool cdns_i3c_master_supports_ccc_cmd(struct i3c_master_controller *m,
const struct i3c_ccc_cmd *cmd)
{
if (cmd->ndests > 1)
return false;
switch (cmd->id) {
case I3C_CCC_ENEC(true):
case I3C_CCC_ENEC(false):
case I3C_CCC_DISEC(true):
case I3C_CCC_DISEC(false):
case I3C_CCC_ENTAS(0, true):
case I3C_CCC_ENTAS(0, false):
case I3C_CCC_RSTDAA(true):
case I3C_CCC_RSTDAA(false):
case I3C_CCC_ENTDAA:
case I3C_CCC_SETMWL(true):
case I3C_CCC_SETMWL(false):
case I3C_CCC_SETMRL(true):
case I3C_CCC_SETMRL(false):
case I3C_CCC_DEFSLVS:
case I3C_CCC_ENTHDR(0):
case I3C_CCC_SETDASA:
case I3C_CCC_SETNEWDA:
case I3C_CCC_GETMWL:
case I3C_CCC_GETMRL:
case I3C_CCC_GETPID:
case I3C_CCC_GETBCR:
case I3C_CCC_GETDCR:
case I3C_CCC_GETSTATUS:
case I3C_CCC_GETACCMST:
case I3C_CCC_GETMXDS:
case I3C_CCC_GETHDRCAP:
return true;
default:
break;
}
return false;
}
static int cdns_i3c_master_disable(struct cdns_i3c_master *master)
{
u32 status;
writel(readl(master->regs + CTRL) & ~CTRL_DEV_EN, master->regs + CTRL);
return readl_poll_timeout(master->regs + MST_STATUS0, status,
status & MST_STATUS0_IDLE, 10, 1000000);
}
static void cdns_i3c_master_enable(struct cdns_i3c_master *master)
{
writel(readl(master->regs + CTRL) | CTRL_DEV_EN, master->regs + CTRL);
}
static struct cdns_i3c_xfer *
cdns_i3c_master_alloc_xfer(struct cdns_i3c_master *master, unsigned int ncmds)
{
struct cdns_i3c_xfer *xfer;
xfer = kzalloc(struct_size(xfer, cmds, ncmds), GFP_KERNEL);
if (!xfer)
return NULL;
INIT_LIST_HEAD(&xfer->node);
xfer->ncmds = ncmds;
xfer->ret = -ETIMEDOUT;
return xfer;
}
static void cdns_i3c_master_free_xfer(struct cdns_i3c_xfer *xfer)
{
kfree(xfer);
}
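/*
 * Transfer queueing model: a single cdns_i3c_xfer owns the command FIFOs
 * at any time (xferqueue.cur). Starting it preloads every TX payload,
 * then pushes one CMD1/CMD0 descriptor pair per command, tagged with its
 * index through CMD1_FIFO_CMDID() so that the responses popped from CMDR
 * can be matched back to the right command. Completion is signalled by
 * the "command descriptor FIFO empty" interrupt (MST_INT_CMDD_EMP).
 */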
static void cdns_i3c_master_start_xfer_locked(struct cdns_i3c_master *master)
{
struct cdns_i3c_xfer *xfer = master->xferqueue.cur;
unsigned int i;
if (!xfer)
return;
writel(MST_INT_CMDD_EMP, master->regs + MST_ICR);
for (i = 0; i < xfer->ncmds; i++) {
struct cdns_i3c_cmd *cmd = &xfer->cmds[i];
cdns_i3c_master_wr_to_tx_fifo(master, cmd->tx_buf,
cmd->tx_len);
}
for (i = 0; i < xfer->ncmds; i++) {
struct cdns_i3c_cmd *cmd = &xfer->cmds[i];
writel(cmd->cmd1 | CMD1_FIFO_CMDID(i),
master->regs + CMD1_FIFO);
writel(cmd->cmd0, master->regs + CMD0_FIFO);
}
writel(readl(master->regs + CTRL) | CTRL_MCS,
master->regs + CTRL);
writel(MST_INT_CMDD_EMP, master->regs + MST_IER);
}
static void cdns_i3c_master_end_xfer_locked(struct cdns_i3c_master *master,
u32 isr)
{
struct cdns_i3c_xfer *xfer = master->xferqueue.cur;
int i, ret = 0;
u32 status0;
if (!xfer)
return;
if (!(isr & MST_INT_CMDD_EMP))
return;
writel(MST_INT_CMDD_EMP, master->regs + MST_IDR);
for (status0 = readl(master->regs + MST_STATUS0);
!(status0 & MST_STATUS0_CMDR_EMP);
status0 = readl(master->regs + MST_STATUS0)) {
struct cdns_i3c_cmd *cmd;
u32 cmdr, rx_len, id;
cmdr = readl(master->regs + CMDR);
id = CMDR_CMDID(cmdr);
if (id == CMDR_CMDID_HJACK_DISEC ||
id == CMDR_CMDID_HJACK_ENTDAA ||
WARN_ON(id >= xfer->ncmds))
continue;
cmd = &xfer->cmds[CMDR_CMDID(cmdr)];
rx_len = min_t(u32, CMDR_XFER_BYTES(cmdr), cmd->rx_len);
cdns_i3c_master_rd_from_rx_fifo(master, cmd->rx_buf, rx_len);
cmd->error = CMDR_ERROR(cmdr);
}
for (i = 0; i < xfer->ncmds; i++) {
switch (xfer->cmds[i].error) {
case CMDR_NO_ERROR:
break;
case CMDR_DDR_PREAMBLE_ERROR:
case CMDR_DDR_PARITY_ERROR:
case CMDR_M0_ERROR:
case CMDR_M1_ERROR:
case CMDR_M2_ERROR:
case CMDR_MST_ABORT:
case CMDR_NACK_RESP:
case CMDR_DDR_DROPPED:
ret = -EIO;
break;
case CMDR_DDR_RX_FIFO_OVF:
case CMDR_DDR_TX_FIFO_UNF:
ret = -ENOSPC;
break;
case CMDR_INVALID_DA:
default:
ret = -EINVAL;
break;
}
}
xfer->ret = ret;
complete(&xfer->comp);
xfer = list_first_entry_or_null(&master->xferqueue.list,
struct cdns_i3c_xfer, node);
if (xfer)
list_del_init(&xfer->node);
master->xferqueue.cur = xfer;
cdns_i3c_master_start_xfer_locked(master);
}
static void cdns_i3c_master_queue_xfer(struct cdns_i3c_master *master,
struct cdns_i3c_xfer *xfer)
{
unsigned long flags;
init_completion(&xfer->comp);
spin_lock_irqsave(&master->xferqueue.lock, flags);
if (master->xferqueue.cur) {
list_add_tail(&xfer->node, &master->xferqueue.list);
} else {
master->xferqueue.cur = xfer;
cdns_i3c_master_start_xfer_locked(master);
}
spin_unlock_irqrestore(&master->xferqueue.lock, flags);
}
static void cdns_i3c_master_unqueue_xfer(struct cdns_i3c_master *master,
struct cdns_i3c_xfer *xfer)
{
unsigned long flags;
spin_lock_irqsave(&master->xferqueue.lock, flags);
if (master->xferqueue.cur == xfer) {
u32 status;
writel(readl(master->regs + CTRL) & ~CTRL_DEV_EN,
master->regs + CTRL);
readl_poll_timeout_atomic(master->regs + MST_STATUS0, status,
status & MST_STATUS0_IDLE, 10,
1000000);
master->xferqueue.cur = NULL;
writel(FLUSH_RX_FIFO | FLUSH_TX_FIFO | FLUSH_CMD_FIFO |
FLUSH_CMD_RESP,
master->regs + FLUSH_CTRL);
writel(MST_INT_CMDD_EMP, master->regs + MST_IDR);
writel(readl(master->regs + CTRL) | CTRL_DEV_EN,
master->regs + CTRL);
} else {
list_del_init(&xfer->node);
}
spin_unlock_irqrestore(&master->xferqueue.lock, flags);
}
static enum i3c_error_code cdns_i3c_cmd_get_err(struct cdns_i3c_cmd *cmd)
{
switch (cmd->error) {
case CMDR_M0_ERROR:
return I3C_ERROR_M0;
case CMDR_M1_ERROR:
return I3C_ERROR_M1;
case CMDR_M2_ERROR:
case CMDR_NACK_RESP:
return I3C_ERROR_M2;
default:
break;
}
return I3C_ERROR_UNKNOWN;
}
static int cdns_i3c_master_send_ccc_cmd(struct i3c_master_controller *m,
struct i3c_ccc_cmd *cmd)
{
struct cdns_i3c_master *master = to_cdns_i3c_master(m);
struct cdns_i3c_xfer *xfer;
struct cdns_i3c_cmd *ccmd;
int ret;
xfer = cdns_i3c_master_alloc_xfer(master, 1);
if (!xfer)
return -ENOMEM;
ccmd = xfer->cmds;
ccmd->cmd1 = CMD1_FIFO_CCC(cmd->id);
ccmd->cmd0 = CMD0_FIFO_IS_CCC |
CMD0_FIFO_PL_LEN(cmd->dests[0].payload.len);
if (cmd->id & I3C_CCC_DIRECT)
ccmd->cmd0 |= CMD0_FIFO_DEV_ADDR(cmd->dests[0].addr);
if (cmd->rnw) {
ccmd->cmd0 |= CMD0_FIFO_RNW;
ccmd->rx_buf = cmd->dests[0].payload.data;
ccmd->rx_len = cmd->dests[0].payload.len;
} else {
ccmd->tx_buf = cmd->dests[0].payload.data;
ccmd->tx_len = cmd->dests[0].payload.len;
}
cdns_i3c_master_queue_xfer(master, xfer);
if (!wait_for_completion_timeout(&xfer->comp, msecs_to_jiffies(1000)))
cdns_i3c_master_unqueue_xfer(master, xfer);
ret = xfer->ret;
cmd->err = cdns_i3c_cmd_get_err(&xfer->cmds[0]);
cdns_i3c_master_free_xfer(xfer);
return ret;
}
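/*
 * Private SDR transfers are batched into a single bus transaction: every
 * command except the last sets CMD0_FIFO_RSBC (repeated START before the
 * next command) and the first one sets CMD0_FIFO_BCH. Note that read
 * commands program a payload length one byte larger than the supplied
 * buffer (pl_len++); this looks like a controller-specific requirement
 * rather than anything mandated by the I3C protocol.
 */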
static int cdns_i3c_master_priv_xfers(struct i3c_dev_desc *dev,
struct i3c_priv_xfer *xfers,
int nxfers)
{
struct i3c_master_controller *m = i3c_dev_get_master(dev);
struct cdns_i3c_master *master = to_cdns_i3c_master(m);
int txslots = 0, rxslots = 0, i, ret;
struct cdns_i3c_xfer *cdns_xfer;
for (i = 0; i < nxfers; i++) {
if (xfers[i].len > CMD0_FIFO_PL_LEN_MAX)
return -ENOTSUPP;
}
if (!nxfers)
return 0;
if (nxfers > master->caps.cmdfifodepth ||
nxfers > master->caps.cmdrfifodepth)
return -ENOTSUPP;
/*
 * First make sure that the whole transaction (a block of transfers
 * separated by repeated STARTs and terminated by a STOP) fits in the
 * FIFOs.
 */
for (i = 0; i < nxfers; i++) {
if (xfers[i].rnw)
rxslots += DIV_ROUND_UP(xfers[i].len, 4);
else
txslots += DIV_ROUND_UP(xfers[i].len, 4);
}
if (rxslots > master->caps.rxfifodepth ||
txslots > master->caps.txfifodepth)
return -ENOTSUPP;
cdns_xfer = cdns_i3c_master_alloc_xfer(master, nxfers);
if (!cdns_xfer)
return -ENOMEM;
for (i = 0; i < nxfers; i++) {
struct cdns_i3c_cmd *ccmd = &cdns_xfer->cmds[i];
u32 pl_len = xfers[i].len;
ccmd->cmd0 = CMD0_FIFO_DEV_ADDR(dev->info.dyn_addr) |
CMD0_FIFO_PRIV_XMIT_MODE(XMIT_BURST_WITHOUT_SUBADDR);
if (xfers[i].rnw) {
ccmd->cmd0 |= CMD0_FIFO_RNW;
ccmd->rx_buf = xfers[i].data.in;
ccmd->rx_len = xfers[i].len;
pl_len++;
} else {
ccmd->tx_buf = xfers[i].data.out;
ccmd->tx_len = xfers[i].len;
}
ccmd->cmd0 |= CMD0_FIFO_PL_LEN(pl_len);
if (i < nxfers - 1)
ccmd->cmd0 |= CMD0_FIFO_RSBC;
if (!i)
ccmd->cmd0 |= CMD0_FIFO_BCH;
}
cdns_i3c_master_queue_xfer(master, cdns_xfer);
if (!wait_for_completion_timeout(&cdns_xfer->comp,
msecs_to_jiffies(1000)))
cdns_i3c_master_unqueue_xfer(master, cdns_xfer);
ret = cdns_xfer->ret;
for (i = 0; i < nxfers; i++)
xfers[i].err = cdns_i3c_cmd_get_err(&cdns_xfer->cmds[i]);
cdns_i3c_master_free_xfer(cdns_xfer);
return ret;
}
static int cdns_i3c_master_i2c_xfers(struct i2c_dev_desc *dev,
const struct i2c_msg *xfers, int nxfers)
{
struct i3c_master_controller *m = i2c_dev_get_master(dev);
struct cdns_i3c_master *master = to_cdns_i3c_master(m);
unsigned int nrxwords = 0, ntxwords = 0;
struct cdns_i3c_xfer *xfer;
int i, ret = 0;
if (nxfers > master->caps.cmdfifodepth)
return -ENOTSUPP;
for (i = 0; i < nxfers; i++) {
if (xfers[i].len > CMD0_FIFO_PL_LEN_MAX)
return -ENOTSUPP;
if (xfers[i].flags & I2C_M_RD)
nrxwords += DIV_ROUND_UP(xfers[i].len, 4);
else
ntxwords += DIV_ROUND_UP(xfers[i].len, 4);
}
if (ntxwords > master->caps.txfifodepth ||
nrxwords > master->caps.rxfifodepth)
return -ENOTSUPP;
xfer = cdns_i3c_master_alloc_xfer(master, nxfers);
if (!xfer)
return -ENOMEM;
for (i = 0; i < nxfers; i++) {
struct cdns_i3c_cmd *ccmd = &xfer->cmds[i];
ccmd->cmd0 = CMD0_FIFO_DEV_ADDR(xfers[i].addr) |
CMD0_FIFO_PL_LEN(xfers[i].len) |
CMD0_FIFO_PRIV_XMIT_MODE(XMIT_BURST_WITHOUT_SUBADDR);
if (xfers[i].flags & I2C_M_TEN)
ccmd->cmd0 |= CMD0_FIFO_IS_10B;
if (xfers[i].flags & I2C_M_RD) {
ccmd->cmd0 |= CMD0_FIFO_RNW;
ccmd->rx_buf = xfers[i].buf;
ccmd->rx_len = xfers[i].len;
} else {
ccmd->tx_buf = xfers[i].buf;
ccmd->tx_len = xfers[i].len;
}
}
cdns_i3c_master_queue_xfer(master, xfer);
if (!wait_for_completion_timeout(&xfer->comp, msecs_to_jiffies(1000)))
cdns_i3c_master_unqueue_xfer(master, xfer);
ret = xfer->ret;
cdns_i3c_master_free_xfer(xfer);
return ret;
}
struct cdns_i3c_i2c_dev_data {
u16 id;
s16 ibi;
struct i3c_generic_ibi_pool *ibi_pool;
};
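/*
 * RR0 packs a (possibly 10-bit) device address together with an
 * odd-parity bit. Worked example for the 7-bit address 0x50 (0b1010000):
 * addr[6:0] lands in RR0[7:1], giving 0xa0; there are no addr[9:7] bits
 * to place in RR0[15:13]; and since 0x50 has an even number of set bits,
 * the parity bit RR0[0] is set, yielding 0xa1.
 */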
static u32 prepare_rr0_dev_address(u32 addr)
{
u32 ret = (addr << 1) & 0xff;
/* RR0[7:1] = addr[6:0] */
ret |= (addr & GENMASK(6, 0)) << 1;
/* RR0[15:13] = addr[9:7] */
ret |= (addr & GENMASK(9, 7)) << 6;
/* RR0[0] = ~XOR(addr[6:0]) */
if (!(hweight8(addr & 0x7f) & 1))
ret |= 1;
return ret;
}
static void cdns_i3c_master_upd_i3c_addr(struct i3c_dev_desc *dev)
{
struct i3c_master_controller *m = i3c_dev_get_master(dev);
struct cdns_i3c_master *master = to_cdns_i3c_master(m);
struct cdns_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
u32 rr;
rr = prepare_rr0_dev_address(dev->info.dyn_addr ?
dev->info.dyn_addr :
dev->info.static_addr);
writel(DEV_ID_RR0_IS_I3C | rr, master->regs + DEV_ID_RR0(data->id));
}
static int cdns_i3c_master_get_rr_slot(struct cdns_i3c_master *master,
u8 dyn_addr)
{
unsigned long activedevs;
u32 rr;
int i;
if (!dyn_addr) {
if (!master->free_rr_slots)
return -ENOSPC;
return ffs(master->free_rr_slots) - 1;
}
activedevs = readl(master->regs + DEVS_CTRL) & DEVS_CTRL_DEVS_ACTIVE_MASK;
activedevs &= ~BIT(0);
for_each_set_bit(i, &activedevs, master->maxdevs + 1) {
rr = readl(master->regs + DEV_ID_RR0(i));
if (!(rr & DEV_ID_RR0_IS_I3C) ||
DEV_ID_RR0_GET_DEV_ADDR(rr) != dyn_addr)
continue;
return i;
}
return -EINVAL;
}
static int cdns_i3c_master_reattach_i3c_dev(struct i3c_dev_desc *dev,
u8 old_dyn_addr)
{
cdns_i3c_master_upd_i3c_addr(dev);
return 0;
}
static int cdns_i3c_master_attach_i3c_dev(struct i3c_dev_desc *dev)
{
struct i3c_master_controller *m = i3c_dev_get_master(dev);
struct cdns_i3c_master *master = to_cdns_i3c_master(m);
struct cdns_i3c_i2c_dev_data *data;
int slot;
data = kzalloc(sizeof(*data), GFP_KERNEL);
if (!data)
return -ENOMEM;
slot = cdns_i3c_master_get_rr_slot(master, dev->info.dyn_addr);
if (slot < 0) {
kfree(data);
return slot;
}
data->ibi = -1;
data->id = slot;
i3c_dev_set_master_data(dev, data);
master->free_rr_slots &= ~BIT(slot);
if (!dev->info.dyn_addr) {
cdns_i3c_master_upd_i3c_addr(dev);
writel(readl(master->regs + DEVS_CTRL) |
DEVS_CTRL_DEV_ACTIVE(data->id),
master->regs + DEVS_CTRL);
}
return 0;
}
static void cdns_i3c_master_detach_i3c_dev(struct i3c_dev_desc *dev)
{
struct i3c_master_controller *m = i3c_dev_get_master(dev);
struct cdns_i3c_master *master = to_cdns_i3c_master(m);
struct cdns_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
writel(readl(master->regs + DEVS_CTRL) |
DEVS_CTRL_DEV_CLR(data->id),
master->regs + DEVS_CTRL);
i3c_dev_set_master_data(dev, NULL);
master->free_rr_slots |= BIT(data->id);
kfree(data);
}
static int cdns_i3c_master_attach_i2c_dev(struct i2c_dev_desc *dev)
{
struct i3c_master_controller *m = i2c_dev_get_master(dev);
struct cdns_i3c_master *master = to_cdns_i3c_master(m);
struct cdns_i3c_i2c_dev_data *data;
int slot;
slot = cdns_i3c_master_get_rr_slot(master, 0);
if (slot < 0)
return slot;
data = kzalloc(sizeof(*data), GFP_KERNEL);
if (!data)
return -ENOMEM;
data->id = slot;
master->free_rr_slots &= ~BIT(slot);
i2c_dev_set_master_data(dev, data);
writel(prepare_rr0_dev_address(dev->addr),
master->regs + DEV_ID_RR0(data->id));
writel(dev->lvr, master->regs + DEV_ID_RR2(data->id));
writel(readl(master->regs + DEVS_CTRL) |
DEVS_CTRL_DEV_ACTIVE(data->id),
master->regs + DEVS_CTRL);
return 0;
}
static void cdns_i3c_master_detach_i2c_dev(struct i2c_dev_desc *dev)
{
struct i3c_master_controller *m = i2c_dev_get_master(dev);
struct cdns_i3c_master *master = to_cdns_i3c_master(m);
struct cdns_i3c_i2c_dev_data *data = i2c_dev_get_master_data(dev);
writel(readl(master->regs + DEVS_CTRL) |
DEVS_CTRL_DEV_CLR(data->id),
master->regs + DEVS_CTRL);
master->free_rr_slots |= BIT(data->id);
i2c_dev_set_master_data(dev, NULL);
kfree(data);
}
static void cdns_i3c_master_bus_cleanup(struct i3c_master_controller *m)
{
struct cdns_i3c_master *master = to_cdns_i3c_master(m);
cdns_i3c_master_disable(master);
}
static void cdns_i3c_master_dev_rr_to_info(struct cdns_i3c_master *master,
unsigned int slot,
struct i3c_device_info *info)
{
u32 rr;
memset(info, 0, sizeof(*info));
rr = readl(master->regs + DEV_ID_RR0(slot));
info->dyn_addr = DEV_ID_RR0_GET_DEV_ADDR(rr);
rr = readl(master->regs + DEV_ID_RR2(slot));
info->dcr = rr;
info->bcr = rr >> 8;
info->pid = rr >> 16;
info->pid |= (u64)readl(master->regs + DEV_ID_RR1(slot)) << 16;
}
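/*
 * Worked example for the PP_LOW computation below, assuming an effective
 * bus->scl_rate.i3c of 12.5 MHz and a slowest-device limit of 4 MHz:
 * pres_step = 1000000000 / (12500000 * 4) = 20 ns and i3c_lim_period =
 * 250 ns, so ncycles = DIV_ROUND_UP(250, 20) - 4 = 9 is programmed into
 * PRESCL_CTRL1_PP_LOW() to stretch the push-pull low phase until the
 * effective SCL rate stays at or below the 4 MHz device limit.
 */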
static void cdns_i3c_master_upd_i3c_scl_lim(struct cdns_i3c_master *master)
{
struct i3c_master_controller *m = &master->base;
unsigned long i3c_lim_period, pres_step, ncycles;
struct i3c_bus *bus = i3c_master_get_bus(m);
unsigned long new_i3c_scl_lim = 0;
struct i3c_dev_desc *dev;
u32 prescl1, ctrl;
i3c_bus_for_each_i3cdev(bus, dev) {
unsigned long max_fscl;
max_fscl = max(I3C_CCC_MAX_SDR_FSCL(dev->info.max_read_ds),
I3C_CCC_MAX_SDR_FSCL(dev->info.max_write_ds));
switch (max_fscl) {
case I3C_SDR1_FSCL_8MHZ:
max_fscl = 8000000;
break;
case I3C_SDR2_FSCL_6MHZ:
max_fscl = 6000000;
break;
case I3C_SDR3_FSCL_4MHZ:
max_fscl = 4000000;
break;
case I3C_SDR4_FSCL_2MHZ:
max_fscl = 2000000;
break;
case I3C_SDR0_FSCL_MAX:
default:
max_fscl = 0;
break;
}
if (max_fscl &&
(new_i3c_scl_lim > max_fscl || !new_i3c_scl_lim))
new_i3c_scl_lim = max_fscl;
}
/* Only update PRESCL_CTRL1 if the I3C SCL limitation has changed. */
if (new_i3c_scl_lim == master->i3c_scl_lim)
return;
master->i3c_scl_lim = new_i3c_scl_lim;
if (!new_i3c_scl_lim)
return;
pres_step = 1000000000UL / (bus->scl_rate.i3c * 4);
/* Configure PP_LOW to meet I3C slave limitations. */
prescl1 = readl(master->regs + PRESCL_CTRL1) &
~PRESCL_CTRL1_PP_LOW_MASK;
ctrl = readl(master->regs + CTRL);
i3c_lim_period = DIV_ROUND_UP(1000000000, master->i3c_scl_lim);
ncycles = DIV_ROUND_UP(i3c_lim_period, pres_step);
if (ncycles < 4)
ncycles = 0;
else
ncycles -= 4;
prescl1 |= PRESCL_CTRL1_PP_LOW(ncycles);
/* Disable I3C master before updating PRESCL_CTRL1. */
if (ctrl & CTRL_DEV_EN)
cdns_i3c_master_disable(master);
writel(prescl1, master->regs + PRESCL_CTRL1);
if (ctrl & CTRL_DEV_EN)
cdns_i3c_master_enable(master);
}
static int cdns_i3c_master_do_daa(struct i3c_master_controller *m)
{
struct cdns_i3c_master *master = to_cdns_i3c_master(m);
unsigned long olddevs, newdevs;
int ret, slot;
u8 addrs[MAX_DEVS] = { };
u8 last_addr = 0;
olddevs = readl(master->regs + DEVS_CTRL) & DEVS_CTRL_DEVS_ACTIVE_MASK;
olddevs |= BIT(0);
/* Prepare RR slots before launching DAA. */
for_each_clear_bit(slot, &olddevs, master->maxdevs + 1) {
ret = i3c_master_get_free_addr(m, last_addr + 1);
if (ret < 0)
return -ENOSPC;
last_addr = ret;
addrs[slot] = last_addr;
writel(prepare_rr0_dev_address(last_addr) | DEV_ID_RR0_IS_I3C,
master->regs + DEV_ID_RR0(slot));
writel(0, master->regs + DEV_ID_RR1(slot));
writel(0, master->regs + DEV_ID_RR2(slot));
}
ret = i3c_master_entdaa_locked(&master->base);
if (ret && ret != I3C_ERROR_M2)
return ret;
newdevs = readl(master->regs + DEVS_CTRL) & DEVS_CTRL_DEVS_ACTIVE_MASK;
newdevs &= ~olddevs;
/*
 * Register all devices that got a dynamic address during DAA. We
 * already have the addresses assigned to them in the addrs array.
 */
for_each_set_bit(slot, &newdevs, master->maxdevs + 1)
i3c_master_add_i3c_dev_locked(m, addrs[slot]);
/*
* Clear slots that ended up not being used. Can be caused by I3C
* device creation failure or when the I3C device was already known
* by the system but with a different address (in this case the device
* already has a slot and does not need a new one).
*/
writel(readl(master->regs + DEVS_CTRL) |
master->free_rr_slots << DEVS_CTRL_DEV_CLR_SHIFT,
master->regs + DEVS_CTRL);
i3c_master_defslvs_locked(&master->base);
cdns_i3c_master_upd_i3c_scl_lim(master);
/* Re-enable Hot-Join and Mastership request events on the bus. */
i3c_master_enec_locked(m, I3C_BROADCAST_ADDR,
I3C_CCC_EVENT_HJ | I3C_CCC_EVENT_MR);
return 0;
}
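/*
 * Worked example, assuming a 250 MHz sysclk (4 ns period) and the default
 * thd_delay_ns of 10: thd_delay = DIV_ROUND_UP(10, 4) = 3. The register
 * field is encoded inversely, so the value fed to CTRL_THD_DELAY() is
 * THD_DELAY_MAX - 3 (0 when THD_DELAY_MAX is 3, as the clamp below
 * implies).
 */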
static u8 cdns_i3c_master_calculate_thd_delay(struct cdns_i3c_master *master)
{
unsigned long sysclk_rate = clk_get_rate(master->sysclk);
u8 thd_delay = DIV_ROUND_UP(master->devdata->thd_delay_ns,
(NSEC_PER_SEC / sysclk_rate));
/* Values greater than THD_DELAY_MAX are invalid, so clamp. */
if (thd_delay > THD_DELAY_MAX)
thd_delay = THD_DELAY_MAX;
/* CTLR_THD_DEL value is encoded. */
return (THD_DELAY_MAX - thd_delay);
}
static int cdns_i3c_master_bus_init(struct i3c_master_controller *m)
{
struct cdns_i3c_master *master = to_cdns_i3c_master(m);
unsigned long pres_step, sysclk_rate, max_i2cfreq;
struct i3c_bus *bus = i3c_master_get_bus(m);
u32 ctrl, prescl0, prescl1, pres;
struct i3c_device_info info = { };
int ret, ncycles;
switch (bus->mode) {
case I3C_BUS_MODE_PURE:
ctrl = CTRL_PURE_BUS_MODE;
break;
case I3C_BUS_MODE_MIXED_FAST:
ctrl = CTRL_MIXED_FAST_BUS_MODE;
break;
case I3C_BUS_MODE_MIXED_SLOW:
ctrl = CTRL_MIXED_SLOW_BUS_MODE;
break;
default:
return -EINVAL;
}
sysclk_rate = clk_get_rate(master->sysclk);
if (!sysclk_rate)
return -EINVAL;
pres = DIV_ROUND_UP(sysclk_rate, (bus->scl_rate.i3c * 4)) - 1;
if (pres > PRESCL_CTRL0_MAX)
return -ERANGE;
bus->scl_rate.i3c = sysclk_rate / ((pres + 1) * 4);
prescl0 = PRESCL_CTRL0_I3C(pres);
max_i2cfreq = bus->scl_rate.i2c;
pres = (sysclk_rate / (max_i2cfreq * 5)) - 1;
if (pres > PRESCL_CTRL0_MAX)
return -ERANGE;
bus->scl_rate.i2c = sysclk_rate / ((pres + 1) * 5);
prescl0 |= PRESCL_CTRL0_I2C(pres);
writel(prescl0, master->regs + PRESCL_CTRL0);
/* Calculate the open-drain (OD) low period. */
pres_step = 1000000000 / (bus->scl_rate.i3c * 4);
ncycles = DIV_ROUND_UP(I3C_BUS_TLOW_OD_MIN_NS, pres_step) - 2;
if (ncycles < 0)
ncycles = 0;
prescl1 = PRESCL_CTRL1_OD_LOW(ncycles);
writel(prescl1, master->regs + PRESCL_CTRL1);
/* Get an address for the master. */
ret = i3c_master_get_free_addr(m, 0);
if (ret < 0)
return ret;
writel(prepare_rr0_dev_address(ret) | DEV_ID_RR0_IS_I3C,
master->regs + DEV_ID_RR0(0));
cdns_i3c_master_dev_rr_to_info(master, 0, &info);
if (info.bcr & I3C_BCR_HDR_CAP)
info.hdr_cap = I3C_CCC_HDR_MODE(I3C_HDR_DDR);
ret = i3c_master_set_info(&master->base, &info);
if (ret)
return ret;
/*
* Enable Hot-Join, and, when a Hot-Join request happens, disable all
* events coming from this device.
*
* We will issue ENTDAA afterwards from the threaded IRQ handler.
*/
ctrl |= CTRL_HJ_ACK | CTRL_HJ_DISEC | CTRL_HALT_EN | CTRL_MCS_EN;
/*
 * Configure the data hold delay based on device-specific data.
 *
 * MIPI I3C Specification 1.0 defines a non-zero minimal tHD_PP timing
 * on the master output. This setting makes it possible to meet that
 * timing on the master's SoC outputs, regardless of PCB balancing.
 */
ctrl |= CTRL_THD_DELAY(cdns_i3c_master_calculate_thd_delay(master));
writel(ctrl, master->regs + CTRL);
cdns_i3c_master_enable(master);
return 0;
}
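/*
 * IBI handling: the IBIR status word identifies the emitting slave slot
 * and how many bytes it pushed into the IBI data FIFO. The payload is
 * copied into a slot from the device's generic IBI pool and queued to the
 * core; if no slot is available, or the IBI is errored or unknown, the
 * FIFO words are still drained so the hardware never stalls on stale
 * data.
 */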
static void cdns_i3c_master_handle_ibi(struct cdns_i3c_master *master,
u32 ibir)
{
struct cdns_i3c_i2c_dev_data *data;
bool data_consumed = false;
struct i3c_ibi_slot *slot;
u32 id = IBIR_SLVID(ibir);
struct i3c_dev_desc *dev;
size_t nbytes;
u8 *buf;
/*
* FIXME: maybe we should report the FIFO OVF errors to the upper
* layer.
*/
if (id >= master->ibi.num_slots || (ibir & IBIR_ERROR))
goto out;
dev = master->ibi.slots[id];
spin_lock(&master->ibi.lock);
data = i3c_dev_get_master_data(dev);
slot = i3c_generic_ibi_get_free_slot(data->ibi_pool);
if (!slot)
goto out_unlock;
buf = slot->data;
nbytes = IBIR_XFER_BYTES(ibir);
readsl(master->regs + IBI_DATA_FIFO, buf, nbytes / 4);
if (nbytes & 3) {
u32 tmp = __raw_readl(master->regs + IBI_DATA_FIFO);
memcpy(buf + (nbytes & ~3), &tmp, nbytes & 3);
}
slot->len = min_t(unsigned int, IBIR_XFER_BYTES(ibir),
dev->ibi->max_payload_len);
i3c_master_queue_ibi(dev, slot);
data_consumed = true;
out_unlock:
spin_unlock(&master->ibi.lock);
out:
/* Consume data from the FIFO if it hasn't been done already. */
if (!data_consumed) {
int i;
for (i = 0; i < IBIR_XFER_BYTES(ibir); i += 4)
readl(master->regs + IBI_DATA_FIFO);
}
}
static void cdns_i3c_master_demux_ibis(struct cdns_i3c_master *master)
{
u32 status0;
writel(MST_INT_IBIR_THR, master->regs + MST_ICR);
for (status0 = readl(master->regs + MST_STATUS0);
!(status0 & MST_STATUS0_IBIR_EMP);
status0 = readl(master->regs + MST_STATUS0)) {
u32 ibir = readl(master->regs + IBIR);
switch (IBIR_TYPE(ibir)) {
case IBIR_TYPE_IBI:
cdns_i3c_master_handle_ibi(master, ibir);
break;
case IBIR_TYPE_HJ:
WARN_ON(IBIR_XFER_BYTES(ibir) || (ibir & IBIR_ERROR));
queue_work(master->base.wq, &master->hj_work);
break;
case IBIR_TYPE_MR:
WARN_ON(IBIR_XFER_BYTES(ibir) || (ibir & IBIR_ERROR));
break;
default:
break;
}
}
}
static irqreturn_t cdns_i3c_master_interrupt(int irq, void *data)
{
struct cdns_i3c_master *master = data;
u32 status;
status = readl(master->regs + MST_ISR);
if (!(status & readl(master->regs + MST_IMR)))
return IRQ_NONE;
spin_lock(&master->xferqueue.lock);
cdns_i3c_master_end_xfer_locked(master, status);
spin_unlock(&master->xferqueue.lock);
if (status & MST_INT_IBIR_THR)
cdns_i3c_master_demux_ibis(master);
return IRQ_HANDLED;
}
static int cdns_i3c_master_disable_ibi(struct i3c_dev_desc *dev)
{
struct i3c_master_controller *m = i3c_dev_get_master(dev);
struct cdns_i3c_master *master = to_cdns_i3c_master(m);
struct cdns_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
unsigned long flags;
u32 sirmap;
int ret;
ret = i3c_master_disec_locked(m, dev->info.dyn_addr,
I3C_CCC_EVENT_SIR);
if (ret)
return ret;
spin_lock_irqsave(&master->ibi.lock, flags);
sirmap = readl(master->regs + SIR_MAP_DEV_REG(data->ibi));
sirmap &= ~SIR_MAP_DEV_CONF_MASK(data->ibi);
sirmap |= SIR_MAP_DEV_CONF(data->ibi,
SIR_MAP_DEV_DA(I3C_BROADCAST_ADDR));
writel(sirmap, master->regs + SIR_MAP_DEV_REG(data->ibi));
spin_unlock_irqrestore(&master->ibi.lock, flags);
return ret;
}
static int cdns_i3c_master_enable_ibi(struct i3c_dev_desc *dev)
{
struct i3c_master_controller *m = i3c_dev_get_master(dev);
struct cdns_i3c_master *master = to_cdns_i3c_master(m);
struct cdns_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
unsigned long flags;
u32 sircfg, sirmap;
int ret;
spin_lock_irqsave(&master->ibi.lock, flags);
sirmap = readl(master->regs + SIR_MAP_DEV_REG(data->ibi));
sirmap &= ~SIR_MAP_DEV_CONF_MASK(data->ibi);
sircfg = SIR_MAP_DEV_ROLE(dev->info.bcr >> 6) |
SIR_MAP_DEV_DA(dev->info.dyn_addr) |
SIR_MAP_DEV_PL(dev->info.max_ibi_len) |
SIR_MAP_DEV_ACK;
if (dev->info.bcr & I3C_BCR_MAX_DATA_SPEED_LIM)
sircfg |= SIR_MAP_DEV_SLOW;
sirmap |= SIR_MAP_DEV_CONF(data->ibi, sircfg);
writel(sirmap, master->regs + SIR_MAP_DEV_REG(data->ibi));
spin_unlock_irqrestore(&master->ibi.lock, flags);
ret = i3c_master_enec_locked(m, dev->info.dyn_addr,
I3C_CCC_EVENT_SIR);
if (ret) {
spin_lock_irqsave(&master->ibi.lock, flags);
sirmap = readl(master->regs + SIR_MAP_DEV_REG(data->ibi));
sirmap &= ~SIR_MAP_DEV_CONF_MASK(data->ibi);
sirmap |= SIR_MAP_DEV_CONF(data->ibi,
SIR_MAP_DEV_DA(I3C_BROADCAST_ADDR));
writel(sirmap, master->regs + SIR_MAP_DEV_REG(data->ibi));
spin_unlock_irqrestore(&master->ibi.lock, flags);
}
return ret;
}
static int cdns_i3c_master_request_ibi(struct i3c_dev_desc *dev,
const struct i3c_ibi_setup *req)
{
struct i3c_master_controller *m = i3c_dev_get_master(dev);
struct cdns_i3c_master *master = to_cdns_i3c_master(m);
struct cdns_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
unsigned long flags;
unsigned int i;
data->ibi_pool = i3c_generic_ibi_alloc_pool(dev, req);
if (IS_ERR(data->ibi_pool))
return PTR_ERR(data->ibi_pool);
spin_lock_irqsave(&master->ibi.lock, flags);
for (i = 0; i < master->ibi.num_slots; i++) {
if (!master->ibi.slots[i]) {
data->ibi = i;
master->ibi.slots[i] = dev;
break;
}
}
spin_unlock_irqrestore(&master->ibi.lock, flags);
if (i < master->ibi.num_slots)
return 0;
i3c_generic_ibi_free_pool(data->ibi_pool);
data->ibi_pool = NULL;
return -ENOSPC;
}
static void cdns_i3c_master_free_ibi(struct i3c_dev_desc *dev)
{
struct i3c_master_controller *m = i3c_dev_get_master(dev);
struct cdns_i3c_master *master = to_cdns_i3c_master(m);
struct cdns_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
unsigned long flags;
spin_lock_irqsave(&master->ibi.lock, flags);
master->ibi.slots[data->ibi] = NULL;
data->ibi = -1;
spin_unlock_irqrestore(&master->ibi.lock, flags);
i3c_generic_ibi_free_pool(data->ibi_pool);
}
static void cdns_i3c_master_recycle_ibi_slot(struct i3c_dev_desc *dev,
struct i3c_ibi_slot *slot)
{
struct cdns_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
i3c_generic_ibi_recycle_slot(data->ibi_pool, slot);
}
static const struct i3c_master_controller_ops cdns_i3c_master_ops = {
.bus_init = cdns_i3c_master_bus_init,
.bus_cleanup = cdns_i3c_master_bus_cleanup,
.do_daa = cdns_i3c_master_do_daa,
.attach_i3c_dev = cdns_i3c_master_attach_i3c_dev,
.reattach_i3c_dev = cdns_i3c_master_reattach_i3c_dev,
.detach_i3c_dev = cdns_i3c_master_detach_i3c_dev,
.attach_i2c_dev = cdns_i3c_master_attach_i2c_dev,
.detach_i2c_dev = cdns_i3c_master_detach_i2c_dev,
.supports_ccc_cmd = cdns_i3c_master_supports_ccc_cmd,
.send_ccc_cmd = cdns_i3c_master_send_ccc_cmd,
.priv_xfers = cdns_i3c_master_priv_xfers,
.i2c_xfers = cdns_i3c_master_i2c_xfers,
.enable_ibi = cdns_i3c_master_enable_ibi,
.disable_ibi = cdns_i3c_master_disable_ibi,
.request_ibi = cdns_i3c_master_request_ibi,
.free_ibi = cdns_i3c_master_free_ibi,
.recycle_ibi_slot = cdns_i3c_master_recycle_ibi_slot,
};
static void cdns_i3c_master_hj(struct work_struct *work)
{
struct cdns_i3c_master *master = container_of(work,
struct cdns_i3c_master,
hj_work);
i3c_master_do_daa(&master->base);
}
static struct cdns_i3c_data cdns_i3c_devdata = {
.thd_delay_ns = 10,
};
static const struct of_device_id cdns_i3c_master_of_ids[] = {
{ .compatible = "cdns,i3c-master", .data = &cdns_i3c_devdata },
{ /* sentinel */ },
};
static int cdns_i3c_master_probe(struct platform_device *pdev)
{
struct cdns_i3c_master *master;
int ret, irq;
u32 val;
master = devm_kzalloc(&pdev->dev, sizeof(*master), GFP_KERNEL);
if (!master)
return -ENOMEM;
master->devdata = of_device_get_match_data(&pdev->dev);
if (!master->devdata)
return -EINVAL;
master->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(master->regs))
return PTR_ERR(master->regs);
master->pclk = devm_clk_get(&pdev->dev, "pclk");
if (IS_ERR(master->pclk))
return PTR_ERR(master->pclk);
master->sysclk = devm_clk_get(&pdev->dev, "sysclk");
if (IS_ERR(master->sysclk))
return PTR_ERR(master->sysclk);
irq = platform_get_irq(pdev, 0);
if (irq < 0)
return irq;
ret = clk_prepare_enable(master->pclk);
if (ret)
return ret;
ret = clk_prepare_enable(master->sysclk);
if (ret)
goto err_disable_pclk;
if (readl(master->regs + DEV_ID) != DEV_ID_I3C_MASTER) {
ret = -EINVAL;
goto err_disable_sysclk;
}
spin_lock_init(&master->xferqueue.lock);
INIT_LIST_HEAD(&master->xferqueue.list);
INIT_WORK(&master->hj_work, cdns_i3c_master_hj);
writel(0xffffffff, master->regs + MST_IDR);
writel(0xffffffff, master->regs + SLV_IDR);
ret = devm_request_irq(&pdev->dev, irq, cdns_i3c_master_interrupt, 0,
dev_name(&pdev->dev), master);
if (ret)
goto err_disable_sysclk;
platform_set_drvdata(pdev, master);
val = readl(master->regs + CONF_STATUS0);
/* Device ID0 is reserved to describe this master. */
master->maxdevs = CONF_STATUS0_DEVS_NUM(val);
master->free_rr_slots = GENMASK(master->maxdevs, 1);
/* Decode the CONF_STATUS0 fields before val is overwritten below. */
master->caps.ibirfifodepth = CONF_STATUS0_IBIR_DEPTH(val);
master->caps.cmdrfifodepth = CONF_STATUS0_CMDR_DEPTH(val);
val = readl(master->regs + CONF_STATUS1);
master->caps.cmdfifodepth = CONF_STATUS1_CMD_DEPTH(val);
master->caps.rxfifodepth = CONF_STATUS1_RX_DEPTH(val);
master->caps.txfifodepth = CONF_STATUS1_TX_DEPTH(val);
spin_lock_init(&master->ibi.lock);
master->ibi.num_slots = CONF_STATUS1_IBI_HW_RES(val);
master->ibi.slots = devm_kcalloc(&pdev->dev, master->ibi.num_slots,
sizeof(*master->ibi.slots),
GFP_KERNEL);
if (!master->ibi.slots) {
ret = -ENOMEM;
goto err_disable_sysclk;
}
writel(IBIR_THR(1), master->regs + CMD_IBI_THR_CTRL);
writel(MST_INT_IBIR_THR, master->regs + MST_IER);
writel(DEVS_CTRL_DEV_CLR_ALL, master->regs + DEVS_CTRL);
ret = i3c_master_register(&master->base, &pdev->dev,
&cdns_i3c_master_ops, false);
if (ret)
goto err_disable_sysclk;
return 0;
err_disable_sysclk:
clk_disable_unprepare(master->sysclk);
err_disable_pclk:
clk_disable_unprepare(master->pclk);
return ret;
}
static void cdns_i3c_master_remove(struct platform_device *pdev)
{
struct cdns_i3c_master *master = platform_get_drvdata(pdev);
i3c_master_unregister(&master->base);
clk_disable_unprepare(master->sysclk);
clk_disable_unprepare(master->pclk);
}
static struct platform_driver cdns_i3c_master = {
.probe = cdns_i3c_master_probe,
.remove_new = cdns_i3c_master_remove,
.driver = {
.name = "cdns-i3c-master",
.of_match_table = cdns_i3c_master_of_ids,
},
};
module_platform_driver(cdns_i3c_master);
MODULE_AUTHOR("Boris Brezillon <[email protected]>");
MODULE_DESCRIPTION("Cadence I3C master driver");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:cdns-i3c-master");
| linux-master | drivers/i3c/master/i3c-master-cdns.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2023 Code Construct
*
* Author: Jeremy Kerr <[email protected]>
*/
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include "dw-i3c-master.h"
/* AST2600-specific global register set */
#define AST2600_I3CG_REG0(idx) (((idx) * 4 * 4) + 0x10)
#define AST2600_I3CG_REG1(idx) (((idx) * 4 * 4) + 0x14)
#define AST2600_I3CG_REG0_SDA_PULLUP_EN_MASK GENMASK(29, 28)
#define AST2600_I3CG_REG0_SDA_PULLUP_EN_2K (0x0 << 28)
#define AST2600_I3CG_REG0_SDA_PULLUP_EN_750 (0x2 << 28)
#define AST2600_I3CG_REG0_SDA_PULLUP_EN_545 (0x3 << 28)
#define AST2600_I3CG_REG1_I2C_MODE BIT(0)
#define AST2600_I3CG_REG1_TEST_MODE BIT(1)
#define AST2600_I3CG_REG1_ACT_MODE_MASK GENMASK(3, 2)
#define AST2600_I3CG_REG1_ACT_MODE(x) (((x) << 2) & AST2600_I3CG_REG1_ACT_MODE_MASK)
#define AST2600_I3CG_REG1_PENDING_INT_MASK GENMASK(7, 4)
#define AST2600_I3CG_REG1_PENDING_INT(x) (((x) << 4) & AST2600_I3CG_REG1_PENDING_INT_MASK)
#define AST2600_I3CG_REG1_SA_MASK GENMASK(14, 8)
#define AST2600_I3CG_REG1_SA(x) (((x) << 8) & AST2600_I3CG_REG1_SA_MASK)
#define AST2600_I3CG_REG1_SA_EN BIT(15)
#define AST2600_I3CG_REG1_INST_ID_MASK GENMASK(19, 16)
#define AST2600_I3CG_REG1_INST_ID(x) (((x) << 16) & AST2600_I3CG_REG1_INST_ID_MASK)
#define AST2600_DEFAULT_SDA_PULLUP_OHMS 2000
#define DEV_ADDR_TABLE_IBI_PEC BIT(11)
struct ast2600_i3c {
struct dw_i3c_master dw;
struct regmap *global_regs;
unsigned int global_idx;
unsigned int sda_pullup;
};
static struct ast2600_i3c *to_ast2600_i3c(struct dw_i3c_master *dw)
{
return container_of(dw, struct ast2600_i3c, dw);
}
static int ast2600_i3c_pullup_to_reg(unsigned int ohms, u32 *regp)
{
u32 reg;
switch (ohms) {
case 2000:
reg = AST2600_I3CG_REG0_SDA_PULLUP_EN_2K;
break;
case 750:
reg = AST2600_I3CG_REG0_SDA_PULLUP_EN_750;
break;
case 545:
reg = AST2600_I3CG_REG0_SDA_PULLUP_EN_545;
break;
default:
return -EINVAL;
}
if (regp)
*regp = reg;
return 0;
}
static int ast2600_i3c_init(struct dw_i3c_master *dw)
{
struct ast2600_i3c *i3c = to_ast2600_i3c(dw);
u32 reg = 0;
int rc;
/* reg0: set SDA pullup values */
rc = ast2600_i3c_pullup_to_reg(i3c->sda_pullup, ®);
if (rc)
return rc;
rc = regmap_write(i3c->global_regs,
AST2600_I3CG_REG0(i3c->global_idx), reg);
if (rc)
return rc;
/*
 * reg1: set up the instance id, but leave everything else disabled,
 * as it's all for client mode.
 */
reg = AST2600_I3CG_REG1_INST_ID(i3c->global_idx);
rc = regmap_write(i3c->global_regs,
AST2600_I3CG_REG1(i3c->global_idx), reg);
return rc;
}
static void ast2600_i3c_set_dat_ibi(struct dw_i3c_master *i3c,
struct i3c_dev_desc *dev,
bool enable, u32 *dat)
{
/*
 * The ast2600 i3c controller will lock up on receiving 4n+1-byte IBIs
 * if the PEC is disabled. We have no way to restrict the length of
 * IBIs sent to the controller, so we need to unconditionally enable
 * PEC checking, which means we drop a byte of payload data.
 */
if (enable && dev->info.bcr & I3C_BCR_IBI_PAYLOAD) {
dev_warn_once(&i3c->base.dev,
"Enabling PEC workaround. IBI payloads will be truncated\n");
*dat |= DEV_ADDR_TABLE_IBI_PEC;
}
}
static const struct dw_i3c_platform_ops ast2600_i3c_ops = {
.init = ast2600_i3c_init,
.set_dat_ibi = ast2600_i3c_set_dat_ibi,
};
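/*
 * A minimal sketch of the devicetree wiring this probe expects (the node
 * name, unit address and values below are illustrative, not taken from a
 * real board):
 *
 *	i3c0: i3c-master@2000 {
 *		compatible = "aspeed,ast2600-i3c";
 *		aspeed,global-regs = <&i3c_gr 0>;
 *		sda-pullup-ohms = <750>;
 *	};
 *
 * "aspeed,global-regs" is a phandle to the shared syscon block plus this
 * controller's instance index; "sda-pullup-ohms" is optional and falls
 * back to AST2600_DEFAULT_SDA_PULLUP_OHMS when absent.
 */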
static int ast2600_i3c_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
struct of_phandle_args gspec;
struct ast2600_i3c *i3c;
int rc;
i3c = devm_kzalloc(&pdev->dev, sizeof(*i3c), GFP_KERNEL);
if (!i3c)
return -ENOMEM;
rc = of_parse_phandle_with_fixed_args(np, "aspeed,global-regs", 1, 0,
&gspec);
if (rc)
return -ENODEV;
i3c->global_regs = syscon_node_to_regmap(gspec.np);
of_node_put(gspec.np);
if (IS_ERR(i3c->global_regs))
return PTR_ERR(i3c->global_regs);
i3c->global_idx = gspec.args[0];
rc = of_property_read_u32(np, "sda-pullup-ohms", &i3c->sda_pullup);
if (rc)
i3c->sda_pullup = AST2600_DEFAULT_SDA_PULLUP_OHMS;
rc = ast2600_i3c_pullup_to_reg(i3c->sda_pullup, NULL);
if (rc)
dev_err(&pdev->dev, "invalid sda-pullup value %d\n",
i3c->sda_pullup);
i3c->dw.platform_ops = &ast2600_i3c_ops;
i3c->dw.ibi_capable = true;
return dw_i3c_common_probe(&i3c->dw, pdev);
}
static void ast2600_i3c_remove(struct platform_device *pdev)
{
struct dw_i3c_master *dw_i3c = platform_get_drvdata(pdev);
dw_i3c_common_remove(dw_i3c);
}
static const struct of_device_id ast2600_i3c_master_of_match[] = {
{ .compatible = "aspeed,ast2600-i3c", },
{},
};
MODULE_DEVICE_TABLE(of, ast2600_i3c_master_of_match);
static struct platform_driver ast2600_i3c_driver = {
.probe = ast2600_i3c_probe,
.remove_new = ast2600_i3c_remove,
.driver = {
.name = "ast2600-i3c-master",
.of_match_table = ast2600_i3c_master_of_match,
},
};
module_platform_driver(ast2600_i3c_driver);
MODULE_AUTHOR("Jeremy Kerr <[email protected]>");
MODULE_DESCRIPTION("ASPEED AST2600 I3C driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/i3c/master/ast2600-i3c-master.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2018 Synopsys, Inc. and/or its affiliates.
*
* Author: Vitor Soares <[email protected]>
*/
#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/i3c/master.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/iopoll.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/reset.h>
#include <linux/slab.h>
#include "dw-i3c-master.h"
#define DEVICE_CTRL 0x0
#define DEV_CTRL_ENABLE BIT(31)
#define DEV_CTRL_RESUME BIT(30)
#define DEV_CTRL_HOT_JOIN_NACK BIT(8)
#define DEV_CTRL_I2C_SLAVE_PRESENT BIT(7)
#define DEVICE_ADDR 0x4
#define DEV_ADDR_DYNAMIC_ADDR_VALID BIT(31)
#define DEV_ADDR_DYNAMIC(x) (((x) << 16) & GENMASK(22, 16))
#define HW_CAPABILITY 0x8
#define COMMAND_QUEUE_PORT 0xc
#define COMMAND_PORT_TOC BIT(30)
#define COMMAND_PORT_READ_TRANSFER BIT(28)
#define COMMAND_PORT_SDAP BIT(27)
#define COMMAND_PORT_ROC BIT(26)
#define COMMAND_PORT_SPEED(x) (((x) << 21) & GENMASK(23, 21))
#define COMMAND_PORT_DEV_INDEX(x) (((x) << 16) & GENMASK(20, 16))
#define COMMAND_PORT_CP BIT(15)
#define COMMAND_PORT_CMD(x) (((x) << 7) & GENMASK(14, 7))
#define COMMAND_PORT_TID(x) (((x) << 3) & GENMASK(6, 3))
#define COMMAND_PORT_ARG_DATA_LEN(x) (((x) << 16) & GENMASK(31, 16))
#define COMMAND_PORT_ARG_DATA_LEN_MAX 65536
#define COMMAND_PORT_TRANSFER_ARG 0x01
#define COMMAND_PORT_SDA_DATA_BYTE_3(x) (((x) << 24) & GENMASK(31, 24))
#define COMMAND_PORT_SDA_DATA_BYTE_2(x) (((x) << 16) & GENMASK(23, 16))
#define COMMAND_PORT_SDA_DATA_BYTE_1(x) (((x) << 8) & GENMASK(15, 8))
#define COMMAND_PORT_SDA_BYTE_STRB_3 BIT(5)
#define COMMAND_PORT_SDA_BYTE_STRB_2 BIT(4)
#define COMMAND_PORT_SDA_BYTE_STRB_1 BIT(3)
#define COMMAND_PORT_SHORT_DATA_ARG 0x02
#define COMMAND_PORT_DEV_COUNT(x) (((x) << 21) & GENMASK(25, 21))
#define COMMAND_PORT_ADDR_ASSGN_CMD 0x03
#define RESPONSE_QUEUE_PORT 0x10
#define RESPONSE_PORT_ERR_STATUS(x) (((x) & GENMASK(31, 28)) >> 28)
#define RESPONSE_NO_ERROR 0
#define RESPONSE_ERROR_CRC 1
#define RESPONSE_ERROR_PARITY 2
#define RESPONSE_ERROR_FRAME 3
#define RESPONSE_ERROR_IBA_NACK 4
#define RESPONSE_ERROR_ADDRESS_NACK 5
#define RESPONSE_ERROR_OVER_UNDER_FLOW 6
#define RESPONSE_ERROR_TRANSF_ABORT 8
#define RESPONSE_ERROR_I2C_W_NACK_ERR 9
#define RESPONSE_PORT_TID(x) (((x) & GENMASK(27, 24)) >> 24)
#define RESPONSE_PORT_DATA_LEN(x) ((x) & GENMASK(15, 0))
#define RX_TX_DATA_PORT 0x14
#define IBI_QUEUE_STATUS 0x18
#define IBI_QUEUE_STATUS_IBI_ID(x) (((x) & GENMASK(15, 8)) >> 8)
#define IBI_QUEUE_STATUS_DATA_LEN(x) ((x) & GENMASK(7, 0))
#define IBI_QUEUE_IBI_ADDR(x) (IBI_QUEUE_STATUS_IBI_ID(x) >> 1)
#define IBI_QUEUE_IBI_RNW(x) (IBI_QUEUE_STATUS_IBI_ID(x) & BIT(0))
#define IBI_TYPE_MR(x) \
((IBI_QUEUE_IBI_ADDR(x) != I3C_HOT_JOIN_ADDR) && !IBI_QUEUE_IBI_RNW(x))
#define IBI_TYPE_HJ(x) \
((IBI_QUEUE_IBI_ADDR(x) == I3C_HOT_JOIN_ADDR) && !IBI_QUEUE_IBI_RNW(x))
#define IBI_TYPE_SIRQ(x) \
((IBI_QUEUE_IBI_ADDR(x) != I3C_HOT_JOIN_ADDR) && IBI_QUEUE_IBI_RNW(x))
#define QUEUE_THLD_CTRL 0x1c
#define QUEUE_THLD_CTRL_IBI_STAT_MASK GENMASK(31, 24)
#define QUEUE_THLD_CTRL_IBI_STAT(x) (((x) - 1) << 24)
#define QUEUE_THLD_CTRL_IBI_DATA_MASK GENMASK(20, 16)
#define QUEUE_THLD_CTRL_IBI_DATA(x) ((x) << 16)
#define QUEUE_THLD_CTRL_RESP_BUF_MASK GENMASK(15, 8)
#define QUEUE_THLD_CTRL_RESP_BUF(x) (((x) - 1) << 8)
#define DATA_BUFFER_THLD_CTRL 0x20
#define DATA_BUFFER_THLD_CTRL_RX_BUF GENMASK(11, 8)
#define IBI_QUEUE_CTRL 0x24
#define IBI_MR_REQ_REJECT 0x2C
#define IBI_SIR_REQ_REJECT 0x30
#define IBI_REQ_REJECT_ALL GENMASK(31, 0)
#define RESET_CTRL 0x34
#define RESET_CTRL_IBI_QUEUE BIT(5)
#define RESET_CTRL_RX_FIFO BIT(4)
#define RESET_CTRL_TX_FIFO BIT(3)
#define RESET_CTRL_RESP_QUEUE BIT(2)
#define RESET_CTRL_CMD_QUEUE BIT(1)
#define RESET_CTRL_SOFT BIT(0)
#define SLV_EVENT_CTRL 0x38
#define INTR_STATUS 0x3c
#define INTR_STATUS_EN 0x40
#define INTR_SIGNAL_EN 0x44
#define INTR_FORCE 0x48
#define INTR_BUSOWNER_UPDATE_STAT BIT(13)
#define INTR_IBI_UPDATED_STAT BIT(12)
#define INTR_READ_REQ_RECV_STAT BIT(11)
#define INTR_DEFSLV_STAT BIT(10)
#define INTR_TRANSFER_ERR_STAT BIT(9)
#define INTR_DYN_ADDR_ASSGN_STAT BIT(8)
#define INTR_CCC_UPDATED_STAT BIT(6)
#define INTR_TRANSFER_ABORT_STAT BIT(5)
#define INTR_RESP_READY_STAT BIT(4)
#define INTR_CMD_QUEUE_READY_STAT BIT(3)
#define INTR_IBI_THLD_STAT BIT(2)
#define INTR_RX_THLD_STAT BIT(1)
#define INTR_TX_THLD_STAT BIT(0)
#define INTR_ALL (INTR_BUSOWNER_UPDATE_STAT | \
INTR_IBI_UPDATED_STAT | \
INTR_READ_REQ_RECV_STAT | \
INTR_DEFSLV_STAT | \
INTR_TRANSFER_ERR_STAT | \
INTR_DYN_ADDR_ASSGN_STAT | \
INTR_CCC_UPDATED_STAT | \
INTR_TRANSFER_ABORT_STAT | \
INTR_RESP_READY_STAT | \
INTR_CMD_QUEUE_READY_STAT | \
INTR_IBI_THLD_STAT | \
INTR_TX_THLD_STAT | \
INTR_RX_THLD_STAT)
#define INTR_MASTER_MASK (INTR_TRANSFER_ERR_STAT | \
INTR_RESP_READY_STAT)
#define QUEUE_STATUS_LEVEL 0x4c
#define QUEUE_STATUS_IBI_STATUS_CNT(x) (((x) & GENMASK(28, 24)) >> 24)
#define QUEUE_STATUS_IBI_BUF_BLR(x) (((x) & GENMASK(23, 16)) >> 16)
#define QUEUE_STATUS_LEVEL_RESP(x) (((x) & GENMASK(15, 8)) >> 8)
#define QUEUE_STATUS_LEVEL_CMD(x) ((x) & GENMASK(7, 0))
#define DATA_BUFFER_STATUS_LEVEL 0x50
#define DATA_BUFFER_STATUS_LEVEL_TX(x) ((x) & GENMASK(7, 0))
#define PRESENT_STATE 0x54
#define CCC_DEVICE_STATUS 0x58
#define DEVICE_ADDR_TABLE_POINTER 0x5c
#define DEVICE_ADDR_TABLE_DEPTH(x) (((x) & GENMASK(31, 16)) >> 16)
#define DEVICE_ADDR_TABLE_ADDR(x) ((x) & GENMASK(7, 0))
#define DEV_CHAR_TABLE_POINTER 0x60
#define VENDOR_SPECIFIC_REG_POINTER 0x6c
#define SLV_PID_VALUE 0x74
#define SLV_CHAR_CTRL 0x78
#define SLV_MAX_LEN 0x7c
#define MAX_READ_TURNAROUND 0x80
#define MAX_DATA_SPEED 0x84
#define SLV_DEBUG_STATUS 0x88
#define SLV_INTR_REQ 0x8c
#define DEVICE_CTRL_EXTENDED 0xb0
#define SCL_I3C_OD_TIMING 0xb4
#define SCL_I3C_PP_TIMING 0xb8
#define SCL_I3C_TIMING_HCNT(x) (((x) << 16) & GENMASK(23, 16))
#define SCL_I3C_TIMING_LCNT(x) ((x) & GENMASK(7, 0))
#define SCL_I3C_TIMING_CNT_MIN 5
#define SCL_I2C_FM_TIMING 0xbc
#define SCL_I2C_FM_TIMING_HCNT(x) (((x) << 16) & GENMASK(31, 16))
#define SCL_I2C_FM_TIMING_LCNT(x) ((x) & GENMASK(15, 0))
#define SCL_I2C_FMP_TIMING 0xc0
#define SCL_I2C_FMP_TIMING_HCNT(x) (((x) << 16) & GENMASK(23, 16))
#define SCL_I2C_FMP_TIMING_LCNT(x) ((x) & GENMASK(15, 0))
#define SCL_EXT_LCNT_TIMING 0xc8
#define SCL_EXT_LCNT_4(x) (((x) << 24) & GENMASK(31, 24))
#define SCL_EXT_LCNT_3(x) (((x) << 16) & GENMASK(23, 16))
#define SCL_EXT_LCNT_2(x) (((x) << 8) & GENMASK(15, 8))
#define SCL_EXT_LCNT_1(x) ((x) & GENMASK(7, 0))
#define SCL_EXT_TERMN_LCNT_TIMING 0xcc
#define BUS_FREE_TIMING 0xd4
#define BUS_I3C_MST_FREE(x) ((x) & GENMASK(15, 0))
#define BUS_IDLE_TIMING 0xd8
#define I3C_VER_ID 0xe0
#define I3C_VER_TYPE 0xe4
#define EXTENDED_CAPABILITY 0xe8
#define SLAVE_CONFIG 0xec
#define DEV_ADDR_TABLE_IBI_MDB BIT(12)
#define DEV_ADDR_TABLE_SIR_REJECT BIT(13)
#define DEV_ADDR_TABLE_LEGACY_I2C_DEV BIT(31)
#define DEV_ADDR_TABLE_DYNAMIC_ADDR(x) (((x) << 16) & GENMASK(23, 16))
#define DEV_ADDR_TABLE_STATIC_ADDR(x) ((x) & GENMASK(6, 0))
#define DEV_ADDR_TABLE_LOC(start, idx) ((start) + ((idx) << 2))
#define I3C_BUS_SDR1_SCL_RATE 8000000
#define I3C_BUS_SDR2_SCL_RATE 6000000
#define I3C_BUS_SDR3_SCL_RATE 4000000
#define I3C_BUS_SDR4_SCL_RATE 2000000
#define I3C_BUS_I2C_FM_TLOW_MIN_NS 1300
#define I3C_BUS_I2C_FMP_TLOW_MIN_NS 500
#define I3C_BUS_THIGH_MAX_NS 41
#define XFER_TIMEOUT (msecs_to_jiffies(1000))
struct dw_i3c_cmd {
u32 cmd_lo;
u32 cmd_hi;
u16 tx_len;
const void *tx_buf;
u16 rx_len;
void *rx_buf;
u8 error;
};
struct dw_i3c_xfer {
struct list_head node;
struct completion comp;
int ret;
unsigned int ncmds;
struct dw_i3c_cmd cmds[];
};
struct dw_i3c_i2c_dev_data {
u8 index;
struct i3c_generic_ibi_pool *ibi_pool;
};
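/*
 * even_parity() returns 1 when @p has an even number of set bits, i.e.
 * the parity bit value that gives the byte odd parity overall (the form
 * I3C dynamic addresses carry). Folding the byte into a nibble with
 * p ^= p >> 4 preserves parity, and 0x9669 is the 16-entry parity lookup
 * table for nibbles: even_parity(0x08) = 0 (one bit set), while
 * even_parity(0x03) = 1 (two bits set).
 */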
static u8 even_parity(u8 p)
{
p ^= p >> 4;
p &= 0xf;
return (0x9669 >> p) & 1;
}
static bool dw_i3c_master_supports_ccc_cmd(struct i3c_master_controller *m,
const struct i3c_ccc_cmd *cmd)
{
if (cmd->ndests > 1)
return false;
switch (cmd->id) {
case I3C_CCC_ENEC(true):
case I3C_CCC_ENEC(false):
case I3C_CCC_DISEC(true):
case I3C_CCC_DISEC(false):
case I3C_CCC_ENTAS(0, true):
case I3C_CCC_ENTAS(0, false):
case I3C_CCC_RSTDAA(true):
case I3C_CCC_RSTDAA(false):
case I3C_CCC_ENTDAA:
case I3C_CCC_SETMWL(true):
case I3C_CCC_SETMWL(false):
case I3C_CCC_SETMRL(true):
case I3C_CCC_SETMRL(false):
case I3C_CCC_ENTHDR(0):
case I3C_CCC_SETDASA:
case I3C_CCC_SETNEWDA:
case I3C_CCC_GETMWL:
case I3C_CCC_GETMRL:
case I3C_CCC_GETPID:
case I3C_CCC_GETBCR:
case I3C_CCC_GETDCR:
case I3C_CCC_GETSTATUS:
case I3C_CCC_GETMXDS:
case I3C_CCC_GETHDRCAP:
return true;
default:
return false;
}
}
static inline struct dw_i3c_master *
to_dw_i3c_master(struct i3c_master_controller *master)
{
return container_of(master, struct dw_i3c_master, base);
}
static void dw_i3c_master_disable(struct dw_i3c_master *master)
{
writel(readl(master->regs + DEVICE_CTRL) & ~DEV_CTRL_ENABLE,
master->regs + DEVICE_CTRL);
}
static void dw_i3c_master_enable(struct dw_i3c_master *master)
{
writel(readl(master->regs + DEVICE_CTRL) | DEV_CTRL_ENABLE,
master->regs + DEVICE_CTRL);
}
static int dw_i3c_master_get_addr_pos(struct dw_i3c_master *master, u8 addr)
{
int pos;
for (pos = 0; pos < master->maxdevs; pos++) {
if (addr == master->devs[pos].addr)
return pos;
}
return -EINVAL;
}
static int dw_i3c_master_get_free_pos(struct dw_i3c_master *master)
{
if (!(master->free_pos & GENMASK(master->maxdevs - 1, 0)))
return -ENOSPC;
return ffs(master->free_pos) - 1;
}
static void dw_i3c_master_wr_tx_fifo(struct dw_i3c_master *master,
const u8 *bytes, int nbytes)
{
writesl(master->regs + RX_TX_DATA_PORT, bytes, nbytes / 4);
if (nbytes & 3) {
u32 tmp = 0;
memcpy(&tmp, bytes + (nbytes & ~3), nbytes & 3);
writesl(master->regs + RX_TX_DATA_PORT, &tmp, 1);
}
}
static void dw_i3c_master_read_fifo(struct dw_i3c_master *master,
int reg, u8 *bytes, int nbytes)
{
readsl(master->regs + reg, bytes, nbytes / 4);
if (nbytes & 3) {
u32 tmp;
readsl(master->regs + reg, &tmp, 1);
memcpy(bytes + (nbytes & ~3), &tmp, nbytes & 3);
}
}
static void dw_i3c_master_read_rx_fifo(struct dw_i3c_master *master,
u8 *bytes, int nbytes)
{
dw_i3c_master_read_fifo(master, RX_TX_DATA_PORT, bytes, nbytes);
}
static void dw_i3c_master_read_ibi_fifo(struct dw_i3c_master *master,
u8 *bytes, int nbytes)
{
dw_i3c_master_read_fifo(master, IBI_QUEUE_STATUS, bytes, nbytes);
}
static struct dw_i3c_xfer *
dw_i3c_master_alloc_xfer(struct dw_i3c_master *master, unsigned int ncmds)
{
struct dw_i3c_xfer *xfer;
xfer = kzalloc(struct_size(xfer, cmds, ncmds), GFP_KERNEL);
if (!xfer)
return NULL;
INIT_LIST_HEAD(&xfer->node);
xfer->ncmds = ncmds;
xfer->ret = -ETIMEDOUT;
return xfer;
}
static void dw_i3c_master_free_xfer(struct dw_i3c_xfer *xfer)
{
kfree(xfer);
}
static void dw_i3c_master_start_xfer_locked(struct dw_i3c_master *master)
{
struct dw_i3c_xfer *xfer = master->xferqueue.cur;
unsigned int i;
u32 thld_ctrl;
if (!xfer)
return;
for (i = 0; i < xfer->ncmds; i++) {
struct dw_i3c_cmd *cmd = &xfer->cmds[i];
dw_i3c_master_wr_tx_fifo(master, cmd->tx_buf, cmd->tx_len);
}
thld_ctrl = readl(master->regs + QUEUE_THLD_CTRL);
thld_ctrl &= ~QUEUE_THLD_CTRL_RESP_BUF_MASK;
thld_ctrl |= QUEUE_THLD_CTRL_RESP_BUF(xfer->ncmds);
writel(thld_ctrl, master->regs + QUEUE_THLD_CTRL);
for (i = 0; i < xfer->ncmds; i++) {
struct dw_i3c_cmd *cmd = &xfer->cmds[i];
writel(cmd->cmd_hi, master->regs + COMMAND_QUEUE_PORT);
writel(cmd->cmd_lo, master->regs + COMMAND_QUEUE_PORT);
}
}
static void dw_i3c_master_enqueue_xfer(struct dw_i3c_master *master,
struct dw_i3c_xfer *xfer)
{
unsigned long flags;
init_completion(&xfer->comp);
spin_lock_irqsave(&master->xferqueue.lock, flags);
if (master->xferqueue.cur) {
list_add_tail(&xfer->node, &master->xferqueue.list);
} else {
master->xferqueue.cur = xfer;
dw_i3c_master_start_xfer_locked(master);
}
spin_unlock_irqrestore(&master->xferqueue.lock, flags);
}
static void dw_i3c_master_dequeue_xfer_locked(struct dw_i3c_master *master,
struct dw_i3c_xfer *xfer)
{
if (master->xferqueue.cur == xfer) {
u32 status;
master->xferqueue.cur = NULL;
writel(RESET_CTRL_RX_FIFO | RESET_CTRL_TX_FIFO |
RESET_CTRL_RESP_QUEUE | RESET_CTRL_CMD_QUEUE,
master->regs + RESET_CTRL);
readl_poll_timeout_atomic(master->regs + RESET_CTRL, status,
!status, 10, 1000000);
} else {
list_del_init(&xfer->node);
}
}
static void dw_i3c_master_dequeue_xfer(struct dw_i3c_master *master,
struct dw_i3c_xfer *xfer)
{
unsigned long flags;
spin_lock_irqsave(&master->xferqueue.lock, flags);
dw_i3c_master_dequeue_xfer_locked(master, xfer);
spin_unlock_irqrestore(&master->xferqueue.lock, flags);
}
static void dw_i3c_master_end_xfer_locked(struct dw_i3c_master *master, u32 isr)
{
struct dw_i3c_xfer *xfer = master->xferqueue.cur;
int i, ret = 0;
u32 nresp;
if (!xfer)
return;
nresp = readl(master->regs + QUEUE_STATUS_LEVEL);
nresp = QUEUE_STATUS_LEVEL_RESP(nresp);
for (i = 0; i < nresp; i++) {
struct dw_i3c_cmd *cmd;
u32 resp;
resp = readl(master->regs + RESPONSE_QUEUE_PORT);
cmd = &xfer->cmds[RESPONSE_PORT_TID(resp)];
cmd->rx_len = RESPONSE_PORT_DATA_LEN(resp);
cmd->error = RESPONSE_PORT_ERR_STATUS(resp);
if (cmd->rx_len && !cmd->error)
dw_i3c_master_read_rx_fifo(master, cmd->rx_buf,
cmd->rx_len);
}
for (i = 0; i < nresp; i++) {
switch (xfer->cmds[i].error) {
case RESPONSE_NO_ERROR:
break;
case RESPONSE_ERROR_PARITY:
case RESPONSE_ERROR_IBA_NACK:
case RESPONSE_ERROR_TRANSF_ABORT:
case RESPONSE_ERROR_CRC:
case RESPONSE_ERROR_FRAME:
ret = -EIO;
break;
case RESPONSE_ERROR_OVER_UNDER_FLOW:
ret = -ENOSPC;
break;
case RESPONSE_ERROR_I2C_W_NACK_ERR:
case RESPONSE_ERROR_ADDRESS_NACK:
default:
ret = -EINVAL;
break;
}
}
xfer->ret = ret;
complete(&xfer->comp);
if (ret < 0) {
dw_i3c_master_dequeue_xfer_locked(master, xfer);
writel(readl(master->regs + DEVICE_CTRL) | DEV_CTRL_RESUME,
master->regs + DEVICE_CTRL);
}
xfer = list_first_entry_or_null(&master->xferqueue.list,
struct dw_i3c_xfer,
node);
if (xfer)
list_del_init(&xfer->node);
master->xferqueue.cur = xfer;
dw_i3c_master_start_xfer_locked(master);
}
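/*
 * Worked example for the push-pull timing below, assuming a 250 MHz core
 * clock (core_period = 4 ns) and a 12.5 MHz I3C SCL rate:
 * hcnt = DIV_ROUND_UP(I3C_BUS_THIGH_MAX_NS, 4) - 1 = 10, which clears the
 * SCL_I3C_TIMING_CNT_MIN floor, and
 * lcnt = DIV_ROUND_UP(250000000, 12500000) - hcnt = 10, so SCL ends up
 * with a roughly 50/50 duty cycle at the requested rate.
 */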
static int dw_i3c_clk_cfg(struct dw_i3c_master *master)
{
unsigned long core_rate, core_period;
u32 scl_timing;
u8 hcnt, lcnt;
core_rate = clk_get_rate(master->core_clk);
if (!core_rate)
return -EINVAL;
core_period = DIV_ROUND_UP(1000000000, core_rate);
hcnt = DIV_ROUND_UP(I3C_BUS_THIGH_MAX_NS, core_period) - 1;
if (hcnt < SCL_I3C_TIMING_CNT_MIN)
hcnt = SCL_I3C_TIMING_CNT_MIN;
lcnt = DIV_ROUND_UP(core_rate, master->base.bus.scl_rate.i3c) - hcnt;
if (lcnt < SCL_I3C_TIMING_CNT_MIN)
lcnt = SCL_I3C_TIMING_CNT_MIN;
scl_timing = SCL_I3C_TIMING_HCNT(hcnt) | SCL_I3C_TIMING_LCNT(lcnt);
writel(scl_timing, master->regs + SCL_I3C_PP_TIMING);
/*
* In pure i3c mode, MST_FREE represents tCAS. In shared mode, this
* will be set up by dw_i2c_clk_cfg as tLOW.
*/
if (master->base.bus.mode == I3C_BUS_MODE_PURE)
writel(BUS_I3C_MST_FREE(lcnt), master->regs + BUS_FREE_TIMING);
lcnt = max_t(u8,
DIV_ROUND_UP(I3C_BUS_TLOW_OD_MIN_NS, core_period), lcnt);
scl_timing = SCL_I3C_TIMING_HCNT(hcnt) | SCL_I3C_TIMING_LCNT(lcnt);
writel(scl_timing, master->regs + SCL_I3C_OD_TIMING);
lcnt = DIV_ROUND_UP(core_rate, I3C_BUS_SDR1_SCL_RATE) - hcnt;
scl_timing = SCL_EXT_LCNT_1(lcnt);
lcnt = DIV_ROUND_UP(core_rate, I3C_BUS_SDR2_SCL_RATE) - hcnt;
scl_timing |= SCL_EXT_LCNT_2(lcnt);
lcnt = DIV_ROUND_UP(core_rate, I3C_BUS_SDR3_SCL_RATE) - hcnt;
scl_timing |= SCL_EXT_LCNT_3(lcnt);
lcnt = DIV_ROUND_UP(core_rate, I3C_BUS_SDR4_SCL_RATE) - hcnt;
scl_timing |= SCL_EXT_LCNT_4(lcnt);
writel(scl_timing, master->regs + SCL_EXT_LCNT_TIMING);
return 0;
}
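/*
 * Equivalent example for the legacy I2C timings with the same 250 MHz
 * core clock, assuming the usual definitions of 1 MHz for
 * I3C_BUS_I2C_FM_PLUS_SCL_RATE and 400 kHz for I3C_BUS_I2C_FM_SCL_RATE:
 * Fast Mode Plus gets lcnt = DIV_ROUND_UP(500, 4) = 125 and
 * hcnt = DIV_ROUND_UP(250000000, 1000000) - 125 = 125; Fast Mode gets
 * lcnt = DIV_ROUND_UP(1300, 4) = 325 and hcnt = 625 - 325 = 300. The FM
 * low count is also reused as the bus-free time.
 */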
static int dw_i2c_clk_cfg(struct dw_i3c_master *master)
{
unsigned long core_rate, core_period;
u16 hcnt, lcnt;
u32 scl_timing;
core_rate = clk_get_rate(master->core_clk);
if (!core_rate)
return -EINVAL;
core_period = DIV_ROUND_UP(1000000000, core_rate);
lcnt = DIV_ROUND_UP(I3C_BUS_I2C_FMP_TLOW_MIN_NS, core_period);
hcnt = DIV_ROUND_UP(core_rate, I3C_BUS_I2C_FM_PLUS_SCL_RATE) - lcnt;
scl_timing = SCL_I2C_FMP_TIMING_HCNT(hcnt) |
SCL_I2C_FMP_TIMING_LCNT(lcnt);
writel(scl_timing, master->regs + SCL_I2C_FMP_TIMING);
lcnt = DIV_ROUND_UP(I3C_BUS_I2C_FM_TLOW_MIN_NS, core_period);
hcnt = DIV_ROUND_UP(core_rate, I3C_BUS_I2C_FM_SCL_RATE) - lcnt;
scl_timing = SCL_I2C_FM_TIMING_HCNT(hcnt) |
SCL_I2C_FM_TIMING_LCNT(lcnt);
writel(scl_timing, master->regs + SCL_I2C_FM_TIMING);
writel(BUS_I3C_MST_FREE(lcnt), master->regs + BUS_FREE_TIMING);
writel(readl(master->regs + DEVICE_CTRL) | DEV_CTRL_I2C_SLAVE_PRESENT,
master->regs + DEVICE_CTRL);
return 0;
}
static int dw_i3c_master_bus_init(struct i3c_master_controller *m)
{
struct dw_i3c_master *master = to_dw_i3c_master(m);
struct i3c_bus *bus = i3c_master_get_bus(m);
struct i3c_device_info info = { };
u32 thld_ctrl;
int ret;
ret = master->platform_ops->init(master);
if (ret)
return ret;
switch (bus->mode) {
case I3C_BUS_MODE_MIXED_FAST:
case I3C_BUS_MODE_MIXED_LIMITED:
ret = dw_i2c_clk_cfg(master);
if (ret)
return ret;
fallthrough;
case I3C_BUS_MODE_PURE:
ret = dw_i3c_clk_cfg(master);
if (ret)
return ret;
break;
default:
return -EINVAL;
}
thld_ctrl = readl(master->regs + QUEUE_THLD_CTRL);
	thld_ctrl &= ~(QUEUE_THLD_CTRL_RESP_BUF_MASK |
		       QUEUE_THLD_CTRL_IBI_STAT_MASK);
thld_ctrl |= QUEUE_THLD_CTRL_IBI_STAT(1) |
QUEUE_THLD_CTRL_IBI_DATA(31);
writel(thld_ctrl, master->regs + QUEUE_THLD_CTRL);
thld_ctrl = readl(master->regs + DATA_BUFFER_THLD_CTRL);
thld_ctrl &= ~DATA_BUFFER_THLD_CTRL_RX_BUF;
writel(thld_ctrl, master->regs + DATA_BUFFER_THLD_CTRL);
writel(INTR_ALL, master->regs + INTR_STATUS);
writel(INTR_MASTER_MASK, master->regs + INTR_STATUS_EN);
writel(INTR_MASTER_MASK, master->regs + INTR_SIGNAL_EN);
ret = i3c_master_get_free_addr(m, 0);
if (ret < 0)
return ret;
writel(DEV_ADDR_DYNAMIC_ADDR_VALID | DEV_ADDR_DYNAMIC(ret),
master->regs + DEVICE_ADDR);
memset(&info, 0, sizeof(info));
info.dyn_addr = ret;
ret = i3c_master_set_info(&master->base, &info);
if (ret)
return ret;
writel(IBI_REQ_REJECT_ALL, master->regs + IBI_SIR_REQ_REJECT);
writel(IBI_REQ_REJECT_ALL, master->regs + IBI_MR_REQ_REJECT);
/* For now don't support Hot-Join */
writel(readl(master->regs + DEVICE_CTRL) | DEV_CTRL_HOT_JOIN_NACK,
master->regs + DEVICE_CTRL);
dw_i3c_master_enable(master);
return 0;
}
static void dw_i3c_master_bus_cleanup(struct i3c_master_controller *m)
{
struct dw_i3c_master *master = to_dw_i3c_master(m);
dw_i3c_master_disable(master);
}
static int dw_i3c_ccc_set(struct dw_i3c_master *master,
struct i3c_ccc_cmd *ccc)
{
struct dw_i3c_xfer *xfer;
struct dw_i3c_cmd *cmd;
int ret, pos = 0;
if (ccc->id & I3C_CCC_DIRECT) {
pos = dw_i3c_master_get_addr_pos(master, ccc->dests[0].addr);
if (pos < 0)
return pos;
}
xfer = dw_i3c_master_alloc_xfer(master, 1);
if (!xfer)
return -ENOMEM;
cmd = xfer->cmds;
cmd->tx_buf = ccc->dests[0].payload.data;
cmd->tx_len = ccc->dests[0].payload.len;
cmd->cmd_hi = COMMAND_PORT_ARG_DATA_LEN(ccc->dests[0].payload.len) |
COMMAND_PORT_TRANSFER_ARG;
cmd->cmd_lo = COMMAND_PORT_CP |
COMMAND_PORT_DEV_INDEX(pos) |
COMMAND_PORT_CMD(ccc->id) |
COMMAND_PORT_TOC |
COMMAND_PORT_ROC;
dw_i3c_master_enqueue_xfer(master, xfer);
if (!wait_for_completion_timeout(&xfer->comp, XFER_TIMEOUT))
dw_i3c_master_dequeue_xfer(master, xfer);
ret = xfer->ret;
if (xfer->cmds[0].error == RESPONSE_ERROR_IBA_NACK)
ccc->err = I3C_ERROR_M2;
dw_i3c_master_free_xfer(xfer);
return ret;
}
static int dw_i3c_ccc_get(struct dw_i3c_master *master, struct i3c_ccc_cmd *ccc)
{
struct dw_i3c_xfer *xfer;
struct dw_i3c_cmd *cmd;
int ret, pos;
pos = dw_i3c_master_get_addr_pos(master, ccc->dests[0].addr);
if (pos < 0)
return pos;
xfer = dw_i3c_master_alloc_xfer(master, 1);
if (!xfer)
return -ENOMEM;
cmd = xfer->cmds;
cmd->rx_buf = ccc->dests[0].payload.data;
cmd->rx_len = ccc->dests[0].payload.len;
cmd->cmd_hi = COMMAND_PORT_ARG_DATA_LEN(ccc->dests[0].payload.len) |
COMMAND_PORT_TRANSFER_ARG;
cmd->cmd_lo = COMMAND_PORT_READ_TRANSFER |
COMMAND_PORT_CP |
COMMAND_PORT_DEV_INDEX(pos) |
COMMAND_PORT_CMD(ccc->id) |
COMMAND_PORT_TOC |
COMMAND_PORT_ROC;
dw_i3c_master_enqueue_xfer(master, xfer);
if (!wait_for_completion_timeout(&xfer->comp, XFER_TIMEOUT))
dw_i3c_master_dequeue_xfer(master, xfer);
ret = xfer->ret;
if (xfer->cmds[0].error == RESPONSE_ERROR_IBA_NACK)
ccc->err = I3C_ERROR_M2;
dw_i3c_master_free_xfer(xfer);
return ret;
}
static int dw_i3c_master_send_ccc_cmd(struct i3c_master_controller *m,
struct i3c_ccc_cmd *ccc)
{
struct dw_i3c_master *master = to_dw_i3c_master(m);
int ret = 0;
if (ccc->id == I3C_CCC_ENTDAA)
return -EINVAL;
if (ccc->rnw)
ret = dw_i3c_ccc_get(master, ccc);
else
ret = dw_i3c_ccc_set(master, ccc);
return ret;
}
static int dw_i3c_master_daa(struct i3c_master_controller *m)
{
struct dw_i3c_master *master = to_dw_i3c_master(m);
struct dw_i3c_xfer *xfer;
struct dw_i3c_cmd *cmd;
u32 olddevs, newdevs;
u8 p, last_addr = 0;
int ret, pos;
olddevs = ~(master->free_pos);
/* Prepare DAT before launching DAA. */
for (pos = 0; pos < master->maxdevs; pos++) {
if (olddevs & BIT(pos))
continue;
ret = i3c_master_get_free_addr(m, last_addr + 1);
if (ret < 0)
return -ENOSPC;
master->devs[pos].addr = ret;
p = even_parity(ret);
last_addr = ret;
ret |= (p << 7);
writel(DEV_ADDR_TABLE_DYNAMIC_ADDR(ret),
master->regs +
DEV_ADDR_TABLE_LOC(master->datstartaddr, pos));
}
xfer = dw_i3c_master_alloc_xfer(master, 1);
if (!xfer)
return -ENOMEM;
pos = dw_i3c_master_get_free_pos(master);
if (pos < 0) {
dw_i3c_master_free_xfer(xfer);
return pos;
}
cmd = &xfer->cmds[0];
cmd->cmd_hi = 0x1;
cmd->cmd_lo = COMMAND_PORT_DEV_COUNT(master->maxdevs - pos) |
COMMAND_PORT_DEV_INDEX(pos) |
COMMAND_PORT_CMD(I3C_CCC_ENTDAA) |
COMMAND_PORT_ADDR_ASSGN_CMD |
COMMAND_PORT_TOC |
COMMAND_PORT_ROC;
dw_i3c_master_enqueue_xfer(master, xfer);
if (!wait_for_completion_timeout(&xfer->comp, XFER_TIMEOUT))
dw_i3c_master_dequeue_xfer(master, xfer);
newdevs = GENMASK(master->maxdevs - cmd->rx_len - 1, 0);
newdevs &= ~olddevs;
for (pos = 0; pos < master->maxdevs; pos++) {
if (newdevs & BIT(pos))
i3c_master_add_i3c_dev_locked(m, master->devs[pos].addr);
}
dw_i3c_master_free_xfer(xfer);
return 0;
}
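/*
 * Parity example for the DAT preparation above (illustrative; assumes
 * even_parity(), defined earlier in this file, returns 1 when its
 * argument has an even number of set bits): address 0x09 (two bits set)
 * is stored as 0x89, while 0x08 (one bit set) is stored as 0x08 -- in
 * both cases the stored byte ends up with odd overall parity, which is
 * what the ENTDAA engine transmits.
 */
static inline u8 dw_i3c_example_dat_addr_byte(u8 addr)
{
	/* mirrors the parity math in dw_i3c_master_daa() above */
	return addr | (even_parity(addr) << 7);
}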
static int dw_i3c_master_priv_xfers(struct i3c_dev_desc *dev,
struct i3c_priv_xfer *i3c_xfers,
int i3c_nxfers)
{
struct dw_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
struct i3c_master_controller *m = i3c_dev_get_master(dev);
struct dw_i3c_master *master = to_dw_i3c_master(m);
unsigned int nrxwords = 0, ntxwords = 0;
struct dw_i3c_xfer *xfer;
int i, ret = 0;
if (!i3c_nxfers)
return 0;
if (i3c_nxfers > master->caps.cmdfifodepth)
return -ENOTSUPP;
for (i = 0; i < i3c_nxfers; i++) {
if (i3c_xfers[i].rnw)
nrxwords += DIV_ROUND_UP(i3c_xfers[i].len, 4);
else
ntxwords += DIV_ROUND_UP(i3c_xfers[i].len, 4);
}
if (ntxwords > master->caps.datafifodepth ||
nrxwords > master->caps.datafifodepth)
return -ENOTSUPP;
xfer = dw_i3c_master_alloc_xfer(master, i3c_nxfers);
if (!xfer)
return -ENOMEM;
for (i = 0; i < i3c_nxfers; i++) {
struct dw_i3c_cmd *cmd = &xfer->cmds[i];
cmd->cmd_hi = COMMAND_PORT_ARG_DATA_LEN(i3c_xfers[i].len) |
COMMAND_PORT_TRANSFER_ARG;
if (i3c_xfers[i].rnw) {
cmd->rx_buf = i3c_xfers[i].data.in;
cmd->rx_len = i3c_xfers[i].len;
cmd->cmd_lo = COMMAND_PORT_READ_TRANSFER |
COMMAND_PORT_SPEED(dev->info.max_read_ds);
} else {
cmd->tx_buf = i3c_xfers[i].data.out;
cmd->tx_len = i3c_xfers[i].len;
cmd->cmd_lo =
COMMAND_PORT_SPEED(dev->info.max_write_ds);
}
cmd->cmd_lo |= COMMAND_PORT_TID(i) |
COMMAND_PORT_DEV_INDEX(data->index) |
COMMAND_PORT_ROC;
if (i == (i3c_nxfers - 1))
cmd->cmd_lo |= COMMAND_PORT_TOC;
}
dw_i3c_master_enqueue_xfer(master, xfer);
if (!wait_for_completion_timeout(&xfer->comp, XFER_TIMEOUT))
dw_i3c_master_dequeue_xfer(master, xfer);
for (i = 0; i < i3c_nxfers; i++) {
struct dw_i3c_cmd *cmd = &xfer->cmds[i];
if (i3c_xfers[i].rnw)
i3c_xfers[i].len = cmd->rx_len;
}
ret = xfer->ret;
dw_i3c_master_free_xfer(xfer);
return ret;
}
static int dw_i3c_master_reattach_i3c_dev(struct i3c_dev_desc *dev,
u8 old_dyn_addr)
{
struct dw_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
struct i3c_master_controller *m = i3c_dev_get_master(dev);
struct dw_i3c_master *master = to_dw_i3c_master(m);
int pos;
pos = dw_i3c_master_get_free_pos(master);
if (data->index > pos && pos > 0) {
writel(0,
master->regs +
DEV_ADDR_TABLE_LOC(master->datstartaddr, data->index));
master->devs[data->index].addr = 0;
master->free_pos |= BIT(data->index);
data->index = pos;
master->devs[pos].addr = dev->info.dyn_addr;
master->free_pos &= ~BIT(pos);
}
writel(DEV_ADDR_TABLE_DYNAMIC_ADDR(dev->info.dyn_addr),
master->regs +
DEV_ADDR_TABLE_LOC(master->datstartaddr, data->index));
master->devs[data->index].addr = dev->info.dyn_addr;
return 0;
}
static int dw_i3c_master_attach_i3c_dev(struct i3c_dev_desc *dev)
{
struct i3c_master_controller *m = i3c_dev_get_master(dev);
struct dw_i3c_master *master = to_dw_i3c_master(m);
struct dw_i3c_i2c_dev_data *data;
int pos;
pos = dw_i3c_master_get_free_pos(master);
if (pos < 0)
return pos;
data = kzalloc(sizeof(*data), GFP_KERNEL);
if (!data)
return -ENOMEM;
data->index = pos;
master->devs[pos].addr = dev->info.dyn_addr ? : dev->info.static_addr;
master->free_pos &= ~BIT(pos);
i3c_dev_set_master_data(dev, data);
writel(DEV_ADDR_TABLE_DYNAMIC_ADDR(master->devs[pos].addr),
master->regs +
DEV_ADDR_TABLE_LOC(master->datstartaddr, data->index));
return 0;
}
static void dw_i3c_master_detach_i3c_dev(struct i3c_dev_desc *dev)
{
struct dw_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
struct i3c_master_controller *m = i3c_dev_get_master(dev);
struct dw_i3c_master *master = to_dw_i3c_master(m);
writel(0,
master->regs +
DEV_ADDR_TABLE_LOC(master->datstartaddr, data->index));
i3c_dev_set_master_data(dev, NULL);
master->devs[data->index].addr = 0;
master->free_pos |= BIT(data->index);
kfree(data);
}
static int dw_i3c_master_i2c_xfers(struct i2c_dev_desc *dev,
const struct i2c_msg *i2c_xfers,
int i2c_nxfers)
{
struct dw_i3c_i2c_dev_data *data = i2c_dev_get_master_data(dev);
struct i3c_master_controller *m = i2c_dev_get_master(dev);
struct dw_i3c_master *master = to_dw_i3c_master(m);
unsigned int nrxwords = 0, ntxwords = 0;
struct dw_i3c_xfer *xfer;
int i, ret = 0;
if (!i2c_nxfers)
return 0;
if (i2c_nxfers > master->caps.cmdfifodepth)
return -ENOTSUPP;
for (i = 0; i < i2c_nxfers; i++) {
if (i2c_xfers[i].flags & I2C_M_RD)
nrxwords += DIV_ROUND_UP(i2c_xfers[i].len, 4);
else
ntxwords += DIV_ROUND_UP(i2c_xfers[i].len, 4);
}
if (ntxwords > master->caps.datafifodepth ||
nrxwords > master->caps.datafifodepth)
return -ENOTSUPP;
xfer = dw_i3c_master_alloc_xfer(master, i2c_nxfers);
if (!xfer)
return -ENOMEM;
for (i = 0; i < i2c_nxfers; i++) {
struct dw_i3c_cmd *cmd = &xfer->cmds[i];
cmd->cmd_hi = COMMAND_PORT_ARG_DATA_LEN(i2c_xfers[i].len) |
COMMAND_PORT_TRANSFER_ARG;
cmd->cmd_lo = COMMAND_PORT_TID(i) |
COMMAND_PORT_DEV_INDEX(data->index) |
COMMAND_PORT_ROC;
if (i2c_xfers[i].flags & I2C_M_RD) {
cmd->cmd_lo |= COMMAND_PORT_READ_TRANSFER;
cmd->rx_buf = i2c_xfers[i].buf;
cmd->rx_len = i2c_xfers[i].len;
} else {
cmd->tx_buf = i2c_xfers[i].buf;
cmd->tx_len = i2c_xfers[i].len;
}
if (i == (i2c_nxfers - 1))
cmd->cmd_lo |= COMMAND_PORT_TOC;
}
dw_i3c_master_enqueue_xfer(master, xfer);
if (!wait_for_completion_timeout(&xfer->comp, XFER_TIMEOUT))
dw_i3c_master_dequeue_xfer(master, xfer);
ret = xfer->ret;
dw_i3c_master_free_xfer(xfer);
return ret;
}
static int dw_i3c_master_attach_i2c_dev(struct i2c_dev_desc *dev)
{
struct i3c_master_controller *m = i2c_dev_get_master(dev);
struct dw_i3c_master *master = to_dw_i3c_master(m);
struct dw_i3c_i2c_dev_data *data;
int pos;
pos = dw_i3c_master_get_free_pos(master);
if (pos < 0)
return pos;
data = kzalloc(sizeof(*data), GFP_KERNEL);
if (!data)
return -ENOMEM;
data->index = pos;
master->devs[pos].addr = dev->addr;
master->free_pos &= ~BIT(pos);
i2c_dev_set_master_data(dev, data);
writel(DEV_ADDR_TABLE_LEGACY_I2C_DEV |
DEV_ADDR_TABLE_STATIC_ADDR(dev->addr),
master->regs +
DEV_ADDR_TABLE_LOC(master->datstartaddr, data->index));
return 0;
}
static void dw_i3c_master_detach_i2c_dev(struct i2c_dev_desc *dev)
{
struct dw_i3c_i2c_dev_data *data = i2c_dev_get_master_data(dev);
struct i3c_master_controller *m = i2c_dev_get_master(dev);
struct dw_i3c_master *master = to_dw_i3c_master(m);
writel(0,
master->regs +
DEV_ADDR_TABLE_LOC(master->datstartaddr, data->index));
i2c_dev_set_master_data(dev, NULL);
master->devs[data->index].addr = 0;
master->free_pos |= BIT(data->index);
kfree(data);
}
static int dw_i3c_master_request_ibi(struct i3c_dev_desc *dev,
const struct i3c_ibi_setup *req)
{
struct dw_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
struct i3c_master_controller *m = i3c_dev_get_master(dev);
struct dw_i3c_master *master = to_dw_i3c_master(m);
unsigned long flags;
data->ibi_pool = i3c_generic_ibi_alloc_pool(dev, req);
if (IS_ERR(data->ibi_pool))
return PTR_ERR(data->ibi_pool);
spin_lock_irqsave(&master->devs_lock, flags);
master->devs[data->index].ibi_dev = dev;
spin_unlock_irqrestore(&master->devs_lock, flags);
return 0;
}
static void dw_i3c_master_free_ibi(struct i3c_dev_desc *dev)
{
struct dw_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
struct i3c_master_controller *m = i3c_dev_get_master(dev);
struct dw_i3c_master *master = to_dw_i3c_master(m);
unsigned long flags;
spin_lock_irqsave(&master->devs_lock, flags);
master->devs[data->index].ibi_dev = NULL;
spin_unlock_irqrestore(&master->devs_lock, flags);
i3c_generic_ibi_free_pool(data->ibi_pool);
data->ibi_pool = NULL;
}
static void dw_i3c_master_set_sir_enabled(struct dw_i3c_master *master,
struct i3c_dev_desc *dev,
u8 idx, bool enable)
{
unsigned long flags;
u32 dat_entry, reg;
bool global;
dat_entry = DEV_ADDR_TABLE_LOC(master->datstartaddr, idx);
spin_lock_irqsave(&master->devs_lock, flags);
reg = readl(master->regs + dat_entry);
if (enable) {
reg &= ~DEV_ADDR_TABLE_SIR_REJECT;
if (dev->info.bcr & I3C_BCR_IBI_PAYLOAD)
reg |= DEV_ADDR_TABLE_IBI_MDB;
} else {
reg |= DEV_ADDR_TABLE_SIR_REJECT;
}
master->platform_ops->set_dat_ibi(master, dev, enable, ®);
writel(reg, master->regs + dat_entry);
reg = readl(master->regs + IBI_SIR_REQ_REJECT);
if (enable) {
global = reg == 0xffffffff;
reg &= ~BIT(idx);
} else {
global = reg == 0;
reg |= BIT(idx);
}
writel(reg, master->regs + IBI_SIR_REQ_REJECT);
if (global) {
reg = readl(master->regs + INTR_STATUS_EN);
reg &= ~INTR_IBI_THLD_STAT;
if (enable)
reg |= INTR_IBI_THLD_STAT;
writel(reg, master->regs + INTR_STATUS_EN);
reg = readl(master->regs + INTR_SIGNAL_EN);
reg &= ~INTR_IBI_THLD_STAT;
if (enable)
reg |= INTR_IBI_THLD_STAT;
writel(reg, master->regs + INTR_SIGNAL_EN);
}
spin_unlock_irqrestore(&master->devs_lock, flags);
}
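/*
 * Transition example for the "global" logic above (an interpretation
 * sketch): bus_init() starts with IBI_SIR_REQ_REJECT == 0xffffffff (all
 * SIRs rejected). Enabling the first device, say DAT index 2, clears
 * BIT(2) and -- because the register was still all-ones -- also unmasks
 * INTR_IBI_THLD_STAT in INTR_STATUS_EN and INTR_SIGNAL_EN. Subsequent
 * enables only clear their own bit and leave the interrupt masks alone.
 */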
static int dw_i3c_master_enable_ibi(struct i3c_dev_desc *dev)
{
struct dw_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
struct i3c_master_controller *m = i3c_dev_get_master(dev);
struct dw_i3c_master *master = to_dw_i3c_master(m);
int rc;
dw_i3c_master_set_sir_enabled(master, dev, data->index, true);
rc = i3c_master_enec_locked(m, dev->info.dyn_addr, I3C_CCC_EVENT_SIR);
if (rc)
dw_i3c_master_set_sir_enabled(master, dev, data->index, false);
return rc;
}
static int dw_i3c_master_disable_ibi(struct i3c_dev_desc *dev)
{
struct dw_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
struct i3c_master_controller *m = i3c_dev_get_master(dev);
struct dw_i3c_master *master = to_dw_i3c_master(m);
int rc;
rc = i3c_master_disec_locked(m, dev->info.dyn_addr, I3C_CCC_EVENT_SIR);
if (rc)
return rc;
dw_i3c_master_set_sir_enabled(master, dev, data->index, false);
return 0;
}
static void dw_i3c_master_recycle_ibi_slot(struct i3c_dev_desc *dev,
struct i3c_ibi_slot *slot)
{
struct dw_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
i3c_generic_ibi_recycle_slot(data->ibi_pool, slot);
}
static void dw_i3c_master_drain_ibi_queue(struct dw_i3c_master *master,
int len)
{
int i;
for (i = 0; i < DIV_ROUND_UP(len, 4); i++)
readl(master->regs + IBI_QUEUE_STATUS);
}
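/*
 * Each read of IBI_QUEUE_STATUS pops one 32-bit word of the IBI FIFO, so
 * e.g. a 5-byte payload is discarded with DIV_ROUND_UP(5, 4) = 2 dummy
 * reads.
 */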
static void dw_i3c_master_handle_ibi_sir(struct dw_i3c_master *master,
u32 status)
{
struct dw_i3c_i2c_dev_data *data;
struct i3c_ibi_slot *slot;
struct i3c_dev_desc *dev;
unsigned long flags;
u8 addr, len;
int idx;
addr = IBI_QUEUE_IBI_ADDR(status);
len = IBI_QUEUE_STATUS_DATA_LEN(status);
/*
	 * We might be tempted to check the error status in bit 30; however, due
* to the PEC errata workaround on some platform implementations (see
* ast2600_i3c_set_dat_ibi()), those will almost always have a PEC
* error on IBI payload data, as well as losing the last byte of
* payload.
*
* If we implement error status checking on that bit, we may need
* a new platform op to validate it.
*/
spin_lock_irqsave(&master->devs_lock, flags);
idx = dw_i3c_master_get_addr_pos(master, addr);
if (idx < 0) {
dev_dbg_ratelimited(&master->base.dev,
"IBI from unknown addr 0x%x\n", addr);
goto err_drain;
}
dev = master->devs[idx].ibi_dev;
if (!dev || !dev->ibi) {
dev_dbg_ratelimited(&master->base.dev,
"IBI from non-requested dev idx %d\n", idx);
goto err_drain;
}
data = i3c_dev_get_master_data(dev);
slot = i3c_generic_ibi_get_free_slot(data->ibi_pool);
if (!slot) {
dev_dbg_ratelimited(&master->base.dev,
"No IBI slots available\n");
goto err_drain;
}
if (dev->ibi->max_payload_len < len) {
dev_dbg_ratelimited(&master->base.dev,
"IBI payload len %d greater than max %d\n",
len, dev->ibi->max_payload_len);
goto err_drain;
}
if (len) {
dw_i3c_master_read_ibi_fifo(master, slot->data, len);
slot->len = len;
}
i3c_master_queue_ibi(dev, slot);
spin_unlock_irqrestore(&master->devs_lock, flags);
return;
err_drain:
dw_i3c_master_drain_ibi_queue(master, len);
spin_unlock_irqrestore(&master->devs_lock, flags);
}
/* "ibis": referring to In-Band Interrupts, and not
* https://en.wikipedia.org/wiki/Australian_white_ibis. The latter should
* not be handled.
*/
static void dw_i3c_master_irq_handle_ibis(struct dw_i3c_master *master)
{
unsigned int i, len, n_ibis;
u32 reg;
reg = readl(master->regs + QUEUE_STATUS_LEVEL);
n_ibis = QUEUE_STATUS_IBI_STATUS_CNT(reg);
if (!n_ibis)
return;
for (i = 0; i < n_ibis; i++) {
reg = readl(master->regs + IBI_QUEUE_STATUS);
if (IBI_TYPE_SIRQ(reg)) {
dw_i3c_master_handle_ibi_sir(master, reg);
} else {
len = IBI_QUEUE_STATUS_DATA_LEN(reg);
dev_info(&master->base.dev,
"unsupported IBI type 0x%lx len %d\n",
IBI_QUEUE_STATUS_IBI_ID(reg), len);
dw_i3c_master_drain_ibi_queue(master, len);
}
}
}
static irqreturn_t dw_i3c_master_irq_handler(int irq, void *dev_id)
{
struct dw_i3c_master *master = dev_id;
u32 status;
status = readl(master->regs + INTR_STATUS);
if (!(status & readl(master->regs + INTR_STATUS_EN))) {
writel(INTR_ALL, master->regs + INTR_STATUS);
return IRQ_NONE;
}
spin_lock(&master->xferqueue.lock);
dw_i3c_master_end_xfer_locked(master, status);
if (status & INTR_TRANSFER_ERR_STAT)
writel(INTR_TRANSFER_ERR_STAT, master->regs + INTR_STATUS);
spin_unlock(&master->xferqueue.lock);
if (status & INTR_IBI_THLD_STAT)
dw_i3c_master_irq_handle_ibis(master);
return IRQ_HANDLED;
}
static const struct i3c_master_controller_ops dw_mipi_i3c_ops = {
.bus_init = dw_i3c_master_bus_init,
.bus_cleanup = dw_i3c_master_bus_cleanup,
.attach_i3c_dev = dw_i3c_master_attach_i3c_dev,
.reattach_i3c_dev = dw_i3c_master_reattach_i3c_dev,
.detach_i3c_dev = dw_i3c_master_detach_i3c_dev,
.do_daa = dw_i3c_master_daa,
.supports_ccc_cmd = dw_i3c_master_supports_ccc_cmd,
.send_ccc_cmd = dw_i3c_master_send_ccc_cmd,
.priv_xfers = dw_i3c_master_priv_xfers,
.attach_i2c_dev = dw_i3c_master_attach_i2c_dev,
.detach_i2c_dev = dw_i3c_master_detach_i2c_dev,
.i2c_xfers = dw_i3c_master_i2c_xfers,
};
static const struct i3c_master_controller_ops dw_mipi_i3c_ibi_ops = {
.bus_init = dw_i3c_master_bus_init,
.bus_cleanup = dw_i3c_master_bus_cleanup,
.attach_i3c_dev = dw_i3c_master_attach_i3c_dev,
.reattach_i3c_dev = dw_i3c_master_reattach_i3c_dev,
.detach_i3c_dev = dw_i3c_master_detach_i3c_dev,
.do_daa = dw_i3c_master_daa,
.supports_ccc_cmd = dw_i3c_master_supports_ccc_cmd,
.send_ccc_cmd = dw_i3c_master_send_ccc_cmd,
.priv_xfers = dw_i3c_master_priv_xfers,
.attach_i2c_dev = dw_i3c_master_attach_i2c_dev,
.detach_i2c_dev = dw_i3c_master_detach_i2c_dev,
.i2c_xfers = dw_i3c_master_i2c_xfers,
.request_ibi = dw_i3c_master_request_ibi,
.free_ibi = dw_i3c_master_free_ibi,
.enable_ibi = dw_i3c_master_enable_ibi,
.disable_ibi = dw_i3c_master_disable_ibi,
.recycle_ibi_slot = dw_i3c_master_recycle_ibi_slot,
};
/* default platform ops implementations */
static int dw_i3c_platform_init_nop(struct dw_i3c_master *i3c)
{
return 0;
}
static void dw_i3c_platform_set_dat_ibi_nop(struct dw_i3c_master *i3c,
struct i3c_dev_desc *dev,
bool enable, u32 *dat)
{
}
static const struct dw_i3c_platform_ops dw_i3c_platform_ops_default = {
.init = dw_i3c_platform_init_nop,
.set_dat_ibi = dw_i3c_platform_set_dat_ibi_nop,
};
int dw_i3c_common_probe(struct dw_i3c_master *master,
struct platform_device *pdev)
{
const struct i3c_master_controller_ops *ops;
int ret, irq;
if (!master->platform_ops)
master->platform_ops = &dw_i3c_platform_ops_default;
master->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(master->regs))
return PTR_ERR(master->regs);
master->core_clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(master->core_clk))
return PTR_ERR(master->core_clk);
master->core_rst = devm_reset_control_get_optional_exclusive(&pdev->dev,
"core_rst");
if (IS_ERR(master->core_rst))
return PTR_ERR(master->core_rst);
	ret = clk_prepare_enable(master->core_clk);
	if (ret)
		return ret;
reset_control_deassert(master->core_rst);
spin_lock_init(&master->xferqueue.lock);
INIT_LIST_HEAD(&master->xferqueue.list);
writel(INTR_ALL, master->regs + INTR_STATUS);
irq = platform_get_irq(pdev, 0);
ret = devm_request_irq(&pdev->dev, irq,
dw_i3c_master_irq_handler, 0,
dev_name(&pdev->dev), master);
if (ret)
goto err_assert_rst;
platform_set_drvdata(pdev, master);
	/* Information regarding the FIFO/queue depths */
ret = readl(master->regs + QUEUE_STATUS_LEVEL);
master->caps.cmdfifodepth = QUEUE_STATUS_LEVEL_CMD(ret);
ret = readl(master->regs + DATA_BUFFER_STATUS_LEVEL);
master->caps.datafifodepth = DATA_BUFFER_STATUS_LEVEL_TX(ret);
ret = readl(master->regs + DEVICE_ADDR_TABLE_POINTER);
master->datstartaddr = ret;
master->maxdevs = ret >> 16;
master->free_pos = GENMASK(master->maxdevs - 1, 0);
ops = &dw_mipi_i3c_ops;
if (master->ibi_capable)
ops = &dw_mipi_i3c_ibi_ops;
ret = i3c_master_register(&master->base, &pdev->dev, ops, false);
if (ret)
goto err_assert_rst;
return 0;
err_assert_rst:
reset_control_assert(master->core_rst);
clk_disable_unprepare(master->core_clk);
return ret;
}
EXPORT_SYMBOL_GPL(dw_i3c_common_probe);
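/*
 * Example DEVICE_ADDR_TABLE_POINTER decode for the probe code above
 * (illustrative values; this assumes datstartaddr is the 16-bit field
 * declared in the driver header, so the depth bits are truncated away):
 * a register value of 0x00080280 yields maxdevs = 0x00080280 >> 16 = 8,
 * a DAT base offset of 0x280, and free_pos = GENMASK(7, 0) = 0xff
 * marking all eight slots free.
 */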
void dw_i3c_common_remove(struct dw_i3c_master *master)
{
i3c_master_unregister(&master->base);
reset_control_assert(master->core_rst);
clk_disable_unprepare(master->core_clk);
}
EXPORT_SYMBOL_GPL(dw_i3c_common_remove);
/* base platform implementation */
static int dw_i3c_probe(struct platform_device *pdev)
{
struct dw_i3c_master *master;
master = devm_kzalloc(&pdev->dev, sizeof(*master), GFP_KERNEL);
if (!master)
return -ENOMEM;
return dw_i3c_common_probe(master, pdev);
}
static void dw_i3c_remove(struct platform_device *pdev)
{
struct dw_i3c_master *master = platform_get_drvdata(pdev);
dw_i3c_common_remove(master);
}
static const struct of_device_id dw_i3c_master_of_match[] = {
{ .compatible = "snps,dw-i3c-master-1.00a", },
{},
};
MODULE_DEVICE_TABLE(of, dw_i3c_master_of_match);
static struct platform_driver dw_i3c_driver = {
.probe = dw_i3c_probe,
.remove_new = dw_i3c_remove,
.driver = {
.name = "dw-i3c-master",
.of_match_table = dw_i3c_master_of_match,
},
};
module_platform_driver(dw_i3c_driver);
MODULE_AUTHOR("Vitor Soares <[email protected]>");
MODULE_DESCRIPTION("DesignWare MIPI I3C driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/i3c/master/dw-i3c-master.c |
// SPDX-License-Identifier: BSD-3-Clause
/*
* Copyright (c) 2020, MIPI Alliance, Inc.
*
* Author: Nicolas Pitre <[email protected]>
*
* I3C HCI v1.0/v1.1 Command Descriptor Handling
*/
#include <linux/bitfield.h>
#include <linux/i3c/master.h>
#include "hci.h"
#include "cmd.h"
#include "dat.h"
#include "dct.h"
/*
* Address Assignment Command
*/
#define CMD_0_ATTR_A FIELD_PREP(CMD_0_ATTR, 0x2)
#define CMD_A0_TOC W0_BIT_(31)
#define CMD_A0_ROC W0_BIT_(30)
#define CMD_A0_DEV_COUNT(v) FIELD_PREP(W0_MASK(29, 26), v)
#define CMD_A0_DEV_INDEX(v) FIELD_PREP(W0_MASK(20, 16), v)
#define CMD_A0_CMD(v) FIELD_PREP(W0_MASK(14, 7), v)
#define CMD_A0_TID(v) FIELD_PREP(W0_MASK( 6, 3), v)
/*
* Immediate Data Transfer Command
*/
#define CMD_0_ATTR_I FIELD_PREP(CMD_0_ATTR, 0x1)
#define CMD_I1_DATA_BYTE_4(v) FIELD_PREP(W1_MASK(63, 56), v)
#define CMD_I1_DATA_BYTE_3(v) FIELD_PREP(W1_MASK(55, 48), v)
#define CMD_I1_DATA_BYTE_2(v) FIELD_PREP(W1_MASK(47, 40), v)
#define CMD_I1_DATA_BYTE_1(v) FIELD_PREP(W1_MASK(39, 32), v)
#define CMD_I1_DEF_BYTE(v) FIELD_PREP(W1_MASK(39, 32), v)
#define CMD_I0_TOC W0_BIT_(31)
#define CMD_I0_ROC W0_BIT_(30)
#define CMD_I0_RNW W0_BIT_(29)
#define CMD_I0_MODE(v) FIELD_PREP(W0_MASK(28, 26), v)
#define CMD_I0_DTT(v) FIELD_PREP(W0_MASK(25, 23), v)
#define CMD_I0_DEV_INDEX(v) FIELD_PREP(W0_MASK(20, 16), v)
#define CMD_I0_CP W0_BIT_(15)
#define CMD_I0_CMD(v) FIELD_PREP(W0_MASK(14, 7), v)
#define CMD_I0_TID(v) FIELD_PREP(W0_MASK( 6, 3), v)
/*
* Regular Data Transfer Command
*/
#define CMD_0_ATTR_R FIELD_PREP(CMD_0_ATTR, 0x0)
#define CMD_R1_DATA_LENGTH(v) FIELD_PREP(W1_MASK(63, 48), v)
#define CMD_R1_DEF_BYTE(v) FIELD_PREP(W1_MASK(39, 32), v)
#define CMD_R0_TOC W0_BIT_(31)
#define CMD_R0_ROC W0_BIT_(30)
#define CMD_R0_RNW W0_BIT_(29)
#define CMD_R0_MODE(v) FIELD_PREP(W0_MASK(28, 26), v)
#define CMD_R0_DBP W0_BIT_(25)
#define CMD_R0_DEV_INDEX(v) FIELD_PREP(W0_MASK(20, 16), v)
#define CMD_R0_CP W0_BIT_(15)
#define CMD_R0_CMD(v) FIELD_PREP(W0_MASK(14, 7), v)
#define CMD_R0_TID(v) FIELD_PREP(W0_MASK( 6, 3), v)
/*
* Combo Transfer (Write + Write/Read) Command
*/
#define CMD_0_ATTR_C FIELD_PREP(CMD_0_ATTR, 0x3)
#define CMD_C1_DATA_LENGTH(v) FIELD_PREP(W1_MASK(63, 48), v)
#define CMD_C1_OFFSET(v) FIELD_PREP(W1_MASK(47, 32), v)
#define CMD_C0_TOC W0_BIT_(31)
#define CMD_C0_ROC W0_BIT_(30)
#define CMD_C0_RNW W0_BIT_(29)
#define CMD_C0_MODE(v) FIELD_PREP(W0_MASK(28, 26), v)
#define CMD_C0_16_BIT_SUBOFFSET W0_BIT_(25)
#define CMD_C0_FIRST_PHASE_MODE W0_BIT_(24)
#define CMD_C0_DATA_LENGTH_POSITION(v) FIELD_PREP(W0_MASK(23, 22), v)
#define CMD_C0_DEV_INDEX(v) FIELD_PREP(W0_MASK(20, 16), v)
#define CMD_C0_CP W0_BIT_(15)
#define CMD_C0_CMD(v) FIELD_PREP(W0_MASK(14, 7), v)
#define CMD_C0_TID(v) FIELD_PREP(W0_MASK( 6, 3), v)
/*
* Internal Control Command
*/
#define CMD_0_ATTR_M FIELD_PREP(CMD_0_ATTR, 0x7)
#define CMD_M1_VENDOR_SPECIFIC W1_MASK(63, 32)
#define CMD_M0_MIPI_RESERVED W0_MASK(31, 12)
#define CMD_M0_MIPI_CMD W0_MASK(11, 8)
#define CMD_M0_VENDOR_INFO_PRESENT W0_BIT_( 7)
#define CMD_M0_TID(v) FIELD_PREP(W0_MASK( 6, 3), v)
/* Data Transfer Speed and Mode */
enum hci_cmd_mode {
MODE_I3C_SDR0 = 0x0,
MODE_I3C_SDR1 = 0x1,
MODE_I3C_SDR2 = 0x2,
MODE_I3C_SDR3 = 0x3,
MODE_I3C_SDR4 = 0x4,
MODE_I3C_HDR_TSx = 0x5,
MODE_I3C_HDR_DDR = 0x6,
MODE_I3C_HDR_BT = 0x7,
MODE_I3C_Fm_FmP = 0x8,
MODE_I2C_Fm = 0x0,
MODE_I2C_FmP = 0x1,
MODE_I2C_UD1 = 0x2,
MODE_I2C_UD2 = 0x3,
MODE_I2C_UD3 = 0x4,
};
static enum hci_cmd_mode get_i3c_mode(struct i3c_hci *hci)
{
struct i3c_bus *bus = i3c_master_get_bus(&hci->master);
if (bus->scl_rate.i3c >= 12500000)
return MODE_I3C_SDR0;
if (bus->scl_rate.i3c > 8000000)
return MODE_I3C_SDR1;
if (bus->scl_rate.i3c > 6000000)
return MODE_I3C_SDR2;
if (bus->scl_rate.i3c > 4000000)
return MODE_I3C_SDR3;
if (bus->scl_rate.i3c > 2000000)
return MODE_I3C_SDR4;
return MODE_I3C_Fm_FmP;
}
static enum hci_cmd_mode get_i2c_mode(struct i3c_hci *hci)
{
struct i3c_bus *bus = i3c_master_get_bus(&hci->master);
if (bus->scl_rate.i2c >= 1000000)
return MODE_I2C_FmP;
return MODE_I2C_Fm;
}
static void fill_data_bytes(struct hci_xfer *xfer, u8 *data,
unsigned int data_len)
{
xfer->cmd_desc[1] = 0;
switch (data_len) {
case 4:
xfer->cmd_desc[1] |= CMD_I1_DATA_BYTE_4(data[3]);
fallthrough;
case 3:
xfer->cmd_desc[1] |= CMD_I1_DATA_BYTE_3(data[2]);
fallthrough;
case 2:
xfer->cmd_desc[1] |= CMD_I1_DATA_BYTE_2(data[1]);
fallthrough;
case 1:
xfer->cmd_desc[1] |= CMD_I1_DATA_BYTE_1(data[0]);
fallthrough;
case 0:
break;
}
/* we consumed all the data with the cmd descriptor */
xfer->data = NULL;
}
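/*
 * Packing example for fill_data_bytes() (illustrative; the W1_*() macros
 * in hci.h number bits across the whole descriptor, so W1_MASK(39, 32)
 * selects bits 7:0 of the second 32-bit word): for data = {0xaa, 0xbb,
 * 0xcc} and data_len = 3, the switch above leaves cmd_desc[1] ==
 * 0x00ccbbaa -- byte 1 in bits 7:0, byte 2 in bits 15:8, byte 3 in bits
 * 23:16.
 */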
static int hci_cmd_v1_prep_ccc(struct i3c_hci *hci,
struct hci_xfer *xfer,
u8 ccc_addr, u8 ccc_cmd, bool raw)
{
unsigned int dat_idx = 0;
enum hci_cmd_mode mode = get_i3c_mode(hci);
u8 *data = xfer->data;
unsigned int data_len = xfer->data_len;
bool rnw = xfer->rnw;
int ret;
/* this should never happen */
if (WARN_ON(raw))
return -EINVAL;
if (ccc_addr != I3C_BROADCAST_ADDR) {
ret = mipi_i3c_hci_dat_v1.get_index(hci, ccc_addr);
if (ret < 0)
return ret;
dat_idx = ret;
}
xfer->cmd_tid = hci_get_tid();
if (!rnw && data_len <= 4) {
/* we use an Immediate Data Transfer Command */
xfer->cmd_desc[0] =
CMD_0_ATTR_I |
CMD_I0_TID(xfer->cmd_tid) |
CMD_I0_CMD(ccc_cmd) | CMD_I0_CP |
CMD_I0_DEV_INDEX(dat_idx) |
CMD_I0_DTT(data_len) |
CMD_I0_MODE(mode);
fill_data_bytes(xfer, data, data_len);
} else {
/* we use a Regular Data Transfer Command */
xfer->cmd_desc[0] =
CMD_0_ATTR_R |
CMD_R0_TID(xfer->cmd_tid) |
CMD_R0_CMD(ccc_cmd) | CMD_R0_CP |
CMD_R0_DEV_INDEX(dat_idx) |
CMD_R0_MODE(mode) |
(rnw ? CMD_R0_RNW : 0);
xfer->cmd_desc[1] =
CMD_R1_DATA_LENGTH(data_len);
}
return 0;
}
static void hci_cmd_v1_prep_i3c_xfer(struct i3c_hci *hci,
struct i3c_dev_desc *dev,
struct hci_xfer *xfer)
{
struct i3c_hci_dev_data *dev_data = i3c_dev_get_master_data(dev);
unsigned int dat_idx = dev_data->dat_idx;
enum hci_cmd_mode mode = get_i3c_mode(hci);
u8 *data = xfer->data;
unsigned int data_len = xfer->data_len;
bool rnw = xfer->rnw;
xfer->cmd_tid = hci_get_tid();
if (!rnw && data_len <= 4) {
/* we use an Immediate Data Transfer Command */
xfer->cmd_desc[0] =
CMD_0_ATTR_I |
CMD_I0_TID(xfer->cmd_tid) |
CMD_I0_DEV_INDEX(dat_idx) |
CMD_I0_DTT(data_len) |
CMD_I0_MODE(mode);
fill_data_bytes(xfer, data, data_len);
} else {
/* we use a Regular Data Transfer Command */
xfer->cmd_desc[0] =
CMD_0_ATTR_R |
CMD_R0_TID(xfer->cmd_tid) |
CMD_R0_DEV_INDEX(dat_idx) |
CMD_R0_MODE(mode) |
(rnw ? CMD_R0_RNW : 0);
xfer->cmd_desc[1] =
CMD_R1_DATA_LENGTH(data_len);
}
}
static void hci_cmd_v1_prep_i2c_xfer(struct i3c_hci *hci,
struct i2c_dev_desc *dev,
struct hci_xfer *xfer)
{
struct i3c_hci_dev_data *dev_data = i2c_dev_get_master_data(dev);
unsigned int dat_idx = dev_data->dat_idx;
enum hci_cmd_mode mode = get_i2c_mode(hci);
u8 *data = xfer->data;
unsigned int data_len = xfer->data_len;
bool rnw = xfer->rnw;
xfer->cmd_tid = hci_get_tid();
if (!rnw && data_len <= 4) {
/* we use an Immediate Data Transfer Command */
xfer->cmd_desc[0] =
CMD_0_ATTR_I |
CMD_I0_TID(xfer->cmd_tid) |
CMD_I0_DEV_INDEX(dat_idx) |
CMD_I0_DTT(data_len) |
CMD_I0_MODE(mode);
fill_data_bytes(xfer, data, data_len);
} else {
/* we use a Regular Data Transfer Command */
xfer->cmd_desc[0] =
CMD_0_ATTR_R |
CMD_R0_TID(xfer->cmd_tid) |
CMD_R0_DEV_INDEX(dat_idx) |
CMD_R0_MODE(mode) |
(rnw ? CMD_R0_RNW : 0);
xfer->cmd_desc[1] =
CMD_R1_DATA_LENGTH(data_len);
}
}
static int hci_cmd_v1_daa(struct i3c_hci *hci)
{
struct hci_xfer *xfer;
int ret, dat_idx = -1;
u8 next_addr = 0;
u64 pid;
unsigned int dcr, bcr;
DECLARE_COMPLETION_ONSTACK(done);
xfer = hci_alloc_xfer(2);
if (!xfer)
return -ENOMEM;
/*
* Simple for now: we allocate a temporary DAT entry, do a single
* DAA, register the device which will allocate its own DAT entry
* via the core callback, then free the temporary DAT entry.
	 * Loop until there are no more devices to assign an address to.
	 * Yes, there is room for improvement.
*/
for (;;) {
ret = mipi_i3c_hci_dat_v1.alloc_entry(hci);
if (ret < 0)
break;
dat_idx = ret;
ret = i3c_master_get_free_addr(&hci->master, next_addr);
if (ret < 0)
break;
next_addr = ret;
DBG("next_addr = 0x%02x, DAA using DAT %d", next_addr, dat_idx);
mipi_i3c_hci_dat_v1.set_dynamic_addr(hci, dat_idx, next_addr);
mipi_i3c_hci_dct_index_reset(hci);
xfer->cmd_tid = hci_get_tid();
xfer->cmd_desc[0] =
CMD_0_ATTR_A |
CMD_A0_TID(xfer->cmd_tid) |
CMD_A0_CMD(I3C_CCC_ENTDAA) |
CMD_A0_DEV_INDEX(dat_idx) |
CMD_A0_DEV_COUNT(1) |
CMD_A0_ROC | CMD_A0_TOC;
		xfer->cmd_desc[1] = 0;
		xfer->completion = &done;
hci->io->queue_xfer(hci, xfer, 1);
if (!wait_for_completion_timeout(&done, HZ) &&
hci->io->dequeue_xfer(hci, xfer, 1)) {
ret = -ETIME;
break;
}
		if (RESP_STATUS(xfer[0].response) == RESP_ERR_NACK &&
		    RESP_DATA_LENGTH(xfer[0].response) == 1) {
ret = 0; /* no more devices to be assigned */
break;
}
if (RESP_STATUS(xfer[0].response) != RESP_SUCCESS) {
ret = -EIO;
break;
}
i3c_hci_dct_get_val(hci, 0, &pid, &dcr, &bcr);
DBG("assigned address %#x to device PID=0x%llx DCR=%#x BCR=%#x",
next_addr, pid, dcr, bcr);
mipi_i3c_hci_dat_v1.free_entry(hci, dat_idx);
dat_idx = -1;
/*
* TODO: Extend the subsystem layer to allow for registering
* new device and provide BCR/DCR/PID at the same time.
*/
ret = i3c_master_add_i3c_dev_locked(&hci->master, next_addr);
if (ret)
break;
}
if (dat_idx >= 0)
mipi_i3c_hci_dat_v1.free_entry(hci, dat_idx);
hci_free_xfer(xfer, 1);
return ret;
}
const struct hci_cmd_ops mipi_i3c_hci_cmd_v1 = {
.prep_ccc = hci_cmd_v1_prep_ccc,
.prep_i3c_xfer = hci_cmd_v1_prep_i3c_xfer,
.prep_i2c_xfer = hci_cmd_v1_prep_i2c_xfer,
.perform_daa = hci_cmd_v1_daa,
};
| linux-master | drivers/i3c/master/mipi-i3c-hci/cmd_v1.c |
// SPDX-License-Identifier: BSD-3-Clause
/*
* Copyright (c) 2020, MIPI Alliance, Inc.
*
* Author: Nicolas Pitre <[email protected]>
*/
#include <linux/bitfield.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/i3c/master.h>
#include <linux/kernel.h>
#include <linux/io.h>
#include "hci.h"
#include "ext_caps.h"
#include "xfer_mode_rate.h"
/* Extended Capability Header */
#define CAP_HEADER_LENGTH GENMASK(23, 8)
#define CAP_HEADER_ID GENMASK(7, 0)
static int hci_extcap_hardware_id(struct i3c_hci *hci, void __iomem *base)
{
hci->vendor_mipi_id = readl(base + 0x04);
hci->vendor_version_id = readl(base + 0x08);
hci->vendor_product_id = readl(base + 0x0c);
dev_info(&hci->master.dev, "vendor MIPI ID: %#x\n", hci->vendor_mipi_id);
dev_info(&hci->master.dev, "vendor version ID: %#x\n", hci->vendor_version_id);
dev_info(&hci->master.dev, "vendor product ID: %#x\n", hci->vendor_product_id);
/* ought to go in a table if this grows too much */
switch (hci->vendor_mipi_id) {
case MIPI_VENDOR_NXP:
hci->quirks |= HCI_QUIRK_RAW_CCC;
DBG("raw CCC quirks set");
break;
}
return 0;
}
static int hci_extcap_master_config(struct i3c_hci *hci, void __iomem *base)
{
u32 master_config = readl(base + 0x04);
unsigned int operation_mode = FIELD_GET(GENMASK(5, 4), master_config);
static const char * const functionality[] = {
"(unknown)", "master only", "target only",
"primary/secondary master" };
dev_info(&hci->master.dev, "operation mode: %s\n", functionality[operation_mode]);
if (operation_mode & 0x1)
return 0;
dev_err(&hci->master.dev, "only master mode is currently supported\n");
return -EOPNOTSUPP;
}
static int hci_extcap_multi_bus(struct i3c_hci *hci, void __iomem *base)
{
u32 bus_instance = readl(base + 0x04);
unsigned int count = FIELD_GET(GENMASK(3, 0), bus_instance);
dev_info(&hci->master.dev, "%d bus instances\n", count);
return 0;
}
static int hci_extcap_xfer_modes(struct i3c_hci *hci, void __iomem *base)
{
u32 header = readl(base);
u32 entries = FIELD_GET(CAP_HEADER_LENGTH, header) - 1;
unsigned int index;
dev_info(&hci->master.dev, "transfer mode table has %d entries\n",
entries);
base += 4; /* skip header */
for (index = 0; index < entries; index++) {
u32 mode_entry = readl(base);
DBG("mode %d: 0x%08x", index, mode_entry);
/* TODO: will be needed when I3C core does more than SDR */
base += 4;
}
return 0;
}
static int hci_extcap_xfer_rates(struct i3c_hci *hci, void __iomem *base)
{
u32 header = readl(base);
u32 entries = FIELD_GET(CAP_HEADER_LENGTH, header) - 1;
u32 rate_entry;
unsigned int index, rate, rate_id, mode_id;
base += 4; /* skip header */
dev_info(&hci->master.dev, "available data rates:\n");
for (index = 0; index < entries; index++) {
rate_entry = readl(base);
DBG("entry %d: 0x%08x", index, rate_entry);
rate = FIELD_GET(XFERRATE_ACTUAL_RATE_KHZ, rate_entry);
rate_id = FIELD_GET(XFERRATE_RATE_ID, rate_entry);
mode_id = FIELD_GET(XFERRATE_MODE_ID, rate_entry);
dev_info(&hci->master.dev, "rate %d for %s = %d kHz\n",
rate_id,
mode_id == XFERRATE_MODE_I3C ? "I3C" :
mode_id == XFERRATE_MODE_I2C ? "I2C" :
"unknown mode",
rate);
base += 4;
}
return 0;
}
static int hci_extcap_auto_command(struct i3c_hci *hci, void __iomem *base)
{
u32 autocmd_ext_caps = readl(base + 0x04);
unsigned int max_count = FIELD_GET(GENMASK(3, 0), autocmd_ext_caps);
u32 autocmd_ext_config = readl(base + 0x08);
unsigned int count = FIELD_GET(GENMASK(3, 0), autocmd_ext_config);
dev_info(&hci->master.dev, "%d/%d active auto-command entries\n",
count, max_count);
/* remember auto-command register location for later use */
hci->AUTOCMD_regs = base;
return 0;
}
static int hci_extcap_debug(struct i3c_hci *hci, void __iomem *base)
{
dev_info(&hci->master.dev, "debug registers present\n");
hci->DEBUG_regs = base;
return 0;
}
static int hci_extcap_scheduled_cmd(struct i3c_hci *hci, void __iomem *base)
{
dev_info(&hci->master.dev, "scheduled commands available\n");
/* hci->schedcmd_regs = base; */
return 0;
}
static int hci_extcap_non_curr_master(struct i3c_hci *hci, void __iomem *base)
{
dev_info(&hci->master.dev, "Non-Current Master support available\n");
/* hci->NCM_regs = base; */
return 0;
}
static int hci_extcap_ccc_resp_conf(struct i3c_hci *hci, void __iomem *base)
{
dev_info(&hci->master.dev, "CCC Response Configuration available\n");
return 0;
}
static int hci_extcap_global_DAT(struct i3c_hci *hci, void __iomem *base)
{
dev_info(&hci->master.dev, "Global DAT available\n");
return 0;
}
static int hci_extcap_multilane(struct i3c_hci *hci, void __iomem *base)
{
dev_info(&hci->master.dev, "Master Multi-Lane support available\n");
return 0;
}
static int hci_extcap_ncm_multilane(struct i3c_hci *hci, void __iomem *base)
{
dev_info(&hci->master.dev, "NCM Multi-Lane support available\n");
return 0;
}
struct hci_ext_caps {
u8 id;
u16 min_length;
int (*parser)(struct i3c_hci *hci, void __iomem *base);
};
#define EXT_CAP(_id, _highest_mandatory_reg_offset, _parser) \
{ .id = (_id), .parser = (_parser), \
.min_length = (_highest_mandatory_reg_offset)/4 + 1 }
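/*
 * For instance, EXT_CAP(0x01, 0x0c, hci_extcap_hardware_id) yields a
 * min_length of 0x0c / 4 + 1 = 4 DWORDs: the capability header plus the
 * three registers at +0x04, +0x08 and +0x0c that the parser reads.
 */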
static const struct hci_ext_caps ext_capabilities[] = {
EXT_CAP(0x01, 0x0c, hci_extcap_hardware_id),
EXT_CAP(0x02, 0x04, hci_extcap_master_config),
EXT_CAP(0x03, 0x04, hci_extcap_multi_bus),
EXT_CAP(0x04, 0x24, hci_extcap_xfer_modes),
EXT_CAP(0x05, 0x08, hci_extcap_auto_command),
EXT_CAP(0x08, 0x40, hci_extcap_xfer_rates),
EXT_CAP(0x0c, 0x10, hci_extcap_debug),
EXT_CAP(0x0d, 0x0c, hci_extcap_scheduled_cmd),
EXT_CAP(0x0e, 0x80, hci_extcap_non_curr_master), /* TODO confirm size */
EXT_CAP(0x0f, 0x04, hci_extcap_ccc_resp_conf),
EXT_CAP(0x10, 0x08, hci_extcap_global_DAT),
EXT_CAP(0x9d, 0x04, hci_extcap_multilane),
EXT_CAP(0x9e, 0x04, hci_extcap_ncm_multilane),
};
static int hci_extcap_vendor_NXP(struct i3c_hci *hci, void __iomem *base)
{
hci->vendor_data = (__force void *)base;
dev_info(&hci->master.dev, "Build Date Info = %#x\n", readl(base + 1*4));
/* reset the FPGA */
writel(0xdeadbeef, base + 1*4);
return 0;
}
struct hci_ext_cap_vendor_specific {
u32 vendor;
u8 cap;
u16 min_length;
int (*parser)(struct i3c_hci *hci, void __iomem *base);
};
#define EXT_CAP_VENDOR(_vendor, _cap, _highest_mandatory_reg_offset) \
{ .vendor = (MIPI_VENDOR_##_vendor), .cap = (_cap), \
.parser = (hci_extcap_vendor_##_vendor), \
.min_length = (_highest_mandatory_reg_offset)/4 + 1 }
static const struct hci_ext_cap_vendor_specific vendor_ext_caps[] = {
EXT_CAP_VENDOR(NXP, 0xc0, 0x20),
};
static int hci_extcap_vendor_specific(struct i3c_hci *hci, void __iomem *base,
u32 cap_id, u32 cap_length)
{
const struct hci_ext_cap_vendor_specific *vendor_cap_entry;
int i;
vendor_cap_entry = NULL;
for (i = 0; i < ARRAY_SIZE(vendor_ext_caps); i++) {
if (vendor_ext_caps[i].vendor == hci->vendor_mipi_id &&
vendor_ext_caps[i].cap == cap_id) {
vendor_cap_entry = &vendor_ext_caps[i];
break;
}
}
if (!vendor_cap_entry) {
dev_notice(&hci->master.dev,
"unknown ext_cap 0x%02x for vendor 0x%02x\n",
cap_id, hci->vendor_mipi_id);
return 0;
}
if (cap_length < vendor_cap_entry->min_length) {
dev_err(&hci->master.dev,
"ext_cap 0x%02x has size %d (expecting >= %d)\n",
cap_id, cap_length, vendor_cap_entry->min_length);
return -EINVAL;
}
return vendor_cap_entry->parser(hci, base);
}
int i3c_hci_parse_ext_caps(struct i3c_hci *hci)
{
void __iomem *curr_cap = hci->EXTCAPS_regs;
void __iomem *end = curr_cap + 0x1000; /* some arbitrary limit */
u32 cap_header, cap_id, cap_length;
const struct hci_ext_caps *cap_entry;
int i, err = 0;
if (!curr_cap)
return 0;
for (; !err && curr_cap < end; curr_cap += cap_length * 4) {
cap_header = readl(curr_cap);
cap_id = FIELD_GET(CAP_HEADER_ID, cap_header);
cap_length = FIELD_GET(CAP_HEADER_LENGTH, cap_header);
DBG("id=0x%02x length=%d", cap_id, cap_length);
if (!cap_length)
break;
if (curr_cap + cap_length * 4 >= end) {
dev_err(&hci->master.dev,
"ext_cap 0x%02x has size %d (too big)\n",
cap_id, cap_length);
err = -EINVAL;
break;
}
if (cap_id >= 0xc0 && cap_id <= 0xcf) {
err = hci_extcap_vendor_specific(hci, curr_cap,
cap_id, cap_length);
continue;
}
cap_entry = NULL;
for (i = 0; i < ARRAY_SIZE(ext_capabilities); i++) {
if (ext_capabilities[i].id == cap_id) {
cap_entry = &ext_capabilities[i];
break;
}
}
if (!cap_entry) {
dev_notice(&hci->master.dev,
"unknown ext_cap 0x%02x\n", cap_id);
} else if (cap_length < cap_entry->min_length) {
dev_err(&hci->master.dev,
"ext_cap 0x%02x has size %d (expecting >= %d)\n",
cap_id, cap_length, cap_entry->min_length);
err = -EINVAL;
} else {
err = cap_entry->parser(hci, curr_cap);
}
}
return err;
}
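/*
 * Header decode example for the walk above (illustrative): cap_header ==
 * 0x00000401 gives cap_id = FIELD_GET(CAP_HEADER_ID, ...) = 0x01 and
 * cap_length = FIELD_GET(CAP_HEADER_LENGTH, ...) = 4 DWORDs, so
 * hci_extcap_hardware_id() runs and curr_cap advances by 16 bytes to the
 * next capability.
 */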
| linux-master | drivers/i3c/master/mipi-i3c-hci/ext_caps.c |
// SPDX-License-Identifier: BSD-3-Clause
/*
* Copyright (c) 2020, MIPI Alliance, Inc.
*
* Author: Nicolas Pitre <[email protected]>
*
* I3C HCI v2.0 Command Descriptor Handling
*
* Note: The I3C HCI v2.0 spec is still in flux. The code here will change.
*/
#include <linux/bitfield.h>
#include <linux/i3c/master.h>
#include "hci.h"
#include "cmd.h"
#include "xfer_mode_rate.h"
/*
* Unified Data Transfer Command
*/
#define CMD_0_ATTR_U FIELD_PREP(CMD_0_ATTR, 0x4)
#define CMD_U3_HDR_TSP_ML_CTRL(v) FIELD_PREP(W3_MASK(107, 104), v)
#define CMD_U3_IDB4(v) FIELD_PREP(W3_MASK(103, 96), v)
#define CMD_U3_HDR_CMD(v) FIELD_PREP(W3_MASK(103, 96), v)
#define CMD_U2_IDB3(v) FIELD_PREP(W2_MASK( 95, 88), v)
#define CMD_U2_HDR_BT(v) FIELD_PREP(W2_MASK( 95, 88), v)
#define CMD_U2_IDB2(v) FIELD_PREP(W2_MASK( 87, 80), v)
#define CMD_U2_BT_CMD2(v) FIELD_PREP(W2_MASK( 87, 80), v)
#define CMD_U2_IDB1(v) FIELD_PREP(W2_MASK( 79, 72), v)
#define CMD_U2_BT_CMD1(v) FIELD_PREP(W2_MASK( 79, 72), v)
#define CMD_U2_IDB0(v) FIELD_PREP(W2_MASK( 71, 64), v)
#define CMD_U2_BT_CMD0(v) FIELD_PREP(W2_MASK( 71, 64), v)
#define CMD_U1_ERR_HANDLING(v) FIELD_PREP(W1_MASK( 63, 62), v)
#define CMD_U1_ADD_FUNC(v) FIELD_PREP(W1_MASK( 61, 56), v)
#define CMD_U1_COMBO_XFER W1_BIT_( 55)
#define CMD_U1_DATA_LENGTH(v) FIELD_PREP(W1_MASK( 53, 32), v)
#define CMD_U0_TOC W0_BIT_( 31)
#define CMD_U0_ROC W0_BIT_( 30)
#define CMD_U0_MAY_YIELD W0_BIT_( 29)
#define CMD_U0_NACK_RCNT(v) FIELD_PREP(W0_MASK( 28, 27), v)
#define CMD_U0_IDB_COUNT(v) FIELD_PREP(W0_MASK( 26, 24), v)
#define CMD_U0_MODE_INDEX(v) FIELD_PREP(W0_MASK( 22, 18), v)
#define CMD_U0_XFER_RATE(v) FIELD_PREP(W0_MASK( 17, 15), v)
#define CMD_U0_DEV_ADDRESS(v) FIELD_PREP(W0_MASK( 14, 8), v)
#define CMD_U0_RnW W0_BIT_( 7)
#define CMD_U0_TID(v) FIELD_PREP(W0_MASK( 6, 3), v)
/*
* Address Assignment Command
*/
#define CMD_0_ATTR_A FIELD_PREP(CMD_0_ATTR, 0x2)
#define CMD_A1_DATA_LENGTH(v) FIELD_PREP(W1_MASK( 53, 32), v)
#define CMD_A0_TOC W0_BIT_( 31)
#define CMD_A0_ROC W0_BIT_( 30)
#define CMD_A0_XFER_RATE(v) FIELD_PREP(W0_MASK( 17, 15), v)
#define CMD_A0_ASSIGN_ADDRESS(v) FIELD_PREP(W0_MASK( 14, 8), v)
#define CMD_A0_TID(v) FIELD_PREP(W0_MASK( 6, 3), v)
static unsigned int get_i3c_rate_idx(struct i3c_hci *hci)
{
struct i3c_bus *bus = i3c_master_get_bus(&hci->master);
if (bus->scl_rate.i3c >= 12000000)
return XFERRATE_I3C_SDR0;
if (bus->scl_rate.i3c > 8000000)
return XFERRATE_I3C_SDR1;
if (bus->scl_rate.i3c > 6000000)
return XFERRATE_I3C_SDR2;
if (bus->scl_rate.i3c > 4000000)
return XFERRATE_I3C_SDR3;
if (bus->scl_rate.i3c > 2000000)
return XFERRATE_I3C_SDR4;
return XFERRATE_I3C_SDR_FM_FMP;
}
static unsigned int get_i2c_rate_idx(struct i3c_hci *hci)
{
struct i3c_bus *bus = i3c_master_get_bus(&hci->master);
if (bus->scl_rate.i2c >= 1000000)
return XFERRATE_I2C_FMP;
return XFERRATE_I2C_FM;
}
static void hci_cmd_v2_prep_private_xfer(struct i3c_hci *hci,
struct hci_xfer *xfer,
u8 addr, unsigned int mode,
unsigned int rate)
{
u8 *data = xfer->data;
unsigned int data_len = xfer->data_len;
bool rnw = xfer->rnw;
xfer->cmd_tid = hci_get_tid();
if (!rnw && data_len <= 5) {
xfer->cmd_desc[0] =
CMD_0_ATTR_U |
CMD_U0_TID(xfer->cmd_tid) |
CMD_U0_DEV_ADDRESS(addr) |
CMD_U0_XFER_RATE(rate) |
CMD_U0_MODE_INDEX(mode) |
CMD_U0_IDB_COUNT(data_len);
xfer->cmd_desc[1] =
CMD_U1_DATA_LENGTH(0);
xfer->cmd_desc[2] = 0;
xfer->cmd_desc[3] = 0;
switch (data_len) {
case 5:
xfer->cmd_desc[3] |= CMD_U3_IDB4(data[4]);
fallthrough;
case 4:
xfer->cmd_desc[2] |= CMD_U2_IDB3(data[3]);
fallthrough;
case 3:
xfer->cmd_desc[2] |= CMD_U2_IDB2(data[2]);
fallthrough;
case 2:
xfer->cmd_desc[2] |= CMD_U2_IDB1(data[1]);
fallthrough;
case 1:
xfer->cmd_desc[2] |= CMD_U2_IDB0(data[0]);
fallthrough;
case 0:
break;
}
/* we consumed all the data with the cmd descriptor */
xfer->data = NULL;
} else {
xfer->cmd_desc[0] =
CMD_0_ATTR_U |
CMD_U0_TID(xfer->cmd_tid) |
(rnw ? CMD_U0_RnW : 0) |
CMD_U0_DEV_ADDRESS(addr) |
CMD_U0_XFER_RATE(rate) |
CMD_U0_MODE_INDEX(mode);
xfer->cmd_desc[1] =
CMD_U1_DATA_LENGTH(data_len);
xfer->cmd_desc[2] = 0;
xfer->cmd_desc[3] = 0;
}
}
static int hci_cmd_v2_prep_ccc(struct i3c_hci *hci, struct hci_xfer *xfer,
u8 ccc_addr, u8 ccc_cmd, bool raw)
{
unsigned int mode = XFERMODE_IDX_I3C_SDR;
unsigned int rate = get_i3c_rate_idx(hci);
u8 *data = xfer->data;
unsigned int data_len = xfer->data_len;
bool rnw = xfer->rnw;
if (raw && ccc_addr != I3C_BROADCAST_ADDR) {
hci_cmd_v2_prep_private_xfer(hci, xfer, ccc_addr, mode, rate);
return 0;
}
xfer->cmd_tid = hci_get_tid();
if (!rnw && data_len <= 4) {
xfer->cmd_desc[0] =
CMD_0_ATTR_U |
CMD_U0_TID(xfer->cmd_tid) |
CMD_U0_DEV_ADDRESS(ccc_addr) |
CMD_U0_XFER_RATE(rate) |
CMD_U0_MODE_INDEX(mode) |
CMD_U0_IDB_COUNT(data_len + (!raw ? 0 : 1));
xfer->cmd_desc[1] =
CMD_U1_DATA_LENGTH(0);
xfer->cmd_desc[2] =
CMD_U2_IDB0(ccc_cmd);
xfer->cmd_desc[3] = 0;
switch (data_len) {
case 4:
xfer->cmd_desc[3] |= CMD_U3_IDB4(data[3]);
fallthrough;
case 3:
xfer->cmd_desc[2] |= CMD_U2_IDB3(data[2]);
fallthrough;
case 2:
xfer->cmd_desc[2] |= CMD_U2_IDB2(data[1]);
fallthrough;
case 1:
xfer->cmd_desc[2] |= CMD_U2_IDB1(data[0]);
fallthrough;
case 0:
break;
}
/* we consumed all the data with the cmd descriptor */
xfer->data = NULL;
} else {
xfer->cmd_desc[0] =
CMD_0_ATTR_U |
CMD_U0_TID(xfer->cmd_tid) |
(rnw ? CMD_U0_RnW : 0) |
CMD_U0_DEV_ADDRESS(ccc_addr) |
CMD_U0_XFER_RATE(rate) |
CMD_U0_MODE_INDEX(mode) |
CMD_U0_IDB_COUNT(!raw ? 0 : 1);
xfer->cmd_desc[1] =
CMD_U1_DATA_LENGTH(data_len);
xfer->cmd_desc[2] =
CMD_U2_IDB0(ccc_cmd);
xfer->cmd_desc[3] = 0;
}
return 0;
}
static void hci_cmd_v2_prep_i3c_xfer(struct i3c_hci *hci,
struct i3c_dev_desc *dev,
struct hci_xfer *xfer)
{
unsigned int mode = XFERMODE_IDX_I3C_SDR;
unsigned int rate = get_i3c_rate_idx(hci);
u8 addr = dev->info.dyn_addr;
hci_cmd_v2_prep_private_xfer(hci, xfer, addr, mode, rate);
}
static void hci_cmd_v2_prep_i2c_xfer(struct i3c_hci *hci,
struct i2c_dev_desc *dev,
struct hci_xfer *xfer)
{
unsigned int mode = XFERMODE_IDX_I2C;
unsigned int rate = get_i2c_rate_idx(hci);
u8 addr = dev->addr;
hci_cmd_v2_prep_private_xfer(hci, xfer, addr, mode, rate);
}
static int hci_cmd_v2_daa(struct i3c_hci *hci)
{
struct hci_xfer *xfer;
int ret;
u8 next_addr = 0;
u32 device_id[2];
u64 pid;
unsigned int dcr, bcr;
DECLARE_COMPLETION_ONSTACK(done);
xfer = hci_alloc_xfer(2);
if (!xfer)
return -ENOMEM;
xfer[0].data = &device_id;
xfer[0].data_len = 8;
xfer[0].rnw = true;
xfer[0].cmd_desc[1] = CMD_A1_DATA_LENGTH(8);
xfer[1].completion = &done;
for (;;) {
ret = i3c_master_get_free_addr(&hci->master, next_addr);
if (ret < 0)
break;
next_addr = ret;
DBG("next_addr = 0x%02x", next_addr);
xfer[0].cmd_tid = hci_get_tid();
xfer[0].cmd_desc[0] =
CMD_0_ATTR_A |
CMD_A0_TID(xfer[0].cmd_tid) |
CMD_A0_ROC;
xfer[1].cmd_tid = hci_get_tid();
xfer[1].cmd_desc[0] =
CMD_0_ATTR_A |
CMD_A0_TID(xfer[1].cmd_tid) |
CMD_A0_ASSIGN_ADDRESS(next_addr) |
CMD_A0_ROC |
CMD_A0_TOC;
hci->io->queue_xfer(hci, xfer, 2);
if (!wait_for_completion_timeout(&done, HZ) &&
hci->io->dequeue_xfer(hci, xfer, 2)) {
ret = -ETIME;
break;
}
if (RESP_STATUS(xfer[0].response) != RESP_SUCCESS) {
ret = 0; /* no more devices to be assigned */
break;
}
if (RESP_STATUS(xfer[1].response) != RESP_SUCCESS) {
ret = -EIO;
break;
}
pid = FIELD_GET(W1_MASK(47, 32), device_id[1]);
pid = (pid << 32) | device_id[0];
bcr = FIELD_GET(W1_MASK(55, 48), device_id[1]);
dcr = FIELD_GET(W1_MASK(63, 56), device_id[1]);
DBG("assigned address %#x to device PID=0x%llx DCR=%#x BCR=%#x",
next_addr, pid, dcr, bcr);
/*
* TODO: Extend the subsystem layer to allow for registering
* new device and provide BCR/DCR/PID at the same time.
*/
ret = i3c_master_add_i3c_dev_locked(&hci->master, next_addr);
if (ret)
break;
}
hci_free_xfer(xfer, 2);
return ret;
}
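/*
 * Layout example for the device_id words above (arbitrary illustrative
 * values): with device_id[0] == 0x12345678 and device_id[1] ==
 * 0xc4060216, the FIELD_GET()s yield pid = 0x021612345678 (48 bits),
 * bcr = 0x06 and dcr = 0xc4 -- the provisional ID sits in bits 47:0 of
 * the 8-byte ENTDAA payload with BCR and DCR in the top two bytes.
 */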
const struct hci_cmd_ops mipi_i3c_hci_cmd_v2 = {
.prep_ccc = hci_cmd_v2_prep_ccc,
.prep_i3c_xfer = hci_cmd_v2_prep_i3c_xfer,
.prep_i2c_xfer = hci_cmd_v2_prep_i2c_xfer,
.perform_daa = hci_cmd_v2_daa,
};
| linux-master | drivers/i3c/master/mipi-i3c-hci/cmd_v2.c |
// SPDX-License-Identifier: BSD-3-Clause
/*
* Copyright (c) 2020, MIPI Alliance, Inc.
*
* Author: Nicolas Pitre <[email protected]>
*
* Core driver code with main interface to the I3C subsystem.
*/
#include <linux/bitfield.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/i3c/master.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include "hci.h"
#include "ext_caps.h"
#include "cmd.h"
#include "dat.h"
/*
* Host Controller Capabilities and Operation Registers
*/
#define reg_read(r) readl(hci->base_regs + (r))
#define reg_write(r, v) writel(v, hci->base_regs + (r))
#define reg_set(r, v) reg_write(r, reg_read(r) | (v))
#define reg_clear(r, v) reg_write(r, reg_read(r) & ~(v))
#define HCI_VERSION 0x00 /* HCI Version (in BCD) */
#define HC_CONTROL 0x04
#define HC_CONTROL_BUS_ENABLE BIT(31)
#define HC_CONTROL_RESUME BIT(30)
#define HC_CONTROL_ABORT BIT(29)
#define HC_CONTROL_HALT_ON_CMD_TIMEOUT BIT(12)
#define HC_CONTROL_HOT_JOIN_CTRL BIT(8) /* Hot-Join ACK/NACK Control */
#define HC_CONTROL_I2C_TARGET_PRESENT BIT(7)
#define HC_CONTROL_PIO_MODE BIT(6) /* DMA/PIO Mode Selector */
#define HC_CONTROL_DATA_BIG_ENDIAN BIT(4)
#define HC_CONTROL_IBA_INCLUDE BIT(0) /* Include I3C Broadcast Address */
#define MASTER_DEVICE_ADDR 0x08 /* Master Device Address */
#define MASTER_DYNAMIC_ADDR_VALID BIT(31) /* Dynamic Address is Valid */
#define MASTER_DYNAMIC_ADDR(v) FIELD_PREP(GENMASK(22, 16), v)
#define HC_CAPABILITIES 0x0c
#define HC_CAP_SG_DC_EN BIT(30)
#define HC_CAP_SG_IBI_EN BIT(29)
#define HC_CAP_SG_CR_EN BIT(28)
#define HC_CAP_MAX_DATA_LENGTH GENMASK(24, 22)
#define HC_CAP_CMD_SIZE GENMASK(21, 20)
#define HC_CAP_DIRECT_COMMANDS_EN BIT(18)
#define HC_CAP_MULTI_LANE_EN BIT(15)
#define HC_CAP_CMD_CCC_DEFBYTE BIT(10)
#define HC_CAP_HDR_BT_EN BIT(8)
#define HC_CAP_HDR_TS_EN BIT(7)
#define HC_CAP_HDR_DDR_EN BIT(6)
#define HC_CAP_NON_CURRENT_MASTER_CAP BIT(5) /* master handoff capable */
#define HC_CAP_DATA_BYTE_CFG_EN BIT(4) /* endian selection possible */
#define HC_CAP_AUTO_COMMAND BIT(3)
#define HC_CAP_COMBO_COMMAND BIT(2)
#define RESET_CONTROL 0x10
#define BUS_RESET BIT(31)
#define BUS_RESET_TYPE GENMASK(30, 29)
#define IBI_QUEUE_RST BIT(5)
#define RX_FIFO_RST BIT(4)
#define TX_FIFO_RST BIT(3)
#define RESP_QUEUE_RST BIT(2)
#define CMD_QUEUE_RST BIT(1)
#define SOFT_RST BIT(0) /* Core Reset */
#define PRESENT_STATE 0x14
#define STATE_CURRENT_MASTER BIT(2)
#define INTR_STATUS 0x20
#define INTR_STATUS_ENABLE 0x24
#define INTR_SIGNAL_ENABLE 0x28
#define INTR_FORCE 0x2c
#define INTR_HC_CMD_SEQ_UFLOW_STAT BIT(12) /* Cmd Sequence Underflow */
#define INTR_HC_RESET_CANCEL BIT(11) /* HC Cancelled Reset */
#define INTR_HC_INTERNAL_ERR BIT(10) /* HC Internal Error */
#define INTR_HC_PIO BIT(8) /* cascaded PIO interrupt */
#define INTR_HC_RINGS GENMASK(7, 0)
#define DAT_SECTION 0x30 /* Device Address Table */
#define DAT_ENTRY_SIZE GENMASK(31, 28)
#define DAT_TABLE_SIZE GENMASK(18, 12)
#define DAT_TABLE_OFFSET GENMASK(11, 0)
#define DCT_SECTION 0x34 /* Device Characteristics Table */
#define DCT_ENTRY_SIZE GENMASK(31, 28)
#define DCT_TABLE_INDEX GENMASK(23, 19)
#define DCT_TABLE_SIZE GENMASK(18, 12)
#define DCT_TABLE_OFFSET GENMASK(11, 0)
#define RING_HEADERS_SECTION 0x38
#define RING_HEADERS_OFFSET GENMASK(15, 0)
#define PIO_SECTION 0x3c
#define PIO_REGS_OFFSET GENMASK(15, 0) /* PIO Offset */
#define EXT_CAPS_SECTION 0x40
#define EXT_CAPS_OFFSET GENMASK(15, 0)
#define IBI_NOTIFY_CTRL 0x58 /* IBI Notify Control */
#define IBI_NOTIFY_SIR_REJECTED BIT(3) /* Rejected Target Interrupt Request */
#define IBI_NOTIFY_MR_REJECTED BIT(1) /* Rejected Master Request Control */
#define IBI_NOTIFY_HJ_REJECTED BIT(0) /* Rejected Hot-Join Control */
#define DEV_CTX_BASE_LO 0x60
#define DEV_CTX_BASE_HI 0x64
static inline struct i3c_hci *to_i3c_hci(struct i3c_master_controller *m)
{
return container_of(m, struct i3c_hci, master);
}
static int i3c_hci_bus_init(struct i3c_master_controller *m)
{
struct i3c_hci *hci = to_i3c_hci(m);
struct i3c_device_info info;
int ret;
DBG("");
if (hci->cmd == &mipi_i3c_hci_cmd_v1) {
ret = mipi_i3c_hci_dat_v1.init(hci);
if (ret)
return ret;
}
ret = i3c_master_get_free_addr(m, 0);
if (ret < 0)
return ret;
reg_write(MASTER_DEVICE_ADDR,
MASTER_DYNAMIC_ADDR(ret) | MASTER_DYNAMIC_ADDR_VALID);
memset(&info, 0, sizeof(info));
info.dyn_addr = ret;
ret = i3c_master_set_info(m, &info);
if (ret)
return ret;
ret = hci->io->init(hci);
if (ret)
return ret;
reg_set(HC_CONTROL, HC_CONTROL_BUS_ENABLE);
DBG("HC_CONTROL = %#x", reg_read(HC_CONTROL));
return 0;
}
static void i3c_hci_bus_cleanup(struct i3c_master_controller *m)
{
struct i3c_hci *hci = to_i3c_hci(m);
DBG("");
reg_clear(HC_CONTROL, HC_CONTROL_BUS_ENABLE);
hci->io->cleanup(hci);
if (hci->cmd == &mipi_i3c_hci_cmd_v1)
mipi_i3c_hci_dat_v1.cleanup(hci);
}
void mipi_i3c_hci_resume(struct i3c_hci *hci)
{
/* the HC_CONTROL_RESUME bit is R/W1C so just read and write back */
reg_write(HC_CONTROL, reg_read(HC_CONTROL));
}
/* located here rather than pio.c because needed bits are in core reg space */
void mipi_i3c_hci_pio_reset(struct i3c_hci *hci)
{
reg_write(RESET_CONTROL, RX_FIFO_RST | TX_FIFO_RST | RESP_QUEUE_RST);
}
/* located here rather than dct.c because needed bits are in core reg space */
void mipi_i3c_hci_dct_index_reset(struct i3c_hci *hci)
{
reg_write(DCT_SECTION, FIELD_PREP(DCT_TABLE_INDEX, 0));
}
static int i3c_hci_send_ccc_cmd(struct i3c_master_controller *m,
struct i3c_ccc_cmd *ccc)
{
struct i3c_hci *hci = to_i3c_hci(m);
struct hci_xfer *xfer;
bool raw = !!(hci->quirks & HCI_QUIRK_RAW_CCC);
bool prefixed = raw && !!(ccc->id & I3C_CCC_DIRECT);
unsigned int nxfers = ccc->ndests + prefixed;
DECLARE_COMPLETION_ONSTACK(done);
int i, last, ret = 0;
DBG("cmd=%#x rnw=%d ndests=%d data[0].len=%d",
ccc->id, ccc->rnw, ccc->ndests, ccc->dests[0].payload.len);
xfer = hci_alloc_xfer(nxfers);
if (!xfer)
return -ENOMEM;
if (prefixed) {
xfer->data = NULL;
xfer->data_len = 0;
xfer->rnw = false;
hci->cmd->prep_ccc(hci, xfer, I3C_BROADCAST_ADDR,
ccc->id, true);
xfer++;
}
for (i = 0; i < nxfers - prefixed; i++) {
xfer[i].data = ccc->dests[i].payload.data;
xfer[i].data_len = ccc->dests[i].payload.len;
xfer[i].rnw = ccc->rnw;
ret = hci->cmd->prep_ccc(hci, &xfer[i], ccc->dests[i].addr,
ccc->id, raw);
if (ret)
goto out;
xfer[i].cmd_desc[0] |= CMD_0_ROC;
}
last = i - 1;
xfer[last].cmd_desc[0] |= CMD_0_TOC;
xfer[last].completion = &done;
if (prefixed)
xfer--;
ret = hci->io->queue_xfer(hci, xfer, nxfers);
if (ret)
goto out;
if (!wait_for_completion_timeout(&done, HZ) &&
hci->io->dequeue_xfer(hci, xfer, nxfers)) {
ret = -ETIME;
goto out;
}
for (i = prefixed; i < nxfers; i++) {
if (ccc->rnw)
ccc->dests[i - prefixed].payload.len =
RESP_DATA_LENGTH(xfer[i].response);
if (RESP_STATUS(xfer[i].response) != RESP_SUCCESS) {
ret = -EIO;
goto out;
}
}
if (ccc->rnw)
DBG("got: %*ph",
ccc->dests[0].payload.len, ccc->dests[0].payload.data);
out:
hci_free_xfer(xfer, nxfers);
return ret;
}
static int i3c_hci_daa(struct i3c_master_controller *m)
{
struct i3c_hci *hci = to_i3c_hci(m);
DBG("");
return hci->cmd->perform_daa(hci);
}
static int i3c_hci_priv_xfers(struct i3c_dev_desc *dev,
struct i3c_priv_xfer *i3c_xfers,
int nxfers)
{
struct i3c_master_controller *m = i3c_dev_get_master(dev);
struct i3c_hci *hci = to_i3c_hci(m);
struct hci_xfer *xfer;
DECLARE_COMPLETION_ONSTACK(done);
unsigned int size_limit;
int i, last, ret = 0;
DBG("nxfers = %d", nxfers);
xfer = hci_alloc_xfer(nxfers);
if (!xfer)
return -ENOMEM;
size_limit = 1U << (16 + FIELD_GET(HC_CAP_MAX_DATA_LENGTH, hci->caps));
for (i = 0; i < nxfers; i++) {
xfer[i].data_len = i3c_xfers[i].len;
ret = -EFBIG;
if (xfer[i].data_len >= size_limit)
goto out;
xfer[i].rnw = i3c_xfers[i].rnw;
if (i3c_xfers[i].rnw) {
xfer[i].data = i3c_xfers[i].data.in;
} else {
/* silence the const qualifier warning with a cast */
xfer[i].data = (void *) i3c_xfers[i].data.out;
}
hci->cmd->prep_i3c_xfer(hci, dev, &xfer[i]);
xfer[i].cmd_desc[0] |= CMD_0_ROC;
}
last = i - 1;
xfer[last].cmd_desc[0] |= CMD_0_TOC;
xfer[last].completion = &done;
ret = hci->io->queue_xfer(hci, xfer, nxfers);
if (ret)
goto out;
if (!wait_for_completion_timeout(&done, HZ) &&
hci->io->dequeue_xfer(hci, xfer, nxfers)) {
ret = -ETIME;
goto out;
}
for (i = 0; i < nxfers; i++) {
if (i3c_xfers[i].rnw)
i3c_xfers[i].len = RESP_DATA_LENGTH(xfer[i].response);
if (RESP_STATUS(xfer[i].response) != RESP_SUCCESS) {
ret = -EIO;
goto out;
}
}
out:
hci_free_xfer(xfer, nxfers);
return ret;
}
static int i3c_hci_i2c_xfers(struct i2c_dev_desc *dev,
const struct i2c_msg *i2c_xfers, int nxfers)
{
struct i3c_master_controller *m = i2c_dev_get_master(dev);
struct i3c_hci *hci = to_i3c_hci(m);
struct hci_xfer *xfer;
DECLARE_COMPLETION_ONSTACK(done);
int i, last, ret = 0;
DBG("nxfers = %d", nxfers);
xfer = hci_alloc_xfer(nxfers);
if (!xfer)
return -ENOMEM;
for (i = 0; i < nxfers; i++) {
xfer[i].data = i2c_xfers[i].buf;
xfer[i].data_len = i2c_xfers[i].len;
xfer[i].rnw = i2c_xfers[i].flags & I2C_M_RD;
hci->cmd->prep_i2c_xfer(hci, dev, &xfer[i]);
xfer[i].cmd_desc[0] |= CMD_0_ROC;
}
last = i - 1;
xfer[last].cmd_desc[0] |= CMD_0_TOC;
xfer[last].completion = &done;
ret = hci->io->queue_xfer(hci, xfer, nxfers);
if (ret)
goto out;
if (!wait_for_completion_timeout(&done, HZ) &&
hci->io->dequeue_xfer(hci, xfer, nxfers)) {
ret = -ETIME;
goto out;
}
for (i = 0; i < nxfers; i++) {
if (RESP_STATUS(xfer[i].response) != RESP_SUCCESS) {
ret = -EIO;
goto out;
}
}
out:
hci_free_xfer(xfer, nxfers);
return ret;
}
static int i3c_hci_attach_i3c_dev(struct i3c_dev_desc *dev)
{
struct i3c_master_controller *m = i3c_dev_get_master(dev);
struct i3c_hci *hci = to_i3c_hci(m);
struct i3c_hci_dev_data *dev_data;
int ret;
DBG("");
dev_data = kzalloc(sizeof(*dev_data), GFP_KERNEL);
if (!dev_data)
return -ENOMEM;
if (hci->cmd == &mipi_i3c_hci_cmd_v1) {
ret = mipi_i3c_hci_dat_v1.alloc_entry(hci);
if (ret < 0) {
kfree(dev_data);
return ret;
}
mipi_i3c_hci_dat_v1.set_dynamic_addr(hci, ret, dev->info.dyn_addr);
dev_data->dat_idx = ret;
}
i3c_dev_set_master_data(dev, dev_data);
return 0;
}
static int i3c_hci_reattach_i3c_dev(struct i3c_dev_desc *dev, u8 old_dyn_addr)
{
struct i3c_master_controller *m = i3c_dev_get_master(dev);
struct i3c_hci *hci = to_i3c_hci(m);
struct i3c_hci_dev_data *dev_data = i3c_dev_get_master_data(dev);
DBG("");
if (hci->cmd == &mipi_i3c_hci_cmd_v1)
mipi_i3c_hci_dat_v1.set_dynamic_addr(hci, dev_data->dat_idx,
dev->info.dyn_addr);
return 0;
}
static void i3c_hci_detach_i3c_dev(struct i3c_dev_desc *dev)
{
struct i3c_master_controller *m = i3c_dev_get_master(dev);
struct i3c_hci *hci = to_i3c_hci(m);
struct i3c_hci_dev_data *dev_data = i3c_dev_get_master_data(dev);
DBG("");
i3c_dev_set_master_data(dev, NULL);
if (hci->cmd == &mipi_i3c_hci_cmd_v1)
mipi_i3c_hci_dat_v1.free_entry(hci, dev_data->dat_idx);
kfree(dev_data);
}
static int i3c_hci_attach_i2c_dev(struct i2c_dev_desc *dev)
{
struct i3c_master_controller *m = i2c_dev_get_master(dev);
struct i3c_hci *hci = to_i3c_hci(m);
struct i3c_hci_dev_data *dev_data;
int ret;
DBG("");
if (hci->cmd != &mipi_i3c_hci_cmd_v1)
return 0;
dev_data = kzalloc(sizeof(*dev_data), GFP_KERNEL);
if (!dev_data)
return -ENOMEM;
ret = mipi_i3c_hci_dat_v1.alloc_entry(hci);
if (ret < 0) {
kfree(dev_data);
return ret;
}
mipi_i3c_hci_dat_v1.set_static_addr(hci, ret, dev->addr);
mipi_i3c_hci_dat_v1.set_flags(hci, ret, DAT_0_I2C_DEVICE, 0);
dev_data->dat_idx = ret;
i2c_dev_set_master_data(dev, dev_data);
return 0;
}
static void i3c_hci_detach_i2c_dev(struct i2c_dev_desc *dev)
{
struct i3c_master_controller *m = i2c_dev_get_master(dev);
struct i3c_hci *hci = to_i3c_hci(m);
struct i3c_hci_dev_data *dev_data = i2c_dev_get_master_data(dev);
DBG("");
if (dev_data) {
i2c_dev_set_master_data(dev, NULL);
if (hci->cmd == &mipi_i3c_hci_cmd_v1)
mipi_i3c_hci_dat_v1.free_entry(hci, dev_data->dat_idx);
kfree(dev_data);
}
}
static int i3c_hci_request_ibi(struct i3c_dev_desc *dev,
const struct i3c_ibi_setup *req)
{
struct i3c_master_controller *m = i3c_dev_get_master(dev);
struct i3c_hci *hci = to_i3c_hci(m);
struct i3c_hci_dev_data *dev_data = i3c_dev_get_master_data(dev);
unsigned int dat_idx = dev_data->dat_idx;
if (req->max_payload_len != 0)
mipi_i3c_hci_dat_v1.set_flags(hci, dat_idx, DAT_0_IBI_PAYLOAD, 0);
else
mipi_i3c_hci_dat_v1.clear_flags(hci, dat_idx, DAT_0_IBI_PAYLOAD, 0);
return hci->io->request_ibi(hci, dev, req);
}
static void i3c_hci_free_ibi(struct i3c_dev_desc *dev)
{
struct i3c_master_controller *m = i3c_dev_get_master(dev);
struct i3c_hci *hci = to_i3c_hci(m);
hci->io->free_ibi(hci, dev);
}
static int i3c_hci_enable_ibi(struct i3c_dev_desc *dev)
{
struct i3c_master_controller *m = i3c_dev_get_master(dev);
struct i3c_hci *hci = to_i3c_hci(m);
struct i3c_hci_dev_data *dev_data = i3c_dev_get_master_data(dev);
mipi_i3c_hci_dat_v1.clear_flags(hci, dev_data->dat_idx, DAT_0_SIR_REJECT, 0);
return i3c_master_enec_locked(m, dev->info.dyn_addr, I3C_CCC_EVENT_SIR);
}
static int i3c_hci_disable_ibi(struct i3c_dev_desc *dev)
{
struct i3c_master_controller *m = i3c_dev_get_master(dev);
struct i3c_hci *hci = to_i3c_hci(m);
struct i3c_hci_dev_data *dev_data = i3c_dev_get_master_data(dev);
mipi_i3c_hci_dat_v1.set_flags(hci, dev_data->dat_idx, DAT_0_SIR_REJECT, 0);
return i3c_master_disec_locked(m, dev->info.dyn_addr, I3C_CCC_EVENT_SIR);
}
static void i3c_hci_recycle_ibi_slot(struct i3c_dev_desc *dev,
struct i3c_ibi_slot *slot)
{
struct i3c_master_controller *m = i3c_dev_get_master(dev);
struct i3c_hci *hci = to_i3c_hci(m);
hci->io->recycle_ibi_slot(hci, dev, slot);
}
static const struct i3c_master_controller_ops i3c_hci_ops = {
.bus_init = i3c_hci_bus_init,
.bus_cleanup = i3c_hci_bus_cleanup,
.do_daa = i3c_hci_daa,
.send_ccc_cmd = i3c_hci_send_ccc_cmd,
.priv_xfers = i3c_hci_priv_xfers,
.i2c_xfers = i3c_hci_i2c_xfers,
.attach_i3c_dev = i3c_hci_attach_i3c_dev,
.reattach_i3c_dev = i3c_hci_reattach_i3c_dev,
.detach_i3c_dev = i3c_hci_detach_i3c_dev,
.attach_i2c_dev = i3c_hci_attach_i2c_dev,
.detach_i2c_dev = i3c_hci_detach_i2c_dev,
.request_ibi = i3c_hci_request_ibi,
.free_ibi = i3c_hci_free_ibi,
.enable_ibi = i3c_hci_enable_ibi,
.disable_ibi = i3c_hci_disable_ibi,
.recycle_ibi_slot = i3c_hci_recycle_ibi_slot,
};
static irqreturn_t i3c_hci_irq_handler(int irq, void *dev_id)
{
struct i3c_hci *hci = dev_id;
irqreturn_t result = IRQ_NONE;
u32 val;
val = reg_read(INTR_STATUS);
DBG("INTR_STATUS = %#x", val);
if (val) {
reg_write(INTR_STATUS, val);
} else {
/* v1.0 does not have PIO cascaded notification bits */
val |= INTR_HC_PIO;
}
if (val & INTR_HC_RESET_CANCEL) {
DBG("cancelled reset");
val &= ~INTR_HC_RESET_CANCEL;
}
if (val & INTR_HC_INTERNAL_ERR) {
dev_err(&hci->master.dev, "Host Controller Internal Error\n");
val &= ~INTR_HC_INTERNAL_ERR;
}
if (val & INTR_HC_PIO) {
hci->io->irq_handler(hci, 0);
val &= ~INTR_HC_PIO;
}
if (val & INTR_HC_RINGS) {
hci->io->irq_handler(hci, val & INTR_HC_RINGS);
val &= ~INTR_HC_RINGS;
}
if (val)
dev_err(&hci->master.dev, "unexpected INTR_STATUS %#x\n", val);
else
result = IRQ_HANDLED;
return result;
}
static int i3c_hci_init(struct i3c_hci *hci)
{
u32 regval, offset;
int ret;
/* Validate HCI hardware version */
regval = reg_read(HCI_VERSION);
hci->version_major = (regval >> 8) & 0xf;
hci->version_minor = (regval >> 4) & 0xf;
hci->revision = regval & 0xf;
dev_notice(&hci->master.dev, "MIPI I3C HCI v%u.%u r%02u\n",
hci->version_major, hci->version_minor, hci->revision);
/* known versions */
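	/* the revision nibble is masked off: any revision of a supported
	 * major.minor version is accepted */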
switch (regval & ~0xf) {
case 0x100: /* version 1.0 */
case 0x110: /* version 1.1 */
case 0x200: /* version 2.0 */
break;
default:
dev_err(&hci->master.dev, "unsupported HCI version\n");
return -EPROTONOSUPPORT;
}
hci->caps = reg_read(HC_CAPABILITIES);
DBG("caps = %#x", hci->caps);
regval = reg_read(DAT_SECTION);
offset = FIELD_GET(DAT_TABLE_OFFSET, regval);
hci->DAT_regs = offset ? hci->base_regs + offset : NULL;
hci->DAT_entries = FIELD_GET(DAT_TABLE_SIZE, regval);
hci->DAT_entry_size = FIELD_GET(DAT_ENTRY_SIZE, regval);
dev_info(&hci->master.dev, "DAT: %u %u-bytes entries at offset %#x\n",
hci->DAT_entries, hci->DAT_entry_size * 4, offset);
regval = reg_read(DCT_SECTION);
offset = FIELD_GET(DCT_TABLE_OFFSET, regval);
hci->DCT_regs = offset ? hci->base_regs + offset : NULL;
hci->DCT_entries = FIELD_GET(DCT_TABLE_SIZE, regval);
hci->DCT_entry_size = FIELD_GET(DCT_ENTRY_SIZE, regval);
dev_info(&hci->master.dev, "DCT: %u %u-bytes entries at offset %#x\n",
hci->DCT_entries, hci->DCT_entry_size * 4, offset);
regval = reg_read(RING_HEADERS_SECTION);
offset = FIELD_GET(RING_HEADERS_OFFSET, regval);
hci->RHS_regs = offset ? hci->base_regs + offset : NULL;
dev_info(&hci->master.dev, "Ring Headers at offset %#x\n", offset);
regval = reg_read(PIO_SECTION);
offset = FIELD_GET(PIO_REGS_OFFSET, regval);
hci->PIO_regs = offset ? hci->base_regs + offset : NULL;
dev_info(&hci->master.dev, "PIO section at offset %#x\n", offset);
regval = reg_read(EXT_CAPS_SECTION);
offset = FIELD_GET(EXT_CAPS_OFFSET, regval);
hci->EXTCAPS_regs = offset ? hci->base_regs + offset : NULL;
dev_info(&hci->master.dev, "Extended Caps at offset %#x\n", offset);
ret = i3c_hci_parse_ext_caps(hci);
if (ret)
return ret;
/*
* Now let's reset the hardware.
* SOFT_RST must be clear before we write to it.
* Then we must wait until it clears again.
*/
ret = readx_poll_timeout(reg_read, RESET_CONTROL, regval,
!(regval & SOFT_RST), 1, 10000);
if (ret)
return -ENXIO;
reg_write(RESET_CONTROL, SOFT_RST);
ret = readx_poll_timeout(reg_read, RESET_CONTROL, regval,
!(regval & SOFT_RST), 1, 10000);
if (ret)
return -ENXIO;
/* Disable all interrupts and allow all signal updates */
reg_write(INTR_SIGNAL_ENABLE, 0x0);
reg_write(INTR_STATUS_ENABLE, 0xffffffff);
/* Make sure our data ordering fits the host's */
regval = reg_read(HC_CONTROL);
if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN)) {
if (!(regval & HC_CONTROL_DATA_BIG_ENDIAN)) {
regval |= HC_CONTROL_DATA_BIG_ENDIAN;
reg_write(HC_CONTROL, regval);
regval = reg_read(HC_CONTROL);
if (!(regval & HC_CONTROL_DATA_BIG_ENDIAN)) {
dev_err(&hci->master.dev, "cannot set BE mode\n");
return -EOPNOTSUPP;
}
}
} else {
if (regval & HC_CONTROL_DATA_BIG_ENDIAN) {
regval &= ~HC_CONTROL_DATA_BIG_ENDIAN;
reg_write(HC_CONTROL, regval);
regval = reg_read(HC_CONTROL);
if (regval & HC_CONTROL_DATA_BIG_ENDIAN) {
dev_err(&hci->master.dev, "cannot clear BE mode\n");
return -EOPNOTSUPP;
}
}
}
/* Select our command descriptor model */
switch (FIELD_GET(HC_CAP_CMD_SIZE, hci->caps)) {
case 0:
hci->cmd = &mipi_i3c_hci_cmd_v1;
break;
case 1:
hci->cmd = &mipi_i3c_hci_cmd_v2;
break;
default:
dev_err(&hci->master.dev, "wrong CMD_SIZE capability value\n");
return -EINVAL;
}
/* Try activating DMA operations first */
if (hci->RHS_regs) {
reg_clear(HC_CONTROL, HC_CONTROL_PIO_MODE);
if (reg_read(HC_CONTROL) & HC_CONTROL_PIO_MODE) {
dev_err(&hci->master.dev, "PIO mode is stuck\n");
ret = -EIO;
} else {
hci->io = &mipi_i3c_hci_dma;
dev_info(&hci->master.dev, "Using DMA\n");
}
}
/* If no DMA, try PIO */
if (!hci->io && hci->PIO_regs) {
reg_set(HC_CONTROL, HC_CONTROL_PIO_MODE);
if (!(reg_read(HC_CONTROL) & HC_CONTROL_PIO_MODE)) {
dev_err(&hci->master.dev, "DMA mode is stuck\n");
ret = -EIO;
} else {
hci->io = &mipi_i3c_hci_pio;
dev_info(&hci->master.dev, "Using PIO\n");
}
}
if (!hci->io) {
dev_err(&hci->master.dev, "neither DMA nor PIO can be used\n");
if (!ret)
ret = -EINVAL;
return ret;
}
return 0;
}
static int i3c_hci_probe(struct platform_device *pdev)
{
struct i3c_hci *hci;
int irq, ret;
hci = devm_kzalloc(&pdev->dev, sizeof(*hci), GFP_KERNEL);
if (!hci)
return -ENOMEM;
hci->base_regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(hci->base_regs))
return PTR_ERR(hci->base_regs);
platform_set_drvdata(pdev, hci);
/* temporary for dev_printk's, to be replaced in i3c_master_register */
hci->master.dev.init_name = dev_name(&pdev->dev);
ret = i3c_hci_init(hci);
if (ret)
return ret;
	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;
ret = devm_request_irq(&pdev->dev, irq, i3c_hci_irq_handler,
0, NULL, hci);
if (ret)
return ret;
ret = i3c_master_register(&hci->master, &pdev->dev,
&i3c_hci_ops, false);
if (ret)
return ret;
return 0;
}
static void i3c_hci_remove(struct platform_device *pdev)
{
struct i3c_hci *hci = platform_get_drvdata(pdev);
i3c_master_unregister(&hci->master);
}
static const __maybe_unused struct of_device_id i3c_hci_of_match[] = {
{ .compatible = "mipi-i3c-hci", },
{},
};
MODULE_DEVICE_TABLE(of, i3c_hci_of_match);
static struct platform_driver i3c_hci_driver = {
.probe = i3c_hci_probe,
.remove_new = i3c_hci_remove,
.driver = {
.name = "mipi-i3c-hci",
.of_match_table = of_match_ptr(i3c_hci_of_match),
},
};
module_platform_driver(i3c_hci_driver);
MODULE_AUTHOR("Nicolas Pitre <[email protected]>");
MODULE_DESCRIPTION("MIPI I3C HCI driver");
MODULE_LICENSE("Dual BSD/GPL");
| linux-master | drivers/i3c/master/mipi-i3c-hci/core.c |
// SPDX-License-Identifier: BSD-3-Clause
/*
* Copyright (c) 2020, MIPI Alliance, Inc.
*
* Author: Nicolas Pitre <[email protected]>
*/
#include <linux/bitfield.h>
#include <linux/bitmap.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/i3c/master.h>
#include <linux/io.h>
#include "hci.h"
#include "dat.h"
/*
* Device Address Table Structure
*/
#define DAT_1_AUTOCMD_HDR_CODE W1_MASK(58, 51)
#define DAT_1_AUTOCMD_MODE W1_MASK(50, 48)
#define DAT_1_AUTOCMD_VALUE W1_MASK(47, 40)
#define DAT_1_AUTOCMD_MASK W1_MASK(39, 32)
/* DAT_0_I2C_DEVICE W0_BIT_(31) */
#define DAT_0_DEV_NACK_RETRY_CNT W0_MASK(30, 29)
#define DAT_0_RING_ID W0_MASK(28, 26)
#define DAT_0_DYNADDR_PARITY W0_BIT_(23)
#define DAT_0_DYNAMIC_ADDRESS W0_MASK(22, 16)
#define DAT_0_TS W0_BIT_(15)
#define DAT_0_MR_REJECT W0_BIT_(14)
/* DAT_0_SIR_REJECT W0_BIT_(13) */
/* DAT_0_IBI_PAYLOAD W0_BIT_(12) */
#define DAT_0_STATIC_ADDRESS W0_MASK(6, 0)
#define dat_w0_read(i) readl(hci->DAT_regs + (i) * 8)
#define dat_w1_read(i) readl(hci->DAT_regs + (i) * 8 + 4)
#define dat_w0_write(i, v) writel(v, hci->DAT_regs + (i) * 8)
#define dat_w1_write(i, v) writel(v, hci->DAT_regs + (i) * 8 + 4)
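/*
 * Compute the parity bit paired with a 7-bit dynamic address in its
 * DAT entry (DAT_0_DYNADDR_PARITY): bit 7 is forced high and the
 * additions below fold the remaining bits down into bit 0.
 */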
static inline bool dynaddr_parity(unsigned int addr)
{
addr |= 1 << 7;
addr += addr >> 4;
addr += addr >> 2;
addr += addr >> 1;
return (addr & 1);
}
static int hci_dat_v1_init(struct i3c_hci *hci)
{
unsigned int dat_idx;
if (!hci->DAT_regs) {
dev_err(&hci->master.dev,
"only DAT in register space is supported at the moment\n");
return -EOPNOTSUPP;
}
if (hci->DAT_entry_size != 8) {
dev_err(&hci->master.dev,
"only 8-bytes DAT entries are supported at the moment\n");
return -EOPNOTSUPP;
}
/* use a bitmap for faster free slot search */
hci->DAT_data = bitmap_zalloc(hci->DAT_entries, GFP_KERNEL);
if (!hci->DAT_data)
return -ENOMEM;
/* clear them */
for (dat_idx = 0; dat_idx < hci->DAT_entries; dat_idx++) {
dat_w0_write(dat_idx, 0);
dat_w1_write(dat_idx, 0);
}
return 0;
}
static void hci_dat_v1_cleanup(struct i3c_hci *hci)
{
bitmap_free(hci->DAT_data);
hci->DAT_data = NULL;
}
static int hci_dat_v1_alloc_entry(struct i3c_hci *hci)
{
unsigned int dat_idx;
dat_idx = find_first_zero_bit(hci->DAT_data, hci->DAT_entries);
if (dat_idx >= hci->DAT_entries)
return -ENOENT;
__set_bit(dat_idx, hci->DAT_data);
/* default flags */
dat_w0_write(dat_idx, DAT_0_SIR_REJECT | DAT_0_MR_REJECT);
return dat_idx;
}
static void hci_dat_v1_free_entry(struct i3c_hci *hci, unsigned int dat_idx)
{
dat_w0_write(dat_idx, 0);
dat_w1_write(dat_idx, 0);
__clear_bit(dat_idx, hci->DAT_data);
}
static void hci_dat_v1_set_dynamic_addr(struct i3c_hci *hci,
unsigned int dat_idx, u8 address)
{
u32 dat_w0;
dat_w0 = dat_w0_read(dat_idx);
dat_w0 &= ~(DAT_0_DYNAMIC_ADDRESS | DAT_0_DYNADDR_PARITY);
dat_w0 |= FIELD_PREP(DAT_0_DYNAMIC_ADDRESS, address) |
(dynaddr_parity(address) ? DAT_0_DYNADDR_PARITY : 0);
dat_w0_write(dat_idx, dat_w0);
}
static void hci_dat_v1_set_static_addr(struct i3c_hci *hci,
unsigned int dat_idx, u8 address)
{
u32 dat_w0;
dat_w0 = dat_w0_read(dat_idx);
dat_w0 &= ~DAT_0_STATIC_ADDRESS;
dat_w0 |= FIELD_PREP(DAT_0_STATIC_ADDRESS, address);
dat_w0_write(dat_idx, dat_w0);
}
static void hci_dat_v1_set_flags(struct i3c_hci *hci, unsigned int dat_idx,
u32 w0_flags, u32 w1_flags)
{
u32 dat_w0, dat_w1;
dat_w0 = dat_w0_read(dat_idx);
dat_w1 = dat_w1_read(dat_idx);
dat_w0 |= w0_flags;
dat_w1 |= w1_flags;
dat_w0_write(dat_idx, dat_w0);
dat_w1_write(dat_idx, dat_w1);
}
static void hci_dat_v1_clear_flags(struct i3c_hci *hci, unsigned int dat_idx,
u32 w0_flags, u32 w1_flags)
{
u32 dat_w0, dat_w1;
dat_w0 = dat_w0_read(dat_idx);
dat_w1 = dat_w1_read(dat_idx);
dat_w0 &= ~w0_flags;
dat_w1 &= ~w1_flags;
dat_w0_write(dat_idx, dat_w0);
dat_w1_write(dat_idx, dat_w1);
}
static int hci_dat_v1_get_index(struct i3c_hci *hci, u8 dev_addr)
{
unsigned int dat_idx;
u32 dat_w0;
for_each_set_bit(dat_idx, hci->DAT_data, hci->DAT_entries) {
dat_w0 = dat_w0_read(dat_idx);
if (FIELD_GET(DAT_0_DYNAMIC_ADDRESS, dat_w0) == dev_addr)
return dat_idx;
}
return -ENODEV;
}
const struct hci_dat_ops mipi_i3c_hci_dat_v1 = {
.init = hci_dat_v1_init,
.cleanup = hci_dat_v1_cleanup,
.alloc_entry = hci_dat_v1_alloc_entry,
.free_entry = hci_dat_v1_free_entry,
.set_dynamic_addr = hci_dat_v1_set_dynamic_addr,
.set_static_addr = hci_dat_v1_set_static_addr,
.set_flags = hci_dat_v1_set_flags,
.clear_flags = hci_dat_v1_clear_flags,
.get_index = hci_dat_v1_get_index,
};
| linux-master | drivers/i3c/master/mipi-i3c-hci/dat_v1.c |
// SPDX-License-Identifier: BSD-3-Clause
/*
* Copyright (c) 2020, MIPI Alliance, Inc.
*
* Author: Nicolas Pitre <[email protected]>
*
* Note: The I3C HCI v2.0 spec is still in flux. The IBI support is based on
* v1.x of the spec and v2.0 will likely be split out.
*/
#include <linux/bitfield.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/i3c/master.h>
#include <linux/io.h>
#include "hci.h"
#include "cmd.h"
#include "ibi.h"
/*
 * Software Parameter Values (somewhat arbitrary for now).
* Some of them could be determined at run time eventually.
*/
#define XFER_RINGS 1 /* max: 8 */
#define XFER_RING_ENTRIES 16 /* max: 255 */
#define IBI_RINGS 1 /* max: 8 */
#define IBI_STATUS_RING_ENTRIES 32 /* max: 255 */
#define IBI_CHUNK_CACHELINES 1 /* max: 256 bytes equivalent */
#define IBI_CHUNK_POOL_SIZE 128 /* max: 1023 */
/*
* Ring Header Preamble
*/
#define rhs_reg_read(r) readl(hci->RHS_regs + (RHS_##r))
#define rhs_reg_write(r, v) writel(v, hci->RHS_regs + (RHS_##r))
#define RHS_CONTROL 0x00
#define PREAMBLE_SIZE GENMASK(31, 24) /* Preamble Section Size */
#define HEADER_SIZE GENMASK(23, 16) /* Ring Header Size */
#define MAX_HEADER_COUNT_CAP GENMASK(7, 4) /* HC Max Header Count */
#define MAX_HEADER_COUNT GENMASK(3, 0) /* Driver Max Header Count */
#define RHS_RHn_OFFSET(n) (0x04 + (n)*4)
/*
* Ring Header (Per-Ring Bundle)
*/
#define rh_reg_read(r) readl(rh->regs + (RH_##r))
#define rh_reg_write(r, v) writel(v, rh->regs + (RH_##r))
#define RH_CR_SETUP 0x00 /* Command/Response Ring */
#define CR_XFER_STRUCT_SIZE GENMASK(31, 24)
#define CR_RESP_STRUCT_SIZE GENMASK(23, 16)
#define CR_RING_SIZE GENMASK(8, 0)
#define RH_IBI_SETUP 0x04
#define IBI_STATUS_STRUCT_SIZE GENMASK(31, 24)
#define IBI_STATUS_RING_SIZE GENMASK(23, 16)
#define IBI_DATA_CHUNK_SIZE GENMASK(12, 10)
#define IBI_DATA_CHUNK_COUNT GENMASK(9, 0)
#define RH_CHUNK_CONTROL 0x08
#define RH_INTR_STATUS 0x10
#define RH_INTR_STATUS_ENABLE 0x14
#define RH_INTR_SIGNAL_ENABLE 0x18
#define RH_INTR_FORCE 0x1c
#define INTR_IBI_READY BIT(12)
#define INTR_TRANSFER_COMPLETION BIT(11)
#define INTR_RING_OP BIT(10)
#define INTR_TRANSFER_ERR BIT(9)
#define INTR_WARN_INS_STOP_MODE BIT(7)
#define INTR_IBI_RING_FULL BIT(6)
#define INTR_TRANSFER_ABORT BIT(5)
#define RH_RING_STATUS 0x20
#define RING_STATUS_LOCKED BIT(3)
#define RING_STATUS_ABORTED BIT(2)
#define RING_STATUS_RUNNING BIT(1)
#define RING_STATUS_ENABLED BIT(0)
#define RH_RING_CONTROL 0x24
#define RING_CTRL_ABORT BIT(2)
#define RING_CTRL_RUN_STOP BIT(1)
#define RING_CTRL_ENABLE BIT(0)
#define RH_RING_OPERATION1 0x28
#define RING_OP1_IBI_DEQ_PTR GENMASK(23, 16)
#define RING_OP1_CR_SW_DEQ_PTR GENMASK(15, 8)
#define RING_OP1_CR_ENQ_PTR GENMASK(7, 0)
#define RH_RING_OPERATION2 0x2c
#define RING_OP2_IBI_ENQ_PTR GENMASK(23, 16)
#define RING_OP2_CR_DEQ_PTR GENMASK(7, 0)
#define RH_CMD_RING_BASE_LO 0x30
#define RH_CMD_RING_BASE_HI 0x34
#define RH_RESP_RING_BASE_LO 0x38
#define RH_RESP_RING_BASE_HI 0x3c
#define RH_IBI_STATUS_RING_BASE_LO 0x40
#define RH_IBI_STATUS_RING_BASE_HI 0x44
#define RH_IBI_DATA_RING_BASE_LO 0x48
#define RH_IBI_DATA_RING_BASE_HI 0x4c
#define RH_CMD_RING_SG 0x50 /* Ring Scatter Gather Support */
#define RH_RESP_RING_SG 0x54
#define RH_IBI_STATUS_RING_SG 0x58
#define RH_IBI_DATA_RING_SG 0x5c
#define RING_SG_BLP BIT(31) /* Buffer Vs. List Pointer */
#define RING_SG_LIST_SIZE GENMASK(15, 0)
/*
* Data Buffer Descriptor (in memory)
*/
#define DATA_BUF_BLP BIT(31) /* Buffer Vs. List Pointer */
#define DATA_BUF_IOC BIT(30) /* Interrupt on Completion */
#define DATA_BUF_BLOCK_SIZE GENMASK(15, 0)
struct hci_rh_data {
void __iomem *regs;
void *xfer, *resp, *ibi_status, *ibi_data;
dma_addr_t xfer_dma, resp_dma, ibi_status_dma, ibi_data_dma;
unsigned int xfer_entries, ibi_status_entries, ibi_chunks_total;
unsigned int xfer_struct_sz, resp_struct_sz, ibi_status_sz, ibi_chunk_sz;
unsigned int done_ptr, ibi_chunk_ptr;
struct hci_xfer **src_xfers;
spinlock_t lock;
struct completion op_done;
};
struct hci_rings_data {
unsigned int total;
struct hci_rh_data headers[];
};
struct hci_dma_dev_ibi_data {
struct i3c_generic_ibi_pool *pool;
unsigned int max_len;
};
static inline u32 lo32(dma_addr_t physaddr)
{
return physaddr;
}
static inline u32 hi32(dma_addr_t physaddr)
{
/* trickery to avoid compiler warnings on 32-bit build targets */
if (sizeof(dma_addr_t) > 4) {
u64 hi = physaddr;
return hi >> 32;
}
return 0;
}
static void hci_dma_cleanup(struct i3c_hci *hci)
{
struct hci_rings_data *rings = hci->io_data;
struct hci_rh_data *rh;
unsigned int i;
if (!rings)
return;
for (i = 0; i < rings->total; i++) {
rh = &rings->headers[i];
rh_reg_write(RING_CONTROL, 0);
rh_reg_write(CR_SETUP, 0);
rh_reg_write(IBI_SETUP, 0);
rh_reg_write(INTR_SIGNAL_ENABLE, 0);
if (rh->xfer)
dma_free_coherent(&hci->master.dev,
rh->xfer_struct_sz * rh->xfer_entries,
rh->xfer, rh->xfer_dma);
if (rh->resp)
dma_free_coherent(&hci->master.dev,
rh->resp_struct_sz * rh->xfer_entries,
rh->resp, rh->resp_dma);
kfree(rh->src_xfers);
if (rh->ibi_status)
dma_free_coherent(&hci->master.dev,
rh->ibi_status_sz * rh->ibi_status_entries,
rh->ibi_status, rh->ibi_status_dma);
if (rh->ibi_data_dma)
dma_unmap_single(&hci->master.dev, rh->ibi_data_dma,
rh->ibi_chunk_sz * rh->ibi_chunks_total,
DMA_FROM_DEVICE);
kfree(rh->ibi_data);
}
rhs_reg_write(CONTROL, 0);
kfree(rings);
hci->io_data = NULL;
}
static int hci_dma_init(struct i3c_hci *hci)
{
struct hci_rings_data *rings;
struct hci_rh_data *rh;
u32 regval;
unsigned int i, nr_rings, xfers_sz, resps_sz;
unsigned int ibi_status_ring_sz, ibi_data_ring_sz;
int ret;
regval = rhs_reg_read(CONTROL);
nr_rings = FIELD_GET(MAX_HEADER_COUNT_CAP, regval);
dev_info(&hci->master.dev, "%d DMA rings available\n", nr_rings);
if (unlikely(nr_rings > 8)) {
dev_err(&hci->master.dev, "number of rings should be <= 8\n");
nr_rings = 8;
}
if (nr_rings > XFER_RINGS)
nr_rings = XFER_RINGS;
rings = kzalloc(struct_size(rings, headers, nr_rings), GFP_KERNEL);
if (!rings)
return -ENOMEM;
hci->io_data = rings;
rings->total = nr_rings;
for (i = 0; i < rings->total; i++) {
u32 offset = rhs_reg_read(RHn_OFFSET(i));
dev_info(&hci->master.dev, "Ring %d at offset %#x\n", i, offset);
ret = -EINVAL;
if (!offset)
goto err_out;
rh = &rings->headers[i];
rh->regs = hci->base_regs + offset;
spin_lock_init(&rh->lock);
init_completion(&rh->op_done);
rh->xfer_entries = XFER_RING_ENTRIES;
regval = rh_reg_read(CR_SETUP);
rh->xfer_struct_sz = FIELD_GET(CR_XFER_STRUCT_SIZE, regval);
rh->resp_struct_sz = FIELD_GET(CR_RESP_STRUCT_SIZE, regval);
DBG("xfer_struct_sz = %d, resp_struct_sz = %d",
rh->xfer_struct_sz, rh->resp_struct_sz);
xfers_sz = rh->xfer_struct_sz * rh->xfer_entries;
resps_sz = rh->resp_struct_sz * rh->xfer_entries;
rh->xfer = dma_alloc_coherent(&hci->master.dev, xfers_sz,
&rh->xfer_dma, GFP_KERNEL);
rh->resp = dma_alloc_coherent(&hci->master.dev, resps_sz,
&rh->resp_dma, GFP_KERNEL);
rh->src_xfers =
kmalloc_array(rh->xfer_entries, sizeof(*rh->src_xfers),
GFP_KERNEL);
ret = -ENOMEM;
if (!rh->xfer || !rh->resp || !rh->src_xfers)
goto err_out;
rh_reg_write(CMD_RING_BASE_LO, lo32(rh->xfer_dma));
rh_reg_write(CMD_RING_BASE_HI, hi32(rh->xfer_dma));
rh_reg_write(RESP_RING_BASE_LO, lo32(rh->resp_dma));
rh_reg_write(RESP_RING_BASE_HI, hi32(rh->resp_dma));
regval = FIELD_PREP(CR_RING_SIZE, rh->xfer_entries);
rh_reg_write(CR_SETUP, regval);
rh_reg_write(INTR_STATUS_ENABLE, 0xffffffff);
rh_reg_write(INTR_SIGNAL_ENABLE, INTR_IBI_READY |
INTR_TRANSFER_COMPLETION |
INTR_RING_OP |
INTR_TRANSFER_ERR |
INTR_WARN_INS_STOP_MODE |
INTR_IBI_RING_FULL |
INTR_TRANSFER_ABORT);
/* IBIs */
if (i >= IBI_RINGS)
goto ring_ready;
regval = rh_reg_read(IBI_SETUP);
rh->ibi_status_sz = FIELD_GET(IBI_STATUS_STRUCT_SIZE, regval);
rh->ibi_status_entries = IBI_STATUS_RING_ENTRIES;
rh->ibi_chunks_total = IBI_CHUNK_POOL_SIZE;
rh->ibi_chunk_sz = dma_get_cache_alignment();
rh->ibi_chunk_sz *= IBI_CHUNK_CACHELINES;
BUG_ON(rh->ibi_chunk_sz > 256);
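		/* (IBI_DATA_CHUNK_SIZE encodes log2(chunk size) - 2 when
		 *  written below, hence the 256-byte ceiling checked above) */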
ibi_status_ring_sz = rh->ibi_status_sz * rh->ibi_status_entries;
ibi_data_ring_sz = rh->ibi_chunk_sz * rh->ibi_chunks_total;
rh->ibi_status =
dma_alloc_coherent(&hci->master.dev, ibi_status_ring_sz,
&rh->ibi_status_dma, GFP_KERNEL);
rh->ibi_data = kmalloc(ibi_data_ring_sz, GFP_KERNEL);
ret = -ENOMEM;
if (!rh->ibi_status || !rh->ibi_data)
goto err_out;
rh->ibi_data_dma =
dma_map_single(&hci->master.dev, rh->ibi_data,
ibi_data_ring_sz, DMA_FROM_DEVICE);
if (dma_mapping_error(&hci->master.dev, rh->ibi_data_dma)) {
rh->ibi_data_dma = 0;
ret = -ENOMEM;
goto err_out;
}
regval = FIELD_PREP(IBI_STATUS_RING_SIZE,
rh->ibi_status_entries) |
FIELD_PREP(IBI_DATA_CHUNK_SIZE,
ilog2(rh->ibi_chunk_sz) - 2) |
FIELD_PREP(IBI_DATA_CHUNK_COUNT,
rh->ibi_chunks_total);
rh_reg_write(IBI_SETUP, regval);
regval = rh_reg_read(INTR_SIGNAL_ENABLE);
regval |= INTR_IBI_READY;
rh_reg_write(INTR_SIGNAL_ENABLE, regval);
ring_ready:
rh_reg_write(RING_CONTROL, RING_CTRL_ENABLE);
}
regval = FIELD_PREP(MAX_HEADER_COUNT, rings->total);
rhs_reg_write(CONTROL, regval);
return 0;
err_out:
hci_dma_cleanup(hci);
return ret;
}
static void hci_dma_unmap_xfer(struct i3c_hci *hci,
struct hci_xfer *xfer_list, unsigned int n)
{
struct hci_xfer *xfer;
unsigned int i;
for (i = 0; i < n; i++) {
xfer = xfer_list + i;
dma_unmap_single(&hci->master.dev,
xfer->data_dma, xfer->data_len,
xfer->rnw ? DMA_FROM_DEVICE : DMA_TO_DEVICE);
}
}
static int hci_dma_queue_xfer(struct i3c_hci *hci,
struct hci_xfer *xfer_list, int n)
{
struct hci_rings_data *rings = hci->io_data;
struct hci_rh_data *rh;
unsigned int i, ring, enqueue_ptr;
u32 op1_val, op2_val;
/* For now we only use ring 0 */
ring = 0;
rh = &rings->headers[ring];
op1_val = rh_reg_read(RING_OPERATION1);
enqueue_ptr = FIELD_GET(RING_OP1_CR_ENQ_PTR, op1_val);
for (i = 0; i < n; i++) {
struct hci_xfer *xfer = xfer_list + i;
u32 *ring_data = rh->xfer + rh->xfer_struct_sz * enqueue_ptr;
/* store cmd descriptor */
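		/* (two 32-bit words in the v1 format, four in v2) */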
*ring_data++ = xfer->cmd_desc[0];
*ring_data++ = xfer->cmd_desc[1];
if (hci->cmd == &mipi_i3c_hci_cmd_v2) {
*ring_data++ = xfer->cmd_desc[2];
*ring_data++ = xfer->cmd_desc[3];
}
/* first word of Data Buffer Descriptor Structure */
if (!xfer->data)
xfer->data_len = 0;
*ring_data++ =
FIELD_PREP(DATA_BUF_BLOCK_SIZE, xfer->data_len) |
((i == n - 1) ? DATA_BUF_IOC : 0);
/* 2nd and 3rd words of Data Buffer Descriptor Structure */
if (xfer->data) {
xfer->data_dma =
dma_map_single(&hci->master.dev,
xfer->data,
xfer->data_len,
xfer->rnw ?
DMA_FROM_DEVICE :
DMA_TO_DEVICE);
if (dma_mapping_error(&hci->master.dev,
xfer->data_dma)) {
hci_dma_unmap_xfer(hci, xfer_list, i);
return -ENOMEM;
}
*ring_data++ = lo32(xfer->data_dma);
*ring_data++ = hi32(xfer->data_dma);
} else {
*ring_data++ = 0;
*ring_data++ = 0;
}
/* remember corresponding xfer struct */
rh->src_xfers[enqueue_ptr] = xfer;
/* remember corresponding ring/entry for this xfer structure */
xfer->ring_number = ring;
xfer->ring_entry = enqueue_ptr;
enqueue_ptr = (enqueue_ptr + 1) % rh->xfer_entries;
/*
* We may update the hardware view of the enqueue pointer
* only if we didn't reach its dequeue pointer.
*/
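		/*
		 * Note one ring entry always stays unused: letting the
		 * enqueue pointer catch up with the dequeue pointer would
		 * be indistinguishable from an empty ring, so that state
		 * is reported as full below.
		 */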
op2_val = rh_reg_read(RING_OPERATION2);
if (enqueue_ptr == FIELD_GET(RING_OP2_CR_DEQ_PTR, op2_val)) {
/* the ring is full */
hci_dma_unmap_xfer(hci, xfer_list, i + 1);
return -EBUSY;
}
}
/* take care to update the hardware enqueue pointer atomically */
spin_lock_irq(&rh->lock);
op1_val = rh_reg_read(RING_OPERATION1);
op1_val &= ~RING_OP1_CR_ENQ_PTR;
op1_val |= FIELD_PREP(RING_OP1_CR_ENQ_PTR, enqueue_ptr);
rh_reg_write(RING_OPERATION1, op1_val);
spin_unlock_irq(&rh->lock);
return 0;
}
static bool hci_dma_dequeue_xfer(struct i3c_hci *hci,
struct hci_xfer *xfer_list, int n)
{
struct hci_rings_data *rings = hci->io_data;
struct hci_rh_data *rh = &rings->headers[xfer_list[0].ring_number];
unsigned int i;
bool did_unqueue = false;
/* stop the ring */
rh_reg_write(RING_CONTROL, RING_CTRL_ABORT);
if (wait_for_completion_timeout(&rh->op_done, HZ) == 0) {
/*
		 * We're in deep trouble if this condition is ever met.
* Hardware might still be writing to memory, etc.
* Better suspend the world than risking silent corruption.
*/
dev_crit(&hci->master.dev, "unable to abort the ring\n");
BUG();
}
for (i = 0; i < n; i++) {
struct hci_xfer *xfer = xfer_list + i;
int idx = xfer->ring_entry;
/*
* At the time the abort happened, the xfer might have
* completed already. If not then replace corresponding
* descriptor entries with a no-op.
*/
if (idx >= 0) {
u32 *ring_data = rh->xfer + rh->xfer_struct_sz * idx;
/* store no-op cmd descriptor */
*ring_data++ = FIELD_PREP(CMD_0_ATTR, 0x7);
*ring_data++ = 0;
if (hci->cmd == &mipi_i3c_hci_cmd_v2) {
*ring_data++ = 0;
*ring_data++ = 0;
}
/* disassociate this xfer struct */
rh->src_xfers[idx] = NULL;
/* and unmap it */
hci_dma_unmap_xfer(hci, xfer, 1);
did_unqueue = true;
}
}
/* restart the ring */
rh_reg_write(RING_CONTROL, RING_CTRL_ENABLE);
return did_unqueue;
}
static void hci_dma_xfer_done(struct i3c_hci *hci, struct hci_rh_data *rh)
{
u32 op1_val, op2_val, resp, *ring_resp;
unsigned int tid, done_ptr = rh->done_ptr;
struct hci_xfer *xfer;
for (;;) {
op2_val = rh_reg_read(RING_OPERATION2);
if (done_ptr == FIELD_GET(RING_OP2_CR_DEQ_PTR, op2_val))
break;
ring_resp = rh->resp + rh->resp_struct_sz * done_ptr;
resp = *ring_resp;
tid = RESP_TID(resp);
DBG("resp = 0x%08x", resp);
xfer = rh->src_xfers[done_ptr];
if (!xfer) {
DBG("orphaned ring entry");
} else {
hci_dma_unmap_xfer(hci, xfer, 1);
xfer->ring_entry = -1;
xfer->response = resp;
if (tid != xfer->cmd_tid) {
dev_err(&hci->master.dev,
"response tid=%d when expecting %d\n",
tid, xfer->cmd_tid);
/* TODO: do something about it? */
}
if (xfer->completion)
complete(xfer->completion);
}
done_ptr = (done_ptr + 1) % rh->xfer_entries;
rh->done_ptr = done_ptr;
}
/* take care to update the software dequeue pointer atomically */
spin_lock(&rh->lock);
op1_val = rh_reg_read(RING_OPERATION1);
op1_val &= ~RING_OP1_CR_SW_DEQ_PTR;
op1_val |= FIELD_PREP(RING_OP1_CR_SW_DEQ_PTR, done_ptr);
rh_reg_write(RING_OPERATION1, op1_val);
spin_unlock(&rh->lock);
}
static int hci_dma_request_ibi(struct i3c_hci *hci, struct i3c_dev_desc *dev,
const struct i3c_ibi_setup *req)
{
struct i3c_hci_dev_data *dev_data = i3c_dev_get_master_data(dev);
struct i3c_generic_ibi_pool *pool;
struct hci_dma_dev_ibi_data *dev_ibi;
dev_ibi = kmalloc(sizeof(*dev_ibi), GFP_KERNEL);
if (!dev_ibi)
return -ENOMEM;
pool = i3c_generic_ibi_alloc_pool(dev, req);
if (IS_ERR(pool)) {
kfree(dev_ibi);
return PTR_ERR(pool);
}
dev_ibi->pool = pool;
dev_ibi->max_len = req->max_payload_len;
dev_data->ibi_data = dev_ibi;
return 0;
}
static void hci_dma_free_ibi(struct i3c_hci *hci, struct i3c_dev_desc *dev)
{
struct i3c_hci_dev_data *dev_data = i3c_dev_get_master_data(dev);
struct hci_dma_dev_ibi_data *dev_ibi = dev_data->ibi_data;
dev_data->ibi_data = NULL;
i3c_generic_ibi_free_pool(dev_ibi->pool);
kfree(dev_ibi);
}
static void hci_dma_recycle_ibi_slot(struct i3c_hci *hci,
struct i3c_dev_desc *dev,
struct i3c_ibi_slot *slot)
{
struct i3c_hci_dev_data *dev_data = i3c_dev_get_master_data(dev);
struct hci_dma_dev_ibi_data *dev_ibi = dev_data->ibi_data;
i3c_generic_ibi_recycle_slot(dev_ibi->pool, slot);
}
static void hci_dma_process_ibi(struct i3c_hci *hci, struct hci_rh_data *rh)
{
struct i3c_dev_desc *dev;
struct i3c_hci_dev_data *dev_data;
struct hci_dma_dev_ibi_data *dev_ibi;
struct i3c_ibi_slot *slot;
u32 op1_val, op2_val, ibi_status_error;
unsigned int ptr, enq_ptr, deq_ptr;
unsigned int ibi_size, ibi_chunks, ibi_data_offset, first_part;
int ibi_addr, last_ptr;
void *ring_ibi_data;
dma_addr_t ring_ibi_data_dma;
op1_val = rh_reg_read(RING_OPERATION1);
deq_ptr = FIELD_GET(RING_OP1_IBI_DEQ_PTR, op1_val);
op2_val = rh_reg_read(RING_OPERATION2);
enq_ptr = FIELD_GET(RING_OP2_IBI_ENQ_PTR, op2_val);
ibi_status_error = 0;
ibi_addr = -1;
ibi_chunks = 0;
ibi_size = 0;
last_ptr = -1;
/* let's find all we can about this IBI */
for (ptr = deq_ptr; ptr != enq_ptr;
ptr = (ptr + 1) % rh->ibi_status_entries) {
u32 ibi_status, *ring_ibi_status;
unsigned int chunks;
ring_ibi_status = rh->ibi_status + rh->ibi_status_sz * ptr;
ibi_status = *ring_ibi_status;
DBG("status = %#x", ibi_status);
if (ibi_status_error) {
/* we no longer care */
} else if (ibi_status & IBI_ERROR) {
ibi_status_error = ibi_status;
} else if (ibi_addr == -1) {
ibi_addr = FIELD_GET(IBI_TARGET_ADDR, ibi_status);
} else if (ibi_addr != FIELD_GET(IBI_TARGET_ADDR, ibi_status)) {
/* the address changed unexpectedly */
ibi_status_error = ibi_status;
}
chunks = FIELD_GET(IBI_CHUNKS, ibi_status);
ibi_chunks += chunks;
if (!(ibi_status & IBI_LAST_STATUS)) {
ibi_size += chunks * rh->ibi_chunk_sz;
} else {
ibi_size += FIELD_GET(IBI_DATA_LENGTH, ibi_status);
last_ptr = ptr;
break;
}
}
/* validate what we've got */
if (last_ptr == -1) {
/* this IBI sequence is not yet complete */
DBG("no LAST_STATUS available (e=%d d=%d)", enq_ptr, deq_ptr);
return;
}
deq_ptr = last_ptr + 1;
deq_ptr %= rh->ibi_status_entries;
if (ibi_status_error) {
dev_err(&hci->master.dev, "IBI error from %#x\n", ibi_addr);
goto done;
}
/* determine who this is for */
dev = i3c_hci_addr_to_dev(hci, ibi_addr);
if (!dev) {
dev_err(&hci->master.dev,
"IBI for unknown device %#x\n", ibi_addr);
goto done;
}
dev_data = i3c_dev_get_master_data(dev);
dev_ibi = dev_data->ibi_data;
if (ibi_size > dev_ibi->max_len) {
dev_err(&hci->master.dev, "IBI payload too big (%d > %d)\n",
ibi_size, dev_ibi->max_len);
goto done;
}
/*
* This ring model is not suitable for zero-copy processing of IBIs.
* We have the data chunk ring wrap-around to deal with, meaning
* that the payload might span multiple chunks beginning at the
* end of the ring and wrap to the start of the ring. Furthermore
* there is no guarantee that those chunks will be released in order
* and in a timely manner by the upper driver. So let's just copy
* them to a discrete buffer. In practice they're supposed to be
* small anyway.
*/
slot = i3c_generic_ibi_get_free_slot(dev_ibi->pool);
if (!slot) {
dev_err(&hci->master.dev, "no free slot for IBI\n");
goto done;
}
/* copy first part of the payload */
ibi_data_offset = rh->ibi_chunk_sz * rh->ibi_chunk_ptr;
ring_ibi_data = rh->ibi_data + ibi_data_offset;
ring_ibi_data_dma = rh->ibi_data_dma + ibi_data_offset;
first_part = (rh->ibi_chunks_total - rh->ibi_chunk_ptr)
* rh->ibi_chunk_sz;
if (first_part > ibi_size)
first_part = ibi_size;
dma_sync_single_for_cpu(&hci->master.dev, ring_ibi_data_dma,
first_part, DMA_FROM_DEVICE);
memcpy(slot->data, ring_ibi_data, first_part);
/* copy second part if any */
if (ibi_size > first_part) {
/* we wrap back to the start and copy remaining data */
ring_ibi_data = rh->ibi_data;
ring_ibi_data_dma = rh->ibi_data_dma;
dma_sync_single_for_cpu(&hci->master.dev, ring_ibi_data_dma,
ibi_size - first_part, DMA_FROM_DEVICE);
memcpy(slot->data + first_part, ring_ibi_data,
ibi_size - first_part);
}
/* submit it */
slot->dev = dev;
slot->len = ibi_size;
i3c_master_queue_ibi(dev, slot);
done:
/* take care to update the ibi dequeue pointer atomically */
spin_lock(&rh->lock);
op1_val = rh_reg_read(RING_OPERATION1);
op1_val &= ~RING_OP1_IBI_DEQ_PTR;
op1_val |= FIELD_PREP(RING_OP1_IBI_DEQ_PTR, deq_ptr);
rh_reg_write(RING_OPERATION1, op1_val);
spin_unlock(&rh->lock);
/* update the chunk pointer */
rh->ibi_chunk_ptr += ibi_chunks;
rh->ibi_chunk_ptr %= rh->ibi_chunks_total;
/* and tell the hardware about freed chunks */
rh_reg_write(CHUNK_CONTROL, rh_reg_read(CHUNK_CONTROL) + ibi_chunks);
}
static bool hci_dma_irq_handler(struct i3c_hci *hci, unsigned int mask)
{
struct hci_rings_data *rings = hci->io_data;
unsigned int i;
bool handled = false;
for (i = 0; mask && i < 8; i++) {
struct hci_rh_data *rh;
u32 status;
if (!(mask & BIT(i)))
continue;
mask &= ~BIT(i);
rh = &rings->headers[i];
status = rh_reg_read(INTR_STATUS);
DBG("rh%d status: %#x", i, status);
if (!status)
continue;
rh_reg_write(INTR_STATUS, status);
if (status & INTR_IBI_READY)
hci_dma_process_ibi(hci, rh);
if (status & (INTR_TRANSFER_COMPLETION | INTR_TRANSFER_ERR))
hci_dma_xfer_done(hci, rh);
if (status & INTR_RING_OP)
complete(&rh->op_done);
if (status & INTR_TRANSFER_ABORT)
dev_notice_ratelimited(&hci->master.dev,
"ring %d: Transfer Aborted\n", i);
if (status & INTR_WARN_INS_STOP_MODE)
dev_warn_ratelimited(&hci->master.dev,
"ring %d: Inserted Stop on Mode Change\n", i);
if (status & INTR_IBI_RING_FULL)
dev_err_ratelimited(&hci->master.dev,
"ring %d: IBI Ring Full Condition\n", i);
handled = true;
}
return handled;
}
const struct hci_io_ops mipi_i3c_hci_dma = {
.init = hci_dma_init,
.cleanup = hci_dma_cleanup,
.queue_xfer = hci_dma_queue_xfer,
.dequeue_xfer = hci_dma_dequeue_xfer,
.irq_handler = hci_dma_irq_handler,
.request_ibi = hci_dma_request_ibi,
.free_ibi = hci_dma_free_ibi,
.recycle_ibi_slot = hci_dma_recycle_ibi_slot,
};
| linux-master | drivers/i3c/master/mipi-i3c-hci/dma.c |
// SPDX-License-Identifier: BSD-3-Clause
/*
* Copyright (c) 2020, MIPI Alliance, Inc.
*
* Author: Nicolas Pitre <[email protected]>
*/
#include <linux/bitfield.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/i3c/master.h>
#include <linux/io.h>
#include "hci.h"
#include "cmd.h"
#include "ibi.h"
/*
* PIO Access Area
*/
#define pio_reg_read(r) readl(hci->PIO_regs + (PIO_##r))
#define pio_reg_write(r, v) writel(v, hci->PIO_regs + (PIO_##r))
#define PIO_COMMAND_QUEUE_PORT 0x00
#define PIO_RESPONSE_QUEUE_PORT 0x04
#define PIO_XFER_DATA_PORT 0x08
#define PIO_IBI_PORT 0x0c
#define PIO_QUEUE_THLD_CTRL 0x10
#define QUEUE_IBI_STATUS_THLD GENMASK(31, 24)
#define QUEUE_IBI_DATA_THLD GENMASK(23, 16)
#define QUEUE_RESP_BUF_THLD GENMASK(15, 8)
#define QUEUE_CMD_EMPTY_BUF_THLD GENMASK(7, 0)
#define PIO_DATA_BUFFER_THLD_CTRL 0x14
#define DATA_RX_START_THLD GENMASK(26, 24)
#define DATA_TX_START_THLD GENMASK(18, 16)
#define DATA_RX_BUF_THLD GENMASK(10, 8)
#define DATA_TX_BUF_THLD GENMASK(2, 0)
#define PIO_QUEUE_SIZE 0x18
#define TX_DATA_BUFFER_SIZE GENMASK(31, 24)
#define RX_DATA_BUFFER_SIZE GENMASK(23, 16)
#define IBI_STATUS_SIZE GENMASK(15, 8)
#define CR_QUEUE_SIZE GENMASK(7, 0)
#define PIO_INTR_STATUS 0x20
#define PIO_INTR_STATUS_ENABLE 0x24
#define PIO_INTR_SIGNAL_ENABLE 0x28
#define PIO_INTR_FORCE 0x2c
#define STAT_TRANSFER_BLOCKED BIT(25)
#define STAT_PERR_RESP_UFLOW BIT(24)
#define STAT_PERR_CMD_OFLOW BIT(23)
#define STAT_PERR_IBI_UFLOW BIT(22)
#define STAT_PERR_RX_UFLOW BIT(21)
#define STAT_PERR_TX_OFLOW BIT(20)
#define STAT_ERR_RESP_QUEUE_FULL BIT(19)
#define STAT_WARN_RESP_QUEUE_FULL BIT(18)
#define STAT_ERR_IBI_QUEUE_FULL BIT(17)
#define STAT_WARN_IBI_QUEUE_FULL BIT(16)
#define STAT_ERR_RX_DATA_FULL BIT(15)
#define STAT_WARN_RX_DATA_FULL BIT(14)
#define STAT_ERR_TX_DATA_EMPTY BIT(13)
#define STAT_WARN_TX_DATA_EMPTY BIT(12)
#define STAT_TRANSFER_ERR BIT(9)
#define STAT_WARN_INS_STOP_MODE BIT(7)
#define STAT_TRANSFER_ABORT BIT(5)
#define STAT_RESP_READY BIT(4)
#define STAT_CMD_QUEUE_READY BIT(3)
#define STAT_IBI_STATUS_THLD BIT(2)
#define STAT_RX_THLD BIT(1)
#define STAT_TX_THLD BIT(0)
#define PIO_QUEUE_CUR_STATUS 0x38
#define CUR_IBI_Q_LEVEL GENMASK(28, 20)
#define CUR_RESP_Q_LEVEL GENMASK(18, 10)
#define CUR_CMD_Q_EMPTY_LEVEL GENMASK(8, 0)
#define PIO_DATA_BUFFER_CUR_STATUS 0x3c
#define CUR_RX_BUF_LVL GENMASK(26, 16)
#define CUR_TX_BUF_LVL GENMASK(10, 0)
/*
* Handy status bit combinations
*/
#define STAT_LATENCY_WARNINGS (STAT_WARN_RESP_QUEUE_FULL | \
STAT_WARN_IBI_QUEUE_FULL | \
STAT_WARN_RX_DATA_FULL | \
STAT_WARN_TX_DATA_EMPTY | \
STAT_WARN_INS_STOP_MODE)
#define STAT_LATENCY_ERRORS (STAT_ERR_RESP_QUEUE_FULL | \
STAT_ERR_IBI_QUEUE_FULL | \
STAT_ERR_RX_DATA_FULL | \
STAT_ERR_TX_DATA_EMPTY)
#define STAT_PROG_ERRORS (STAT_TRANSFER_BLOCKED | \
STAT_PERR_RESP_UFLOW | \
STAT_PERR_CMD_OFLOW | \
STAT_PERR_IBI_UFLOW | \
STAT_PERR_RX_UFLOW | \
STAT_PERR_TX_OFLOW)
#define STAT_ALL_ERRORS (STAT_TRANSFER_ABORT | \
STAT_TRANSFER_ERR | \
STAT_LATENCY_ERRORS | \
STAT_PROG_ERRORS)
struct hci_pio_dev_ibi_data {
struct i3c_generic_ibi_pool *pool;
unsigned int max_len;
};
struct hci_pio_ibi_data {
struct i3c_ibi_slot *slot;
void *data_ptr;
unsigned int addr;
unsigned int seg_len, seg_cnt;
unsigned int max_len;
bool last_seg;
};
struct hci_pio_data {
spinlock_t lock;
struct hci_xfer *curr_xfer, *xfer_queue;
struct hci_xfer *curr_rx, *rx_queue;
struct hci_xfer *curr_tx, *tx_queue;
struct hci_xfer *curr_resp, *resp_queue;
struct hci_pio_ibi_data ibi;
unsigned int rx_thresh_size, tx_thresh_size;
unsigned int max_ibi_thresh;
u32 reg_queue_thresh;
u32 enabled_irqs;
};
static int hci_pio_init(struct i3c_hci *hci)
{
struct hci_pio_data *pio;
u32 val, size_val, rx_thresh, tx_thresh, ibi_val;
pio = kzalloc(sizeof(*pio), GFP_KERNEL);
if (!pio)
return -ENOMEM;
hci->io_data = pio;
spin_lock_init(&pio->lock);
size_val = pio_reg_read(QUEUE_SIZE);
dev_info(&hci->master.dev, "CMD/RESP FIFO = %ld entries\n",
FIELD_GET(CR_QUEUE_SIZE, size_val));
dev_info(&hci->master.dev, "IBI FIFO = %ld bytes\n",
4 * FIELD_GET(IBI_STATUS_SIZE, size_val));
dev_info(&hci->master.dev, "RX data FIFO = %d bytes\n",
4 * (2 << FIELD_GET(RX_DATA_BUFFER_SIZE, size_val)));
dev_info(&hci->master.dev, "TX data FIFO = %d bytes\n",
4 * (2 << FIELD_GET(TX_DATA_BUFFER_SIZE, size_val)));
/*
* Let's initialize data thresholds to half of the actual FIFO size.
* The start thresholds aren't used (set to 0) as the FIFO is always
* serviced before the corresponding command is queued.
*/
rx_thresh = FIELD_GET(RX_DATA_BUFFER_SIZE, size_val);
tx_thresh = FIELD_GET(TX_DATA_BUFFER_SIZE, size_val);
if (hci->version_major == 1) {
		/* those are expressed as 2^(n+1), so just subtract 1 if not 0 */
if (rx_thresh)
rx_thresh -= 1;
if (tx_thresh)
tx_thresh -= 1;
pio->rx_thresh_size = 2 << rx_thresh;
pio->tx_thresh_size = 2 << tx_thresh;
} else {
/* size is 2^(n+1) and threshold is 2^n i.e. already halved */
pio->rx_thresh_size = 1 << rx_thresh;
pio->tx_thresh_size = 1 << tx_thresh;
}
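	/*
	 * Worked example of the above (illustrative only): a v1 controller
	 * reporting RX_DATA_BUFFER_SIZE = 3 has a 16-word (64-byte) RX FIFO,
	 * so rx_thresh becomes 2 and rx_thresh_size 8 words, i.e. half of it.
	 */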
val = FIELD_PREP(DATA_RX_BUF_THLD, rx_thresh) |
FIELD_PREP(DATA_TX_BUF_THLD, tx_thresh);
pio_reg_write(DATA_BUFFER_THLD_CTRL, val);
/*
* Let's raise an interrupt as soon as there is one free cmd slot
* or one available response or IBI. For IBI data let's use half the
* IBI queue size within allowed bounds.
*/
ibi_val = FIELD_GET(IBI_STATUS_SIZE, size_val);
pio->max_ibi_thresh = clamp_val(ibi_val/2, 1, 63);
val = FIELD_PREP(QUEUE_IBI_STATUS_THLD, 1) |
FIELD_PREP(QUEUE_IBI_DATA_THLD, pio->max_ibi_thresh) |
FIELD_PREP(QUEUE_RESP_BUF_THLD, 1) |
FIELD_PREP(QUEUE_CMD_EMPTY_BUF_THLD, 1);
pio_reg_write(QUEUE_THLD_CTRL, val);
pio->reg_queue_thresh = val;
/* Disable all IRQs but allow all status bits */
pio_reg_write(INTR_SIGNAL_ENABLE, 0x0);
pio_reg_write(INTR_STATUS_ENABLE, 0xffffffff);
/* Always accept error interrupts (will be activated on first xfer) */
pio->enabled_irqs = STAT_ALL_ERRORS;
return 0;
}
static void hci_pio_cleanup(struct i3c_hci *hci)
{
struct hci_pio_data *pio = hci->io_data;
pio_reg_write(INTR_SIGNAL_ENABLE, 0x0);
if (pio) {
DBG("status = %#x/%#x",
pio_reg_read(INTR_STATUS), pio_reg_read(INTR_SIGNAL_ENABLE));
BUG_ON(pio->curr_xfer);
BUG_ON(pio->curr_rx);
BUG_ON(pio->curr_tx);
BUG_ON(pio->curr_resp);
kfree(pio);
hci->io_data = NULL;
}
}
static void hci_pio_write_cmd(struct i3c_hci *hci, struct hci_xfer *xfer)
{
DBG("cmd_desc[%d] = 0x%08x", 0, xfer->cmd_desc[0]);
DBG("cmd_desc[%d] = 0x%08x", 1, xfer->cmd_desc[1]);
pio_reg_write(COMMAND_QUEUE_PORT, xfer->cmd_desc[0]);
pio_reg_write(COMMAND_QUEUE_PORT, xfer->cmd_desc[1]);
if (hci->cmd == &mipi_i3c_hci_cmd_v2) {
DBG("cmd_desc[%d] = 0x%08x", 2, xfer->cmd_desc[2]);
DBG("cmd_desc[%d] = 0x%08x", 3, xfer->cmd_desc[3]);
pio_reg_write(COMMAND_QUEUE_PORT, xfer->cmd_desc[2]);
pio_reg_write(COMMAND_QUEUE_PORT, xfer->cmd_desc[3]);
}
}
static bool hci_pio_do_rx(struct i3c_hci *hci, struct hci_pio_data *pio)
{
struct hci_xfer *xfer = pio->curr_rx;
unsigned int nr_words;
u32 *p;
p = xfer->data;
p += (xfer->data_len - xfer->data_left) / 4;
while (xfer->data_left >= 4) {
/* bail out if FIFO hasn't reached the threshold value yet */
if (!(pio_reg_read(INTR_STATUS) & STAT_RX_THLD))
return false;
nr_words = min(xfer->data_left / 4, pio->rx_thresh_size);
/* extract data from FIFO */
xfer->data_left -= nr_words * 4;
DBG("now %d left %d", nr_words * 4, xfer->data_left);
while (nr_words--)
*p++ = pio_reg_read(XFER_DATA_PORT);
}
/* trailing data is retrieved upon response reception */
return !xfer->data_left;
}
static void hci_pio_do_trailing_rx(struct i3c_hci *hci,
struct hci_pio_data *pio, unsigned int count)
{
struct hci_xfer *xfer = pio->curr_rx;
u32 *p;
DBG("%d remaining", count);
p = xfer->data;
p += (xfer->data_len - xfer->data_left) / 4;
if (count >= 4) {
unsigned int nr_words = count / 4;
/* extract data from FIFO */
xfer->data_left -= nr_words * 4;
DBG("now %d left %d", nr_words * 4, xfer->data_left);
while (nr_words--)
*p++ = pio_reg_read(XFER_DATA_PORT);
}
count &= 3;
if (count) {
/*
* There are trailing bytes in the last word.
* Fetch it and extract bytes in an endian independent way.
* Unlike the TX case, we must not write memory past the
* end of the destination buffer.
*/
u8 *p_byte = (u8 *)p;
u32 data = pio_reg_read(XFER_DATA_PORT);
xfer->data_word_before_partial = data;
xfer->data_left -= count;
data = (__force u32) cpu_to_le32(data);
while (count--) {
*p_byte++ = data;
data >>= 8;
}
}
}
static bool hci_pio_do_tx(struct i3c_hci *hci, struct hci_pio_data *pio)
{
struct hci_xfer *xfer = pio->curr_tx;
unsigned int nr_words;
u32 *p;
p = xfer->data;
p += (xfer->data_len - xfer->data_left) / 4;
while (xfer->data_left >= 4) {
/* bail out if FIFO free space is below set threshold */
if (!(pio_reg_read(INTR_STATUS) & STAT_TX_THLD))
return false;
/* we can fill up to that TX threshold */
nr_words = min(xfer->data_left / 4, pio->tx_thresh_size);
/* push data into the FIFO */
xfer->data_left -= nr_words * 4;
DBG("now %d left %d", nr_words * 4, xfer->data_left);
while (nr_words--)
pio_reg_write(XFER_DATA_PORT, *p++);
}
if (xfer->data_left) {
/*
* There are trailing bytes to send. We can simply load
* them from memory as a word which will keep those bytes
* in their proper place even on a BE system. This will
* also get some bytes past the actual buffer but no one
* should care as they won't be sent out.
*/
if (!(pio_reg_read(INTR_STATUS) & STAT_TX_THLD))
return false;
DBG("trailing %d", xfer->data_left);
pio_reg_write(XFER_DATA_PORT, *p);
xfer->data_left = 0;
}
return true;
}
static bool hci_pio_process_rx(struct i3c_hci *hci, struct hci_pio_data *pio)
{
while (pio->curr_rx && hci_pio_do_rx(hci, pio))
pio->curr_rx = pio->curr_rx->next_data;
return !pio->curr_rx;
}
static bool hci_pio_process_tx(struct i3c_hci *hci, struct hci_pio_data *pio)
{
while (pio->curr_tx && hci_pio_do_tx(hci, pio))
pio->curr_tx = pio->curr_tx->next_data;
return !pio->curr_tx;
}
static void hci_pio_queue_data(struct i3c_hci *hci, struct hci_pio_data *pio)
{
struct hci_xfer *xfer = pio->curr_xfer;
struct hci_xfer *prev_queue_tail;
if (!xfer->data) {
xfer->data_len = xfer->data_left = 0;
return;
}
if (xfer->rnw) {
prev_queue_tail = pio->rx_queue;
pio->rx_queue = xfer;
if (pio->curr_rx) {
prev_queue_tail->next_data = xfer;
} else {
pio->curr_rx = xfer;
if (!hci_pio_process_rx(hci, pio))
pio->enabled_irqs |= STAT_RX_THLD;
}
} else {
prev_queue_tail = pio->tx_queue;
pio->tx_queue = xfer;
if (pio->curr_tx) {
prev_queue_tail->next_data = xfer;
} else {
pio->curr_tx = xfer;
if (!hci_pio_process_tx(hci, pio))
pio->enabled_irqs |= STAT_TX_THLD;
}
}
}
static void hci_pio_push_to_next_rx(struct i3c_hci *hci, struct hci_xfer *xfer,
unsigned int words_to_keep)
{
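	/*
	 * Some words just read from the RX FIFO on behalf of the current
	 * xfer actually belong to the next xfer(s) on the data list: keep
	 * the first words_to_keep words here and push the surplus onward,
	 * recursing when it spans more than one recipient.
	 */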
u32 *from = xfer->data;
u32 from_last;
unsigned int received, count;
received = (xfer->data_len - xfer->data_left) / 4;
if ((xfer->data_len - xfer->data_left) & 3) {
from_last = xfer->data_word_before_partial;
received += 1;
} else {
from_last = from[received];
}
from += words_to_keep;
count = received - words_to_keep;
while (count) {
unsigned int room, left, chunk, bytes_to_move;
u32 last_word;
xfer = xfer->next_data;
if (!xfer) {
dev_err(&hci->master.dev, "pushing RX data to unexistent xfer\n");
return;
}
room = DIV_ROUND_UP(xfer->data_len, 4);
left = DIV_ROUND_UP(xfer->data_left, 4);
chunk = min(count, room);
if (chunk > left) {
hci_pio_push_to_next_rx(hci, xfer, chunk - left);
left = chunk;
xfer->data_left = left * 4;
}
bytes_to_move = xfer->data_len - xfer->data_left;
if (bytes_to_move & 3) {
/* preserve word to become partial */
u32 *p = xfer->data;
xfer->data_word_before_partial = p[bytes_to_move / 4];
}
memmove(xfer->data + chunk, xfer->data, bytes_to_move);
/* treat last word specially because of partial word issues */
chunk -= 1;
memcpy(xfer->data, from, chunk * 4);
xfer->data_left -= chunk * 4;
from += chunk;
count -= chunk;
last_word = (count == 1) ? from_last : *from++;
if (xfer->data_left < 4) {
/*
			 * Like in hci_pio_do_trailing_rx(), preserve the original
			 * word to be stored partially, then store its bytes
			 * in an endian-independent way.
*/
u8 *p_byte = xfer->data;
p_byte += chunk * 4;
xfer->data_word_before_partial = last_word;
last_word = (__force u32) cpu_to_le32(last_word);
while (xfer->data_left--) {
*p_byte++ = last_word;
last_word >>= 8;
}
} else {
u32 *p = xfer->data;
p[chunk] = last_word;
xfer->data_left -= 4;
}
count--;
}
}
static void hci_pio_err(struct i3c_hci *hci, struct hci_pio_data *pio,
u32 status);
static bool hci_pio_process_resp(struct i3c_hci *hci, struct hci_pio_data *pio)
{
while (pio->curr_resp &&
(pio_reg_read(INTR_STATUS) & STAT_RESP_READY)) {
struct hci_xfer *xfer = pio->curr_resp;
u32 resp = pio_reg_read(RESPONSE_QUEUE_PORT);
unsigned int tid = RESP_TID(resp);
DBG("resp = 0x%08x", resp);
if (tid != xfer->cmd_tid) {
dev_err(&hci->master.dev,
"response tid=%d when expecting %d\n",
tid, xfer->cmd_tid);
/* let's pretend it is a prog error... any of them */
hci_pio_err(hci, pio, STAT_PROG_ERRORS);
return false;
}
xfer->response = resp;
if (pio->curr_rx == xfer) {
/*
* Response availability implies RX completion.
* Retrieve trailing RX data if any.
* Note that short reads are possible.
*/
unsigned int received, expected, to_keep;
received = xfer->data_len - xfer->data_left;
expected = RESP_DATA_LENGTH(xfer->response);
if (expected > received) {
hci_pio_do_trailing_rx(hci, pio,
expected - received);
} else if (received > expected) {
/* we consumed data meant for next xfer */
to_keep = DIV_ROUND_UP(expected, 4);
hci_pio_push_to_next_rx(hci, xfer, to_keep);
}
/* then process the RX list pointer */
if (hci_pio_process_rx(hci, pio))
pio->enabled_irqs &= ~STAT_RX_THLD;
}
/*
* We're about to give back ownership of the xfer structure
* to the waiting instance. Make sure no reference to it
* still exists.
*/
if (pio->curr_rx == xfer) {
DBG("short RX ?");
pio->curr_rx = pio->curr_rx->next_data;
} else if (pio->curr_tx == xfer) {
DBG("short TX ?");
pio->curr_tx = pio->curr_tx->next_data;
} else if (xfer->data_left) {
DBG("PIO xfer count = %d after response",
xfer->data_left);
}
pio->curr_resp = xfer->next_resp;
if (xfer->completion)
complete(xfer->completion);
}
return !pio->curr_resp;
}
static void hci_pio_queue_resp(struct i3c_hci *hci, struct hci_pio_data *pio)
{
struct hci_xfer *xfer = pio->curr_xfer;
struct hci_xfer *prev_queue_tail;
if (!(xfer->cmd_desc[0] & CMD_0_ROC))
return;
prev_queue_tail = pio->resp_queue;
pio->resp_queue = xfer;
if (pio->curr_resp) {
prev_queue_tail->next_resp = xfer;
} else {
pio->curr_resp = xfer;
if (!hci_pio_process_resp(hci, pio))
pio->enabled_irqs |= STAT_RESP_READY;
}
}
static bool hci_pio_process_cmd(struct i3c_hci *hci, struct hci_pio_data *pio)
{
while (pio->curr_xfer &&
(pio_reg_read(INTR_STATUS) & STAT_CMD_QUEUE_READY)) {
/*
* Always process the data FIFO before sending the command
* so needed TX data or RX space is available upfront.
*/
hci_pio_queue_data(hci, pio);
/*
* Then queue our response request. This will also process
* the response FIFO in case it got suddenly filled up
* with results from previous commands.
*/
hci_pio_queue_resp(hci, pio);
/*
* Finally send the command.
*/
hci_pio_write_cmd(hci, pio->curr_xfer);
/*
* And move on.
*/
pio->curr_xfer = pio->curr_xfer->next_xfer;
}
return !pio->curr_xfer;
}
static int hci_pio_queue_xfer(struct i3c_hci *hci, struct hci_xfer *xfer, int n)
{
struct hci_pio_data *pio = hci->io_data;
struct hci_xfer *prev_queue_tail;
int i;
DBG("n = %d", n);
/* link xfer instances together and initialize data count */
for (i = 0; i < n; i++) {
xfer[i].next_xfer = (i + 1 < n) ? &xfer[i + 1] : NULL;
xfer[i].next_data = NULL;
xfer[i].next_resp = NULL;
xfer[i].data_left = xfer[i].data_len;
}
spin_lock_irq(&pio->lock);
prev_queue_tail = pio->xfer_queue;
pio->xfer_queue = &xfer[n - 1];
if (pio->curr_xfer) {
prev_queue_tail->next_xfer = xfer;
} else {
pio->curr_xfer = xfer;
if (!hci_pio_process_cmd(hci, pio))
pio->enabled_irqs |= STAT_CMD_QUEUE_READY;
pio_reg_write(INTR_SIGNAL_ENABLE, pio->enabled_irqs);
DBG("status = %#x/%#x",
pio_reg_read(INTR_STATUS), pio_reg_read(INTR_SIGNAL_ENABLE));
}
spin_unlock_irq(&pio->lock);
return 0;
}
static bool hci_pio_dequeue_xfer_common(struct i3c_hci *hci,
struct hci_pio_data *pio,
struct hci_xfer *xfer, int n)
{
struct hci_xfer *p, **p_prev_next;
int i;
/*
* To safely dequeue a transfer request, it must be either entirely
* processed, or not yet processed at all. If our request tail is
* reachable from either the data or resp list that means the command
* was submitted and not yet completed.
*/
for (p = pio->curr_resp; p; p = p->next_resp)
for (i = 0; i < n; i++)
if (p == &xfer[i])
goto pio_screwed;
for (p = pio->curr_rx; p; p = p->next_data)
for (i = 0; i < n; i++)
if (p == &xfer[i])
goto pio_screwed;
for (p = pio->curr_tx; p; p = p->next_data)
for (i = 0; i < n; i++)
if (p == &xfer[i])
goto pio_screwed;
/*
* The command was completed, or wasn't yet submitted.
	 * Unlink it from the queue if the latter.
*/
p_prev_next = &pio->curr_xfer;
for (p = pio->curr_xfer; p; p = p->next_xfer) {
if (p == &xfer[0]) {
*p_prev_next = xfer[n - 1].next_xfer;
break;
}
p_prev_next = &p->next_xfer;
}
/* return true if we actually unqueued something */
return !!p;
pio_screwed:
/*
* Life is tough. We must invalidate the hardware state and
* discard everything that is still queued.
*/
for (p = pio->curr_resp; p; p = p->next_resp) {
p->response = FIELD_PREP(RESP_ERR_FIELD, RESP_ERR_HC_TERMINATED);
if (p->completion)
complete(p->completion);
}
for (p = pio->curr_xfer; p; p = p->next_xfer) {
p->response = FIELD_PREP(RESP_ERR_FIELD, RESP_ERR_HC_TERMINATED);
if (p->completion)
complete(p->completion);
}
pio->curr_xfer = pio->curr_rx = pio->curr_tx = pio->curr_resp = NULL;
return true;
}
static bool hci_pio_dequeue_xfer(struct i3c_hci *hci, struct hci_xfer *xfer, int n)
{
struct hci_pio_data *pio = hci->io_data;
	bool ret;
spin_lock_irq(&pio->lock);
DBG("n=%d status=%#x/%#x", n,
pio_reg_read(INTR_STATUS), pio_reg_read(INTR_SIGNAL_ENABLE));
DBG("main_status = %#x/%#x",
readl(hci->base_regs + 0x20), readl(hci->base_regs + 0x28));
ret = hci_pio_dequeue_xfer_common(hci, pio, xfer, n);
spin_unlock_irq(&pio->lock);
return ret;
}
static void hci_pio_err(struct i3c_hci *hci, struct hci_pio_data *pio,
u32 status)
{
/* TODO: this ought to be more sophisticated eventually */
if (pio_reg_read(INTR_STATUS) & STAT_RESP_READY) {
/* this may happen when an error is signaled with ROC unset */
u32 resp = pio_reg_read(RESPONSE_QUEUE_PORT);
dev_err(&hci->master.dev,
"orphan response (%#x) on error\n", resp);
}
/* dump states on programming errors */
if (status & STAT_PROG_ERRORS) {
u32 queue = pio_reg_read(QUEUE_CUR_STATUS);
u32 data = pio_reg_read(DATA_BUFFER_CUR_STATUS);
dev_err(&hci->master.dev,
"prog error %#lx (C/R/I = %ld/%ld/%ld, TX/RX = %ld/%ld)\n",
status & STAT_PROG_ERRORS,
FIELD_GET(CUR_CMD_Q_EMPTY_LEVEL, queue),
FIELD_GET(CUR_RESP_Q_LEVEL, queue),
FIELD_GET(CUR_IBI_Q_LEVEL, queue),
FIELD_GET(CUR_TX_BUF_LVL, data),
FIELD_GET(CUR_RX_BUF_LVL, data));
}
/* just bust out everything with pending responses for now */
hci_pio_dequeue_xfer_common(hci, pio, pio->curr_resp, 1);
/* ... and half-way TX transfers if any */
if (pio->curr_tx && pio->curr_tx->data_left != pio->curr_tx->data_len)
hci_pio_dequeue_xfer_common(hci, pio, pio->curr_tx, 1);
/* then reset the hardware */
mipi_i3c_hci_pio_reset(hci);
mipi_i3c_hci_resume(hci);
DBG("status=%#x/%#x",
pio_reg_read(INTR_STATUS), pio_reg_read(INTR_SIGNAL_ENABLE));
}
static void hci_pio_set_ibi_thresh(struct i3c_hci *hci,
struct hci_pio_data *pio,
unsigned int thresh_val)
{
u32 regval = pio->reg_queue_thresh;
regval &= ~QUEUE_IBI_STATUS_THLD;
regval |= FIELD_PREP(QUEUE_IBI_STATUS_THLD, thresh_val);
/* write the threshold reg only if it changes */
if (regval != pio->reg_queue_thresh) {
pio_reg_write(QUEUE_THLD_CTRL, regval);
pio->reg_queue_thresh = regval;
DBG("%d", thresh_val);
}
}
static bool hci_pio_get_ibi_segment(struct i3c_hci *hci,
struct hci_pio_data *pio)
{
struct hci_pio_ibi_data *ibi = &pio->ibi;
unsigned int nr_words, thresh_val;
u32 *p;
p = ibi->data_ptr;
p += (ibi->seg_len - ibi->seg_cnt) / 4;
while ((nr_words = ibi->seg_cnt/4)) {
/* determine our IBI queue threshold value */
thresh_val = min(nr_words, pio->max_ibi_thresh);
hci_pio_set_ibi_thresh(hci, pio, thresh_val);
/* bail out if we don't have that amount of data ready */
if (!(pio_reg_read(INTR_STATUS) & STAT_IBI_STATUS_THLD))
return false;
/* extract the data from the IBI port */
nr_words = thresh_val;
ibi->seg_cnt -= nr_words * 4;
DBG("now %d left %d", nr_words * 4, ibi->seg_cnt);
while (nr_words--)
*p++ = pio_reg_read(IBI_PORT);
}
if (ibi->seg_cnt) {
/*
* There are trailing bytes in the last word.
* Fetch that word and extract the bytes in an endian-independent way.
* Unlike the TX case, we must not write past the end of
* the destination buffer.
*/
u32 data;
u8 *p_byte = (u8 *)p;
hci_pio_set_ibi_thresh(hci, pio, 1);
if (!(pio_reg_read(INTR_STATUS) & STAT_IBI_STATUS_THLD))
return false;
DBG("trailing %d", ibi->seg_cnt);
data = pio_reg_read(IBI_PORT);
data = (__force u32) cpu_to_le32(data);
while (ibi->seg_cnt--) {
*p_byte++ = data;
data >>= 8;
}
}
return true;
}
static bool hci_pio_prep_new_ibi(struct i3c_hci *hci, struct hci_pio_data *pio)
{
struct hci_pio_ibi_data *ibi = &pio->ibi;
struct i3c_dev_desc *dev;
struct i3c_hci_dev_data *dev_data;
struct hci_pio_dev_ibi_data *dev_ibi;
u32 ibi_status;
/*
* We have a new IBI. Try to set up its payload retrieval.
* When returning true, the IBI data has to be consumed whether
* or not we are set up to capture it. If we return true with
* ibi->slot == NULL that means the data payload has to be
* drained out of the IBI port and dropped.
*/
ibi_status = pio_reg_read(IBI_PORT);
DBG("status = %#x", ibi_status);
ibi->addr = FIELD_GET(IBI_TARGET_ADDR, ibi_status);
if (ibi_status & IBI_ERROR) {
dev_err(&hci->master.dev, "IBI error from %#x\n", ibi->addr);
return false;
}
ibi->last_seg = ibi_status & IBI_LAST_STATUS;
ibi->seg_len = FIELD_GET(IBI_DATA_LENGTH, ibi_status);
ibi->seg_cnt = ibi->seg_len;
dev = i3c_hci_addr_to_dev(hci, ibi->addr);
if (!dev) {
dev_err(&hci->master.dev,
"IBI for unknown device %#x\n", ibi->addr);
return true;
}
dev_data = i3c_dev_get_master_data(dev);
dev_ibi = dev_data->ibi_data;
ibi->max_len = dev_ibi->max_len;
if (ibi->seg_len > ibi->max_len) {
dev_err(&hci->master.dev, "IBI payload too big (%d > %d)\n",
ibi->seg_len, ibi->max_len);
return true;
}
ibi->slot = i3c_generic_ibi_get_free_slot(dev_ibi->pool);
if (!ibi->slot) {
dev_err(&hci->master.dev, "no free slot for IBI\n");
} else {
ibi->slot->len = 0;
ibi->data_ptr = ibi->slot->data;
}
return true;
}
static void hci_pio_free_ibi_slot(struct i3c_hci *hci, struct hci_pio_data *pio)
{
struct hci_pio_ibi_data *ibi = &pio->ibi;
struct hci_pio_dev_ibi_data *dev_ibi;
if (ibi->slot) {
dev_ibi = ibi->slot->dev->common.master_priv;
i3c_generic_ibi_recycle_slot(dev_ibi->pool, ibi->slot);
ibi->slot = NULL;
}
}
static bool hci_pio_process_ibi(struct i3c_hci *hci, struct hci_pio_data *pio)
{
struct hci_pio_ibi_data *ibi = &pio->ibi;
if (!ibi->slot && !ibi->seg_cnt && ibi->last_seg)
if (!hci_pio_prep_new_ibi(hci, pio))
return false;
for (;;) {
u32 ibi_status;
unsigned int ibi_addr;
if (ibi->slot) {
if (!hci_pio_get_ibi_segment(hci, pio))
return false;
ibi->slot->len += ibi->seg_len;
ibi->data_ptr += ibi->seg_len;
if (ibi->last_seg) {
/* was the last segment: submit it and leave */
i3c_master_queue_ibi(ibi->slot->dev, ibi->slot);
ibi->slot = NULL;
hci_pio_set_ibi_thresh(hci, pio, 1);
return true;
}
} else if (ibi->seg_cnt) {
/*
* No slot but a non-zero count. This is the result
* of some error and the payload must be drained.
* This normally does not happen, so there is no need
* to be extra optimized here.
*/
hci_pio_set_ibi_thresh(hci, pio, 1);
do {
if (!(pio_reg_read(INTR_STATUS) & STAT_IBI_STATUS_THLD))
return false;
pio_reg_read(IBI_PORT);
} while (--ibi->seg_cnt);
if (ibi->last_seg)
return true;
}
/* try to move to the next segment right away */
hci_pio_set_ibi_thresh(hci, pio, 1);
if (!(pio_reg_read(INTR_STATUS) & STAT_IBI_STATUS_THLD))
return false;
ibi_status = pio_reg_read(IBI_PORT);
ibi_addr = FIELD_GET(IBI_TARGET_ADDR, ibi_status);
if (ibi->addr != ibi_addr) {
/* target address changed before last segment */
dev_err(&hci->master.dev,
"unexp IBI address changed from %d to %d\n",
ibi->addr, ibi_addr);
hci_pio_free_ibi_slot(hci, pio);
}
ibi->last_seg = ibi_status & IBI_LAST_STATUS;
ibi->seg_len = FIELD_GET(IBI_DATA_LENGTH, ibi_status);
ibi->seg_cnt = ibi->seg_len;
if (ibi->slot && ibi->slot->len + ibi->seg_len > ibi->max_len) {
dev_err(&hci->master.dev,
"IBI payload too big (%d > %d)\n",
ibi->slot->len + ibi->seg_len, ibi->max_len);
hci_pio_free_ibi_slot(hci, pio);
}
}
return false;
}
static int hci_pio_request_ibi(struct i3c_hci *hci, struct i3c_dev_desc *dev,
const struct i3c_ibi_setup *req)
{
struct i3c_hci_dev_data *dev_data = i3c_dev_get_master_data(dev);
struct i3c_generic_ibi_pool *pool;
struct hci_pio_dev_ibi_data *dev_ibi;
dev_ibi = kmalloc(sizeof(*dev_ibi), GFP_KERNEL);
if (!dev_ibi)
return -ENOMEM;
pool = i3c_generic_ibi_alloc_pool(dev, req);
if (IS_ERR(pool)) {
kfree(dev_ibi);
return PTR_ERR(pool);
}
dev_ibi->pool = pool;
dev_ibi->max_len = req->max_payload_len;
dev_data->ibi_data = dev_ibi;
return 0;
}
static void hci_pio_free_ibi(struct i3c_hci *hci, struct i3c_dev_desc *dev)
{
struct i3c_hci_dev_data *dev_data = i3c_dev_get_master_data(dev);
struct hci_pio_dev_ibi_data *dev_ibi = dev_data->ibi_data;
dev_data->ibi_data = NULL;
i3c_generic_ibi_free_pool(dev_ibi->pool);
kfree(dev_ibi);
}
static void hci_pio_recycle_ibi_slot(struct i3c_hci *hci,
struct i3c_dev_desc *dev,
struct i3c_ibi_slot *slot)
{
struct i3c_hci_dev_data *dev_data = i3c_dev_get_master_data(dev);
struct hci_pio_dev_ibi_data *dev_ibi = dev_data->ibi_data;
i3c_generic_ibi_recycle_slot(dev_ibi->pool, slot);
}
static bool hci_pio_irq_handler(struct i3c_hci *hci, unsigned int unused)
{
struct hci_pio_data *pio = hci->io_data;
u32 status;
spin_lock(&pio->lock);
status = pio_reg_read(INTR_STATUS);
DBG("(in) status: %#x/%#x", status, pio->enabled_irqs);
status &= pio->enabled_irqs | STAT_LATENCY_WARNINGS;
if (!status) {
spin_unlock(&pio->lock);
return false;
}
if (status & STAT_IBI_STATUS_THLD)
hci_pio_process_ibi(hci, pio);
if (status & STAT_RX_THLD)
if (hci_pio_process_rx(hci, pio))
pio->enabled_irqs &= ~STAT_RX_THLD;
if (status & STAT_TX_THLD)
if (hci_pio_process_tx(hci, pio))
pio->enabled_irqs &= ~STAT_TX_THLD;
if (status & STAT_RESP_READY)
if (hci_pio_process_resp(hci, pio))
pio->enabled_irqs &= ~STAT_RESP_READY;
if (unlikely(status & STAT_LATENCY_WARNINGS)) {
pio_reg_write(INTR_STATUS, status & STAT_LATENCY_WARNINGS);
dev_warn_ratelimited(&hci->master.dev,
"encountered warning condition %#lx\n",
status & STAT_LATENCY_WARNINGS);
}
if (unlikely(status & STAT_ALL_ERRORS)) {
pio_reg_write(INTR_STATUS, status & STAT_ALL_ERRORS);
hci_pio_err(hci, pio, status & STAT_ALL_ERRORS);
}
if (status & STAT_CMD_QUEUE_READY)
if (hci_pio_process_cmd(hci, pio))
pio->enabled_irqs &= ~STAT_CMD_QUEUE_READY;
pio_reg_write(INTR_SIGNAL_ENABLE, pio->enabled_irqs);
DBG("(out) status: %#x/%#x",
pio_reg_read(INTR_STATUS), pio_reg_read(INTR_SIGNAL_ENABLE));
spin_unlock(&pio->lock);
return true;
}
const struct hci_io_ops mipi_i3c_hci_pio = {
.init = hci_pio_init,
.cleanup = hci_pio_cleanup,
.queue_xfer = hci_pio_queue_xfer,
.dequeue_xfer = hci_pio_dequeue_xfer,
.irq_handler = hci_pio_irq_handler,
.request_ibi = hci_pio_request_ibi,
.free_ibi = hci_pio_free_ibi,
.recycle_ibi_slot = hci_pio_recycle_ibi_slot,
};
| linux-master | drivers/i3c/master/mipi-i3c-hci/pio.c |
// SPDX-License-Identifier: BSD-3-Clause
/*
* Copyright (c) 2020, MIPI Alliance, Inc.
*
* Author: Nicolas Pitre <[email protected]>
*/
#include <linux/device.h>
#include <linux/bitfield.h>
#include <linux/i3c/master.h>
#include <linux/io.h>
#include "hci.h"
#include "dct.h"
/*
* Device Characteristic Table
*/
void i3c_hci_dct_get_val(struct i3c_hci *hci, unsigned int dct_idx,
u64 *pid, unsigned int *dcr, unsigned int *bcr)
{
void __iomem *reg = hci->DCT_regs + dct_idx * 4 * 4;
u32 dct_entry_data[4];
unsigned int i;
for (i = 0; i < 4; i++) {
dct_entry_data[i] = readl(reg);
reg += 4;
}
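/*
* The 128-bit DCT entry is read as four 32-bit words: word 0 holds
* PID[47:16], bits 15:0 of word 1 hold PID[15:0], and DCR/BCR sit in
* bits 7:0 and 15:8 of word 2 respectively.
*/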
*pid = ((u64)dct_entry_data[0]) << (47 - 32 + 1) |
FIELD_GET(W1_MASK(47, 32), dct_entry_data[1]);
*dcr = FIELD_GET(W2_MASK(71, 64), dct_entry_data[2]);
*bcr = FIELD_GET(W2_MASK(79, 72), dct_entry_data[2]);
}
| linux-master | drivers/i3c/master/mipi-i3c-hci/dct_v1.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* connector.c
*
* 2004+ Copyright (c) Evgeniy Polyakov <[email protected]>
* All rights reserved.
*/
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/skbuff.h>
#include <net/netlink.h>
#include <linux/moduleparam.h>
#include <linux/connector.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/proc_fs.h>
#include <linux/spinlock.h>
#include <net/sock.h>
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Evgeniy Polyakov <[email protected]>");
MODULE_DESCRIPTION("Generic userspace <-> kernelspace connector.");
MODULE_ALIAS_NET_PF_PROTO(PF_NETLINK, NETLINK_CONNECTOR);
static struct cn_dev cdev;
static int cn_already_initialized;
/*
* Sends mult (multiple) cn_msg messages at a time.
*
* msg->seq and msg->ack are used to determine message genealogy.
* When someone sends a message it puts a locally unique sequence
* number and a random acknowledge number in there. The sequence number
* may be copied into nlmsghdr->nlmsg_seq too.
*
* Sequence number is incremented with each message to be sent.
*
* If we expect a reply to our message then the sequence number in
* the received message MUST be the same as in the original message, and
* the acknowledge number MUST be that sequence number + 1.
*
* If we receive a message and its sequence number is not equal to the
* one we are expecting then it is a new message.
*
* If we receive a message and its sequence number is the same as the one
* we are expecting, but its acknowledgement number is not equal to the
* acknowledgement number in the original message + 1, then it is
* a new message.
*
* If msg->len != len, then additional cn_msg messages are expected following
* the first msg.
*
* The message is sent to the portid if given, to the group if given, to
* both if both are given, or, if both are zero, the group is looked up
* from the registered callbacks and the message is sent there.
*/
int cn_netlink_send_mult(struct cn_msg *msg, u16 len, u32 portid, u32 __group,
gfp_t gfp_mask,
int (*filter)(struct sock *dsk, struct sk_buff *skb, void *data),
void *filter_data)
{
struct cn_callback_entry *__cbq;
unsigned int size;
struct sk_buff *skb;
struct nlmsghdr *nlh;
struct cn_msg *data;
struct cn_dev *dev = &cdev;
u32 group = 0;
int found = 0;
if (portid || __group) {
group = __group;
} else {
spin_lock_bh(&dev->cbdev->queue_lock);
list_for_each_entry(__cbq, &dev->cbdev->queue_list,
callback_entry) {
if (cn_cb_equal(&__cbq->id.id, &msg->id)) {
found = 1;
group = __cbq->group;
break;
}
}
spin_unlock_bh(&dev->cbdev->queue_lock);
if (!found)
return -ENODEV;
}
if (!portid && !netlink_has_listeners(dev->nls, group))
return -ESRCH;
size = sizeof(*msg) + len;
skb = nlmsg_new(size, gfp_mask);
if (!skb)
return -ENOMEM;
nlh = nlmsg_put(skb, 0, msg->seq, NLMSG_DONE, size, 0);
if (!nlh) {
kfree_skb(skb);
return -EMSGSIZE;
}
data = nlmsg_data(nlh);
memcpy(data, msg, size);
NETLINK_CB(skb).dst_group = group;
if (group)
return netlink_broadcast_filtered(dev->nls, skb, portid, group,
gfp_mask, filter,
(void *)filter_data);
return netlink_unicast(dev->nls, skb, portid,
!gfpflags_allow_blocking(gfp_mask));
}
EXPORT_SYMBOL_GPL(cn_netlink_send_mult);
/* same as cn_netlink_send_mult except msg->len is used for len */
int cn_netlink_send(struct cn_msg *msg, u32 portid, u32 __group,
gfp_t gfp_mask)
{
return cn_netlink_send_mult(msg, msg->len, portid, __group, gfp_mask,
NULL, NULL);
}
EXPORT_SYMBOL_GPL(cn_netlink_send);
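/*
* Illustrative sketch, not part of this file: how a hypothetical in-kernel
* user might broadcast a small payload with cn_netlink_send(). The
* example_notify() helper and the 0x123/0x456 cb_id values are invented
* for illustration; real users send on the idx/val they registered.
*/
#if 0 /* example only, not compiled */
static int example_notify(const void *payload, u16 payload_len)
{
struct cn_msg *m;
int ret;

m = kzalloc(sizeof(*m) + payload_len, GFP_KERNEL);
if (!m)
return -ENOMEM;

m->id.idx = 0x123; /* hypothetical registered index */
m->id.val = 0x456; /* hypothetical value */
m->len = payload_len;
memcpy(m->data, payload, payload_len);

/* portid 0 and group 0: the group is looked up from the callbacks */
ret = cn_netlink_send(m, 0, 0, GFP_KERNEL);
kfree(m);
return ret;
}
#endif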
/*
* Callback helper - finds the callback registered for the message ID and invokes it.
*/
static int cn_call_callback(struct sk_buff *skb)
{
struct nlmsghdr *nlh;
struct cn_callback_entry *i, *cbq = NULL;
struct cn_dev *dev = &cdev;
struct cn_msg *msg = nlmsg_data(nlmsg_hdr(skb));
struct netlink_skb_parms *nsp = &NETLINK_CB(skb);
int err = -ENODEV;
/* verify msg->len is within skb */
nlh = nlmsg_hdr(skb);
if (nlh->nlmsg_len < NLMSG_HDRLEN + sizeof(struct cn_msg) + msg->len)
return -EINVAL;
spin_lock_bh(&dev->cbdev->queue_lock);
list_for_each_entry(i, &dev->cbdev->queue_list, callback_entry) {
if (cn_cb_equal(&i->id.id, &msg->id)) {
refcount_inc(&i->refcnt);
cbq = i;
break;
}
}
spin_unlock_bh(&dev->cbdev->queue_lock);
if (cbq != NULL) {
cbq->callback(msg, nsp);
kfree_skb(skb);
cn_queue_release_callback(cbq);
err = 0;
}
return err;
}
/*
* Allow non-root access to the NETLINK_CONNECTOR family for the
* CN_IDX_PROC multicast group.
*/
static int cn_bind(struct net *net, int group)
{
unsigned long groups = (unsigned long) group;
if (ns_capable(net->user_ns, CAP_NET_ADMIN))
return 0;
if (test_bit(CN_IDX_PROC - 1, &groups))
return 0;
return -EPERM;
}
static void cn_release(struct sock *sk, unsigned long *groups)
{
if (groups && test_bit(CN_IDX_PROC - 1, groups)) {
kfree(sk->sk_user_data);
sk->sk_user_data = NULL;
}
}
/*
* Main netlink receiving function.
*
* It checks the skb, netlink header and msg sizes, then calls the callback helper.
*/
static void cn_rx_skb(struct sk_buff *skb)
{
struct nlmsghdr *nlh;
int len, err;
if (skb->len >= NLMSG_HDRLEN) {
nlh = nlmsg_hdr(skb);
len = nlmsg_len(nlh);
if (len < (int)sizeof(struct cn_msg) ||
skb->len < nlh->nlmsg_len ||
len > CONNECTOR_MAX_MSG_SIZE)
return;
err = cn_call_callback(skb_get(skb));
if (err < 0)
kfree_skb(skb);
}
}
/*
* Callback add routine - adds a callback with the given ID and name.
* If a callback with the same ID is already registered it will not be added.
*
* May sleep.
*/
int cn_add_callback(const struct cb_id *id, const char *name,
void (*callback)(struct cn_msg *,
struct netlink_skb_parms *))
{
struct cn_dev *dev = &cdev;
if (!cn_already_initialized)
return -EAGAIN;
return cn_queue_add_callback(dev->cbdev, name, id, callback);
}
EXPORT_SYMBOL_GPL(cn_add_callback);
/*
* Callback remove routine - removes the callback
* with the given ID.
* If there is no registered callback with the given
* ID, nothing happens.
*
* May sleep while waiting for reference counter to become zero.
*/
void cn_del_callback(const struct cb_id *id)
{
struct cn_dev *dev = &cdev;
cn_queue_del_callback(dev->cbdev, id);
}
EXPORT_SYMBOL_GPL(cn_del_callback);
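/*
* Illustrative sketch, not part of this file: pairing cn_add_callback()
* with cn_del_callback() from a hypothetical module. The example_id value
* and callback body are invented for illustration.
*/
#if 0 /* example only, not compiled */
static const struct cb_id example_id = { .idx = 0x123, .val = 0x456 };

static void example_cb(struct cn_msg *msg, struct netlink_skb_parms *nsp)
{
pr_info("example: %u byte payload, seq %u\n", msg->len, msg->seq);
}

static int __init example_init(void)
{
return cn_add_callback(&example_id, "example", example_cb);
}

static void __exit example_exit(void)
{
cn_del_callback(&example_id);
}
#endif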
static int __maybe_unused cn_proc_show(struct seq_file *m, void *v)
{
struct cn_queue_dev *dev = cdev.cbdev;
struct cn_callback_entry *cbq;
seq_printf(m, "Name            ID\n");
spin_lock_bh(&dev->queue_lock);
list_for_each_entry(cbq, &dev->queue_list, callback_entry) {
seq_printf(m, "%-15s %u:%u\n",
cbq->id.name,
cbq->id.id.idx,
cbq->id.id.val);
}
spin_unlock_bh(&dev->queue_lock);
return 0;
}
static int cn_init(void)
{
struct cn_dev *dev = &cdev;
struct netlink_kernel_cfg cfg = {
.groups = CN_NETLINK_USERS + 0xf,
.input = cn_rx_skb,
.flags = NL_CFG_F_NONROOT_RECV,
.bind = cn_bind,
.release = cn_release,
};
dev->nls = netlink_kernel_create(&init_net, NETLINK_CONNECTOR, &cfg);
if (!dev->nls)
return -EIO;
dev->cbdev = cn_queue_alloc_dev("cqueue", dev->nls);
if (!dev->cbdev) {
netlink_kernel_release(dev->nls);
return -EINVAL;
}
cn_already_initialized = 1;
proc_create_single("connector", S_IRUGO, init_net.proc_net, cn_proc_show);
return 0;
}
static void cn_fini(void)
{
struct cn_dev *dev = &cdev;
cn_already_initialized = 0;
remove_proc_entry("connector", init_net.proc_net);
cn_queue_free_dev(dev->cbdev);
netlink_kernel_release(dev->nls);
}
subsys_initcall(cn_init);
module_exit(cn_fini);
| linux-master | drivers/connector/connector.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* cn_proc.c - process events connector
*
* Copyright (C) Matt Helsley, IBM Corp. 2005
* Based on cn_fork.c by Guillaume Thouvenin <[email protected]>
* Original copyright notice follows:
* Copyright (C) 2005 BULL SA.
*/
#include <linux/kernel.h>
#include <linux/ktime.h>
#include <linux/init.h>
#include <linux/connector.h>
#include <linux/gfp.h>
#include <linux/ptrace.h>
#include <linux/atomic.h>
#include <linux/pid_namespace.h>
#include <linux/cn_proc.h>
#include <linux/local_lock.h>
/*
* Size of a cn_msg followed by a proc_event structure. Since the
* sizeof struct cn_msg is a multiple of 4 bytes, but not 8 bytes, we
* add one 4-byte word to the size here, and then start the actual
* cn_msg structure 4 bytes into the stack buffer. The result is that
* the immediately following proc_event structure is aligned to 8 bytes.
*/
#define CN_PROC_MSG_SIZE (sizeof(struct cn_msg) + sizeof(struct proc_event) + 4)
/* See comment above; we test our assumption about sizeof struct cn_msg here. */
static inline struct cn_msg *buffer_to_cn_msg(__u8 *buffer)
{
BUILD_BUG_ON(sizeof(struct cn_msg) != 20);
return (struct cn_msg *)(buffer + 4);
}
static atomic_t proc_event_num_listeners = ATOMIC_INIT(0);
static struct cb_id cn_proc_event_id = { CN_IDX_PROC, CN_VAL_PROC };
/* local_event.count is used as the sequence number of the netlink message */
struct local_event {
local_lock_t lock;
__u32 count;
};
static DEFINE_PER_CPU(struct local_event, local_event) = {
.lock = INIT_LOCAL_LOCK(lock),
};
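/*
* cn_filter() is handed to netlink_broadcast_filtered() via
* cn_netlink_send_mult(): returning 0 delivers the skb to the socket,
* nonzero skips it.
*/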
static int cn_filter(struct sock *dsk, struct sk_buff *skb, void *data)
{
__u32 what, exit_code, *ptr;
enum proc_cn_mcast_op mc_op;
uintptr_t val;
if (!dsk || !data)
return 0;
ptr = (__u32 *)data;
what = *ptr++;
exit_code = *ptr;
val = ((struct proc_input *)(dsk->sk_user_data))->event_type;
mc_op = ((struct proc_input *)(dsk->sk_user_data))->mcast_op;
if (mc_op == PROC_CN_MCAST_IGNORE)
return 1;
if ((__u32)val == PROC_EVENT_ALL)
return 0;
/*
* Drop packet if we have to report only non-zero exit status
* (PROC_EVENT_NONZERO_EXIT) and exit status is 0
*/
if (((__u32)val & PROC_EVENT_NONZERO_EXIT) &&
(what == PROC_EVENT_EXIT)) {
if (exit_code)
return 0;
}
if ((__u32)val & what)
return 0;
return 1;
}
static inline void send_msg(struct cn_msg *msg)
{
__u32 filter_data[2];
local_lock(&local_event.lock);
msg->seq = __this_cpu_inc_return(local_event.count) - 1;
((struct proc_event *)msg->data)->cpu = smp_processor_id();
/*
* local_lock() disables preemption during send to ensure the messages
* are ordered according to their sequence numbers.
*
* If cn_netlink_send() fails, the data is not sent.
*/
filter_data[0] = ((struct proc_event *)msg->data)->what;
if (filter_data[0] == PROC_EVENT_EXIT) {
filter_data[1] =
((struct proc_event *)msg->data)->event_data.exit.exit_code;
} else {
filter_data[1] = 0;
}
cn_netlink_send_mult(msg, msg->len, 0, CN_IDX_PROC, GFP_NOWAIT,
cn_filter, (void *)filter_data);
local_unlock(&local_event.lock);
}
void proc_fork_connector(struct task_struct *task)
{
struct cn_msg *msg;
struct proc_event *ev;
__u8 buffer[CN_PROC_MSG_SIZE] __aligned(8);
struct task_struct *parent;
if (atomic_read(&proc_event_num_listeners) < 1)
return;
msg = buffer_to_cn_msg(buffer);
ev = (struct proc_event *)msg->data;
memset(&ev->event_data, 0, sizeof(ev->event_data));
ev->timestamp_ns = ktime_get_ns();
ev->what = PROC_EVENT_FORK;
rcu_read_lock();
parent = rcu_dereference(task->real_parent);
ev->event_data.fork.parent_pid = parent->pid;
ev->event_data.fork.parent_tgid = parent->tgid;
rcu_read_unlock();
ev->event_data.fork.child_pid = task->pid;
ev->event_data.fork.child_tgid = task->tgid;
memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
msg->ack = 0; /* not used */
msg->len = sizeof(*ev);
msg->flags = 0; /* not used */
send_msg(msg);
}
void proc_exec_connector(struct task_struct *task)
{
struct cn_msg *msg;
struct proc_event *ev;
__u8 buffer[CN_PROC_MSG_SIZE] __aligned(8);
if (atomic_read(&proc_event_num_listeners) < 1)
return;
msg = buffer_to_cn_msg(buffer);
ev = (struct proc_event *)msg->data;
memset(&ev->event_data, 0, sizeof(ev->event_data));
ev->timestamp_ns = ktime_get_ns();
ev->what = PROC_EVENT_EXEC;
ev->event_data.exec.process_pid = task->pid;
ev->event_data.exec.process_tgid = task->tgid;
memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
msg->ack = 0; /* not used */
msg->len = sizeof(*ev);
msg->flags = 0; /* not used */
send_msg(msg);
}
void proc_id_connector(struct task_struct *task, int which_id)
{
struct cn_msg *msg;
struct proc_event *ev;
__u8 buffer[CN_PROC_MSG_SIZE] __aligned(8);
const struct cred *cred;
if (atomic_read(&proc_event_num_listeners) < 1)
return;
msg = buffer_to_cn_msg(buffer);
ev = (struct proc_event *)msg->data;
memset(&ev->event_data, 0, sizeof(ev->event_data));
ev->what = which_id;
ev->event_data.id.process_pid = task->pid;
ev->event_data.id.process_tgid = task->tgid;
rcu_read_lock();
cred = __task_cred(task);
if (which_id == PROC_EVENT_UID) {
ev->event_data.id.r.ruid = from_kuid_munged(&init_user_ns, cred->uid);
ev->event_data.id.e.euid = from_kuid_munged(&init_user_ns, cred->euid);
} else if (which_id == PROC_EVENT_GID) {
ev->event_data.id.r.rgid = from_kgid_munged(&init_user_ns, cred->gid);
ev->event_data.id.e.egid = from_kgid_munged(&init_user_ns, cred->egid);
} else {
rcu_read_unlock();
return;
}
rcu_read_unlock();
ev->timestamp_ns = ktime_get_ns();
memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
msg->ack = 0; /* not used */
msg->len = sizeof(*ev);
msg->flags = 0; /* not used */
send_msg(msg);
}
void proc_sid_connector(struct task_struct *task)
{
struct cn_msg *msg;
struct proc_event *ev;
__u8 buffer[CN_PROC_MSG_SIZE] __aligned(8);
if (atomic_read(&proc_event_num_listeners) < 1)
return;
msg = buffer_to_cn_msg(buffer);
ev = (struct proc_event *)msg->data;
memset(&ev->event_data, 0, sizeof(ev->event_data));
ev->timestamp_ns = ktime_get_ns();
ev->what = PROC_EVENT_SID;
ev->event_data.sid.process_pid = task->pid;
ev->event_data.sid.process_tgid = task->tgid;
memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
msg->ack = 0; /* not used */
msg->len = sizeof(*ev);
msg->flags = 0; /* not used */
send_msg(msg);
}
void proc_ptrace_connector(struct task_struct *task, int ptrace_id)
{
struct cn_msg *msg;
struct proc_event *ev;
__u8 buffer[CN_PROC_MSG_SIZE] __aligned(8);
if (atomic_read(&proc_event_num_listeners) < 1)
return;
msg = buffer_to_cn_msg(buffer);
ev = (struct proc_event *)msg->data;
memset(&ev->event_data, 0, sizeof(ev->event_data));
ev->timestamp_ns = ktime_get_ns();
ev->what = PROC_EVENT_PTRACE;
ev->event_data.ptrace.process_pid = task->pid;
ev->event_data.ptrace.process_tgid = task->tgid;
if (ptrace_id == PTRACE_ATTACH) {
ev->event_data.ptrace.tracer_pid = current->pid;
ev->event_data.ptrace.tracer_tgid = current->tgid;
} else if (ptrace_id == PTRACE_DETACH) {
ev->event_data.ptrace.tracer_pid = 0;
ev->event_data.ptrace.tracer_tgid = 0;
} else
return;
memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
msg->ack = 0; /* not used */
msg->len = sizeof(*ev);
msg->flags = 0; /* not used */
send_msg(msg);
}
void proc_comm_connector(struct task_struct *task)
{
struct cn_msg *msg;
struct proc_event *ev;
__u8 buffer[CN_PROC_MSG_SIZE] __aligned(8);
if (atomic_read(&proc_event_num_listeners) < 1)
return;
msg = buffer_to_cn_msg(buffer);
ev = (struct proc_event *)msg->data;
memset(&ev->event_data, 0, sizeof(ev->event_data));
ev->timestamp_ns = ktime_get_ns();
ev->what = PROC_EVENT_COMM;
ev->event_data.comm.process_pid = task->pid;
ev->event_data.comm.process_tgid = task->tgid;
get_task_comm(ev->event_data.comm.comm, task);
memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
msg->ack = 0; /* not used */
msg->len = sizeof(*ev);
msg->flags = 0; /* not used */
send_msg(msg);
}
void proc_coredump_connector(struct task_struct *task)
{
struct cn_msg *msg;
struct proc_event *ev;
struct task_struct *parent;
__u8 buffer[CN_PROC_MSG_SIZE] __aligned(8);
if (atomic_read(&proc_event_num_listeners) < 1)
return;
msg = buffer_to_cn_msg(buffer);
ev = (struct proc_event *)msg->data;
memset(&ev->event_data, 0, sizeof(ev->event_data));
ev->timestamp_ns = ktime_get_ns();
ev->what = PROC_EVENT_COREDUMP;
ev->event_data.coredump.process_pid = task->pid;
ev->event_data.coredump.process_tgid = task->tgid;
rcu_read_lock();
if (pid_alive(task)) {
parent = rcu_dereference(task->real_parent);
ev->event_data.coredump.parent_pid = parent->pid;
ev->event_data.coredump.parent_tgid = parent->tgid;
}
rcu_read_unlock();
memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
msg->ack = 0; /* not used */
msg->len = sizeof(*ev);
msg->flags = 0; /* not used */
send_msg(msg);
}
void proc_exit_connector(struct task_struct *task)
{
struct cn_msg *msg;
struct proc_event *ev;
struct task_struct *parent;
__u8 buffer[CN_PROC_MSG_SIZE] __aligned(8);
if (atomic_read(&proc_event_num_listeners) < 1)
return;
msg = buffer_to_cn_msg(buffer);
ev = (struct proc_event *)msg->data;
memset(&ev->event_data, 0, sizeof(ev->event_data));
ev->timestamp_ns = ktime_get_ns();
ev->what = PROC_EVENT_EXIT;
ev->event_data.exit.process_pid = task->pid;
ev->event_data.exit.process_tgid = task->tgid;
ev->event_data.exit.exit_code = task->exit_code;
ev->event_data.exit.exit_signal = task->exit_signal;
rcu_read_lock();
if (pid_alive(task)) {
parent = rcu_dereference(task->real_parent);
ev->event_data.exit.parent_pid = parent->pid;
ev->event_data.exit.parent_tgid = parent->tgid;
}
rcu_read_unlock();
memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
msg->ack = 0; /* not used */
msg->len = sizeof(*ev);
msg->flags = 0; /* not used */
send_msg(msg);
}
/*
* Send an acknowledgement message to userspace
*
* Use 0 for success, EFOO otherwise.
* Note: this is the negative of conventional kernel error
* values because it's not being returned via syscall return
* mechanisms.
*/
static void cn_proc_ack(int err, int rcvd_seq, int rcvd_ack)
{
struct cn_msg *msg;
struct proc_event *ev;
__u8 buffer[CN_PROC_MSG_SIZE] __aligned(8);
if (atomic_read(&proc_event_num_listeners) < 1)
return;
msg = buffer_to_cn_msg(buffer);
ev = (struct proc_event *)msg->data;
memset(&ev->event_data, 0, sizeof(ev->event_data));
msg->seq = rcvd_seq;
ev->timestamp_ns = ktime_get_ns();
ev->cpu = -1;
ev->what = PROC_EVENT_NONE;
ev->event_data.ack.err = err;
memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
msg->ack = rcvd_ack + 1;
msg->len = sizeof(*ev);
msg->flags = 0; /* not used */
send_msg(msg);
}
/**
* cn_proc_mcast_ctl
* @msg: message sent from userspace via the connector
* @nsp: NETLINK_CB of the client's socket buffer
*/
static void cn_proc_mcast_ctl(struct cn_msg *msg,
struct netlink_skb_parms *nsp)
{
enum proc_cn_mcast_op mc_op = 0, prev_mc_op = 0;
struct proc_input *pinput = NULL;
enum proc_cn_event ev_type = 0;
int err = 0, initial = 0;
struct sock *sk = NULL;
/*
* Events are reported with respect to the initial pid
* and user namespaces so ignore requestors from
* other namespaces.
*/
if ((current_user_ns() != &init_user_ns) ||
!task_is_in_init_pid_ns(current))
return;
if (msg->len == sizeof(*pinput)) {
pinput = (struct proc_input *)msg->data;
mc_op = pinput->mcast_op;
ev_type = pinput->event_type;
} else if (msg->len == sizeof(mc_op)) {
mc_op = *((enum proc_cn_mcast_op *)msg->data);
ev_type = PROC_EVENT_ALL;
} else {
return;
}
ev_type = valid_event((enum proc_cn_event)ev_type);
if (ev_type == PROC_EVENT_NONE)
ev_type = PROC_EVENT_ALL;
if (nsp->sk) {
sk = nsp->sk;
if (sk->sk_user_data == NULL) {
sk->sk_user_data = kzalloc(sizeof(struct proc_input),
GFP_KERNEL);
if (sk->sk_user_data == NULL) {
err = ENOMEM;
goto out;
}
initial = 1;
} else {
prev_mc_op =
((struct proc_input *)(sk->sk_user_data))->mcast_op;
}
((struct proc_input *)(sk->sk_user_data))->event_type =
ev_type;
((struct proc_input *)(sk->sk_user_data))->mcast_op = mc_op;
}
switch (mc_op) {
case PROC_CN_MCAST_LISTEN:
if (initial || (prev_mc_op != PROC_CN_MCAST_LISTEN))
atomic_inc(&proc_event_num_listeners);
break;
case PROC_CN_MCAST_IGNORE:
if (!initial && (prev_mc_op != PROC_CN_MCAST_IGNORE))
atomic_dec(&proc_event_num_listeners);
((struct proc_input *)(sk->sk_user_data))->event_type =
PROC_EVENT_NONE;
break;
default:
err = EINVAL;
break;
}
out:
cn_proc_ack(err, msg->seq, msg->ack);
}
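/*
* Illustrative sketch, not part of this file: the userspace side of the
* exchange handled above. A listener opens a NETLINK_CONNECTOR socket,
* binds to the CN_IDX_PROC group and sends PROC_CN_MCAST_LISTEN wrapped
* in nlmsghdr + cn_msg. proc_events_listen() is an invented name.
*/
#if 0 /* userspace example, not kernel code */
#include <sys/socket.h>
#include <unistd.h>
#include <linux/netlink.h>
#include <linux/connector.h>
#include <linux/cn_proc.h>

static int proc_events_listen(void)
{
int sk = socket(PF_NETLINK, SOCK_DGRAM, NETLINK_CONNECTOR);
struct sockaddr_nl sa = {
.nl_family = AF_NETLINK,
.nl_groups = CN_IDX_PROC,
.nl_pid = getpid(),
};
struct {
struct nlmsghdr nl_hdr;
struct cn_msg cn;
enum proc_cn_mcast_op op;
} req = { 0 };

if (sk < 0 || bind(sk, (struct sockaddr *)&sa, sizeof(sa)) < 0)
return -1;

req.nl_hdr.nlmsg_len = sizeof(req);
req.nl_hdr.nlmsg_type = NLMSG_DONE;
req.nl_hdr.nlmsg_pid = getpid();
req.cn.id.idx = CN_IDX_PROC;
req.cn.id.val = CN_VAL_PROC;
req.cn.len = sizeof(req.op);
req.op = PROC_CN_MCAST_LISTEN;

return send(sk, &req, sizeof(req), 0) < 0 ? -1 : sk;
}
#endif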
/*
* cn_proc_init - initialization entry point
*
* Adds the connector callback to the connector driver.
*/
static int __init cn_proc_init(void)
{
int err = cn_add_callback(&cn_proc_event_id,
"cn_proc",
&cn_proc_mcast_ctl);
if (err) {
pr_warn("cn_proc failed to register\n");
return err;
}
return 0;
}
device_initcall(cn_proc_init);
| linux-master | drivers/connector/cn_proc.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* cn_queue.c
*
* 2004+ Copyright (c) Evgeniy Polyakov <[email protected]>
* All rights reserved.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/workqueue.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/skbuff.h>
#include <linux/suspend.h>
#include <linux/connector.h>
#include <linux/delay.h>
static struct cn_callback_entry *
cn_queue_alloc_callback_entry(struct cn_queue_dev *dev, const char *name,
const struct cb_id *id,
void (*callback)(struct cn_msg *,
struct netlink_skb_parms *))
{
struct cn_callback_entry *cbq;
cbq = kzalloc(sizeof(*cbq), GFP_KERNEL);
if (!cbq) {
pr_err("Failed to create new callback queue.\n");
return NULL;
}
refcount_set(&cbq->refcnt, 1);
atomic_inc(&dev->refcnt);
cbq->pdev = dev;
snprintf(cbq->id.name, sizeof(cbq->id.name), "%s", name);
memcpy(&cbq->id.id, id, sizeof(struct cb_id));
cbq->callback = callback;
return cbq;
}
void cn_queue_release_callback(struct cn_callback_entry *cbq)
{
if (!refcount_dec_and_test(&cbq->refcnt))
return;
atomic_dec(&cbq->pdev->refcnt);
kfree(cbq);
}
int cn_cb_equal(const struct cb_id *i1, const struct cb_id *i2)
{
return ((i1->idx == i2->idx) && (i1->val == i2->val));
}
int cn_queue_add_callback(struct cn_queue_dev *dev, const char *name,
const struct cb_id *id,
void (*callback)(struct cn_msg *,
struct netlink_skb_parms *))
{
struct cn_callback_entry *cbq, *__cbq;
int found = 0;
cbq = cn_queue_alloc_callback_entry(dev, name, id, callback);
if (!cbq)
return -ENOMEM;
spin_lock_bh(&dev->queue_lock);
list_for_each_entry(__cbq, &dev->queue_list, callback_entry) {
if (cn_cb_equal(&__cbq->id.id, id)) {
found = 1;
break;
}
}
if (!found)
list_add_tail(&cbq->callback_entry, &dev->queue_list);
spin_unlock_bh(&dev->queue_lock);
if (found) {
cn_queue_release_callback(cbq);
return -EINVAL;
}
cbq->seq = 0;
cbq->group = cbq->id.id.idx;
return 0;
}
void cn_queue_del_callback(struct cn_queue_dev *dev, const struct cb_id *id)
{
struct cn_callback_entry *cbq, *n;
int found = 0;
spin_lock_bh(&dev->queue_lock);
list_for_each_entry_safe(cbq, n, &dev->queue_list, callback_entry) {
if (cn_cb_equal(&cbq->id.id, id)) {
list_del(&cbq->callback_entry);
found = 1;
break;
}
}
spin_unlock_bh(&dev->queue_lock);
if (found)
cn_queue_release_callback(cbq);
}
struct cn_queue_dev *cn_queue_alloc_dev(const char *name, struct sock *nls)
{
struct cn_queue_dev *dev;
dev = kzalloc(sizeof(*dev), GFP_KERNEL);
if (!dev)
return NULL;
snprintf(dev->name, sizeof(dev->name), "%s", name);
atomic_set(&dev->refcnt, 0);
INIT_LIST_HEAD(&dev->queue_list);
spin_lock_init(&dev->queue_lock);
dev->nls = nls;
return dev;
}
void cn_queue_free_dev(struct cn_queue_dev *dev)
{
struct cn_callback_entry *cbq, *n;
spin_lock_bh(&dev->queue_lock);
list_for_each_entry_safe(cbq, n, &dev->queue_list, callback_entry)
list_del(&cbq->callback_entry);
spin_unlock_bh(&dev->queue_lock);
while (atomic_read(&dev->refcnt)) {
pr_info("Waiting for %s to become free: refcnt=%d.\n",
dev->name, atomic_read(&dev->refcnt));
msleep(1000);
}
kfree(dev);
dev = NULL;
}
| linux-master | drivers/connector/cn_queue.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2001, 2002 Sistina Software (UK) Limited.
* Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
*
* This file is released under the GPL.
*/
#include "dm-core.h"
#include "dm-rq.h"
#include "dm-uevent.h"
#include "dm-ima.h"
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/sched/mm.h>
#include <linux/sched/signal.h>
#include <linux/blkpg.h>
#include <linux/bio.h>
#include <linux/mempool.h>
#include <linux/dax.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/uio.h>
#include <linux/hdreg.h>
#include <linux/delay.h>
#include <linux/wait.h>
#include <linux/pr.h>
#include <linux/refcount.h>
#include <linux/part_stat.h>
#include <linux/blk-crypto.h>
#include <linux/blk-crypto-profile.h>
#define DM_MSG_PREFIX "core"
/*
* Cookies are numeric values sent with CHANGE and REMOVE
* uevents while resuming, removing or renaming the device.
*/
#define DM_COOKIE_ENV_VAR_NAME "DM_COOKIE"
#define DM_COOKIE_LENGTH 24
/*
* For a REQ_POLLED fs bio, this flag is set if we link the mapped underlying
* dm_ios into one list and reuse bio->bi_private as the list head. Before
* ending this fs bio, we will recover its ->bi_private.
*/
#define REQ_DM_POLL_LIST REQ_DRV
static const char *_name = DM_NAME;
static unsigned int major;
static unsigned int _major;
static DEFINE_IDR(_minor_idr);
static DEFINE_SPINLOCK(_minor_lock);
static void do_deferred_remove(struct work_struct *w);
static DECLARE_WORK(deferred_remove_work, do_deferred_remove);
static struct workqueue_struct *deferred_remove_workqueue;
atomic_t dm_global_event_nr = ATOMIC_INIT(0);
DECLARE_WAIT_QUEUE_HEAD(dm_global_eventq);
void dm_issue_global_event(void)
{
atomic_inc(&dm_global_event_nr);
wake_up(&dm_global_eventq);
}
DEFINE_STATIC_KEY_FALSE(stats_enabled);
DEFINE_STATIC_KEY_FALSE(swap_bios_enabled);
DEFINE_STATIC_KEY_FALSE(zoned_enabled);
/*
* One of these is allocated (on-stack) per original bio.
*/
struct clone_info {
struct dm_table *map;
struct bio *bio;
struct dm_io *io;
sector_t sector;
unsigned int sector_count;
bool is_abnormal_io:1;
bool submit_as_polled:1;
};
static inline struct dm_target_io *clone_to_tio(struct bio *clone)
{
return container_of(clone, struct dm_target_io, clone);
}
void *dm_per_bio_data(struct bio *bio, size_t data_size)
{
if (!dm_tio_flagged(clone_to_tio(bio), DM_TIO_INSIDE_DM_IO))
return (char *)bio - DM_TARGET_IO_BIO_OFFSET - data_size;
return (char *)bio - DM_IO_BIO_OFFSET - data_size;
}
EXPORT_SYMBOL_GPL(dm_per_bio_data);
struct bio *dm_bio_from_per_bio_data(void *data, size_t data_size)
{
struct dm_io *io = (struct dm_io *)((char *)data + data_size);
if (io->magic == DM_IO_MAGIC)
return (struct bio *)((char *)io + DM_IO_BIO_OFFSET);
BUG_ON(io->magic != DM_TIO_MAGIC);
return (struct bio *)((char *)io + DM_TARGET_IO_BIO_OFFSET);
}
EXPORT_SYMBOL_GPL(dm_bio_from_per_bio_data);
unsigned int dm_bio_get_target_bio_nr(const struct bio *bio)
{
return container_of(bio, struct dm_target_io, clone)->target_bio_nr;
}
EXPORT_SYMBOL_GPL(dm_bio_get_target_bio_nr);
#define MINOR_ALLOCED ((void *)-1)
#define DM_NUMA_NODE NUMA_NO_NODE
static int dm_numa_node = DM_NUMA_NODE;
#define DEFAULT_SWAP_BIOS (8 * 1048576 / PAGE_SIZE)
static int swap_bios = DEFAULT_SWAP_BIOS;
static int get_swap_bios(void)
{
int latch = READ_ONCE(swap_bios);
if (unlikely(latch <= 0))
latch = DEFAULT_SWAP_BIOS;
return latch;
}
struct table_device {
struct list_head list;
refcount_t count;
struct dm_dev dm_dev;
};
/*
* Bio-based DM's mempools' reserved IOs set by the user.
*/
#define RESERVED_BIO_BASED_IOS 16
static unsigned int reserved_bio_based_ios = RESERVED_BIO_BASED_IOS;
static int __dm_get_module_param_int(int *module_param, int min, int max)
{
int param = READ_ONCE(*module_param);
int modified_param = 0;
bool modified = true;
if (param < min)
modified_param = min;
else if (param > max)
modified_param = max;
else
modified = false;
if (modified) {
(void)cmpxchg(module_param, param, modified_param);
param = modified_param;
}
return param;
}
unsigned int __dm_get_module_param(unsigned int *module_param, unsigned int def, unsigned int max)
{
unsigned int param = READ_ONCE(*module_param);
unsigned int modified_param = 0;
if (!param)
modified_param = def;
else if (param > max)
modified_param = max;
if (modified_param) {
(void)cmpxchg(module_param, param, modified_param);
param = modified_param;
}
return param;
}
unsigned int dm_get_reserved_bio_based_ios(void)
{
return __dm_get_module_param(&reserved_bio_based_ios,
RESERVED_BIO_BASED_IOS, DM_RESERVED_MAX_IOS);
}
EXPORT_SYMBOL_GPL(dm_get_reserved_bio_based_ios);
static unsigned int dm_get_numa_node(void)
{
return __dm_get_module_param_int(&dm_numa_node,
DM_NUMA_NODE, num_online_nodes() - 1);
}
static int __init local_init(void)
{
int r;
r = dm_uevent_init();
if (r)
return r;
deferred_remove_workqueue = alloc_ordered_workqueue("kdmremove", 0);
if (!deferred_remove_workqueue) {
r = -ENOMEM;
goto out_uevent_exit;
}
_major = major;
r = register_blkdev(_major, _name);
if (r < 0)
goto out_free_workqueue;
if (!_major)
_major = r;
return 0;
out_free_workqueue:
destroy_workqueue(deferred_remove_workqueue);
out_uevent_exit:
dm_uevent_exit();
return r;
}
static void local_exit(void)
{
destroy_workqueue(deferred_remove_workqueue);
unregister_blkdev(_major, _name);
dm_uevent_exit();
_major = 0;
DMINFO("cleaned up");
}
static int (*_inits[])(void) __initdata = {
local_init,
dm_target_init,
dm_linear_init,
dm_stripe_init,
dm_io_init,
dm_kcopyd_init,
dm_interface_init,
dm_statistics_init,
};
static void (*_exits[])(void) = {
local_exit,
dm_target_exit,
dm_linear_exit,
dm_stripe_exit,
dm_io_exit,
dm_kcopyd_exit,
dm_interface_exit,
dm_statistics_exit,
};
static int __init dm_init(void)
{
const int count = ARRAY_SIZE(_inits);
int r, i;
#if (IS_ENABLED(CONFIG_IMA) && !IS_ENABLED(CONFIG_IMA_DISABLE_HTABLE))
DMWARN("CONFIG_IMA_DISABLE_HTABLE is disabled."
" Duplicate IMA measurements will not be recorded in the IMA log.");
#endif
for (i = 0; i < count; i++) {
r = _inits[i]();
if (r)
goto bad;
}
return 0;
bad:
while (i--)
_exits[i]();
return r;
}
static void __exit dm_exit(void)
{
int i = ARRAY_SIZE(_exits);
while (i--)
_exits[i]();
/*
* Should be empty by this point.
*/
idr_destroy(&_minor_idr);
}
/*
* Block device functions
*/
int dm_deleting_md(struct mapped_device *md)
{
return test_bit(DMF_DELETING, &md->flags);
}
static int dm_blk_open(struct gendisk *disk, blk_mode_t mode)
{
struct mapped_device *md;
spin_lock(&_minor_lock);
md = disk->private_data;
if (!md)
goto out;
if (test_bit(DMF_FREEING, &md->flags) ||
dm_deleting_md(md)) {
md = NULL;
goto out;
}
dm_get(md);
atomic_inc(&md->open_count);
out:
spin_unlock(&_minor_lock);
return md ? 0 : -ENXIO;
}
static void dm_blk_close(struct gendisk *disk)
{
struct mapped_device *md;
spin_lock(&_minor_lock);
md = disk->private_data;
if (WARN_ON(!md))
goto out;
if (atomic_dec_and_test(&md->open_count) &&
(test_bit(DMF_DEFERRED_REMOVE, &md->flags)))
queue_work(deferred_remove_workqueue, &deferred_remove_work);
dm_put(md);
out:
spin_unlock(&_minor_lock);
}
int dm_open_count(struct mapped_device *md)
{
return atomic_read(&md->open_count);
}
/*
* Guarantees nothing is using the device before it's deleted.
*/
int dm_lock_for_deletion(struct mapped_device *md, bool mark_deferred, bool only_deferred)
{
int r = 0;
spin_lock(&_minor_lock);
if (dm_open_count(md)) {
r = -EBUSY;
if (mark_deferred)
set_bit(DMF_DEFERRED_REMOVE, &md->flags);
} else if (only_deferred && !test_bit(DMF_DEFERRED_REMOVE, &md->flags))
r = -EEXIST;
else
set_bit(DMF_DELETING, &md->flags);
spin_unlock(&_minor_lock);
return r;
}
int dm_cancel_deferred_remove(struct mapped_device *md)
{
int r = 0;
spin_lock(&_minor_lock);
if (test_bit(DMF_DELETING, &md->flags))
r = -EBUSY;
else
clear_bit(DMF_DEFERRED_REMOVE, &md->flags);
spin_unlock(&_minor_lock);
return r;
}
static void do_deferred_remove(struct work_struct *w)
{
dm_deferred_remove();
}
static int dm_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
struct mapped_device *md = bdev->bd_disk->private_data;
return dm_get_geometry(md, geo);
}
static int dm_prepare_ioctl(struct mapped_device *md, int *srcu_idx,
struct block_device **bdev)
{
struct dm_target *ti;
struct dm_table *map;
int r;
retry:
r = -ENOTTY;
map = dm_get_live_table(md, srcu_idx);
if (!map || !dm_table_get_size(map))
return r;
/* We only support devices that have a single target */
if (map->num_targets != 1)
return r;
ti = dm_table_get_target(map, 0);
if (!ti->type->prepare_ioctl)
return r;
if (dm_suspended_md(md))
return -EAGAIN;
r = ti->type->prepare_ioctl(ti, bdev);
if (r == -ENOTCONN && !fatal_signal_pending(current)) {
dm_put_live_table(md, *srcu_idx);
fsleep(10000);
goto retry;
}
return r;
}
static void dm_unprepare_ioctl(struct mapped_device *md, int srcu_idx)
{
dm_put_live_table(md, srcu_idx);
}
static int dm_blk_ioctl(struct block_device *bdev, blk_mode_t mode,
unsigned int cmd, unsigned long arg)
{
struct mapped_device *md = bdev->bd_disk->private_data;
int r, srcu_idx;
r = dm_prepare_ioctl(md, &srcu_idx, &bdev);
if (r < 0)
goto out;
if (r > 0) {
/*
* Target determined this ioctl is being issued against a
* subset of the parent bdev; require extra privileges.
*/
if (!capable(CAP_SYS_RAWIO)) {
DMDEBUG_LIMIT(
"%s: sending ioctl %x to DM device without required privilege.",
current->comm, cmd);
r = -ENOIOCTLCMD;
goto out;
}
}
if (!bdev->bd_disk->fops->ioctl)
r = -ENOTTY;
else
r = bdev->bd_disk->fops->ioctl(bdev, mode, cmd, arg);
out:
dm_unprepare_ioctl(md, srcu_idx);
return r;
}
u64 dm_start_time_ns_from_clone(struct bio *bio)
{
return jiffies_to_nsecs(clone_to_tio(bio)->io->start_time);
}
EXPORT_SYMBOL_GPL(dm_start_time_ns_from_clone);
static inline bool bio_is_flush_with_data(struct bio *bio)
{
return ((bio->bi_opf & REQ_PREFLUSH) && bio->bi_iter.bi_size);
}
static inline unsigned int dm_io_sectors(struct dm_io *io, struct bio *bio)
{
/*
* If REQ_PREFLUSH set, don't account payload, it will be
* submitted (and accounted) after this flush completes.
*/
if (bio_is_flush_with_data(bio))
return 0;
if (unlikely(dm_io_flagged(io, DM_IO_WAS_SPLIT)))
return io->sectors;
return bio_sectors(bio);
}
static void dm_io_acct(struct dm_io *io, bool end)
{
struct bio *bio = io->orig_bio;
if (dm_io_flagged(io, DM_IO_BLK_STAT)) {
if (!end)
bdev_start_io_acct(bio->bi_bdev, bio_op(bio),
io->start_time);
else
bdev_end_io_acct(bio->bi_bdev, bio_op(bio),
dm_io_sectors(io, bio),
io->start_time);
}
if (static_branch_unlikely(&stats_enabled) &&
unlikely(dm_stats_used(&io->md->stats))) {
sector_t sector;
if (unlikely(dm_io_flagged(io, DM_IO_WAS_SPLIT)))
sector = bio_end_sector(bio) - io->sector_offset;
else
sector = bio->bi_iter.bi_sector;
dm_stats_account_io(&io->md->stats, bio_data_dir(bio),
sector, dm_io_sectors(io, bio),
end, io->start_time, &io->stats_aux);
}
}
static void __dm_start_io_acct(struct dm_io *io)
{
dm_io_acct(io, false);
}
static void dm_start_io_acct(struct dm_io *io, struct bio *clone)
{
/*
* Ensure IO accounting is only ever started once.
*/
if (dm_io_flagged(io, DM_IO_ACCOUNTED))
return;
/* Expect no possibility for race unless DM_TIO_IS_DUPLICATE_BIO. */
if (!clone || likely(dm_tio_is_normal(clone_to_tio(clone)))) {
dm_io_set_flag(io, DM_IO_ACCOUNTED);
} else {
unsigned long flags;
/* Can afford locking given DM_TIO_IS_DUPLICATE_BIO */
spin_lock_irqsave(&io->lock, flags);
if (dm_io_flagged(io, DM_IO_ACCOUNTED)) {
spin_unlock_irqrestore(&io->lock, flags);
return;
}
dm_io_set_flag(io, DM_IO_ACCOUNTED);
spin_unlock_irqrestore(&io->lock, flags);
}
__dm_start_io_acct(io);
}
static void dm_end_io_acct(struct dm_io *io)
{
dm_io_acct(io, true);
}
static struct dm_io *alloc_io(struct mapped_device *md, struct bio *bio)
{
struct dm_io *io;
struct dm_target_io *tio;
struct bio *clone;
clone = bio_alloc_clone(NULL, bio, GFP_NOIO, &md->mempools->io_bs);
tio = clone_to_tio(clone);
tio->flags = 0;
dm_tio_set_flag(tio, DM_TIO_INSIDE_DM_IO);
tio->io = NULL;
io = container_of(tio, struct dm_io, tio);
io->magic = DM_IO_MAGIC;
io->status = BLK_STS_OK;
/* one ref is for submission, the other is for completion */
atomic_set(&io->io_count, 2);
this_cpu_inc(*md->pending_io);
io->orig_bio = bio;
io->md = md;
spin_lock_init(&io->lock);
io->start_time = jiffies;
io->flags = 0;
if (blk_queue_io_stat(md->queue))
dm_io_set_flag(io, DM_IO_BLK_STAT);
if (static_branch_unlikely(&stats_enabled) &&
unlikely(dm_stats_used(&md->stats)))
dm_stats_record_start(&md->stats, &io->stats_aux);
return io;
}
static void free_io(struct dm_io *io)
{
bio_put(&io->tio.clone);
}
static struct bio *alloc_tio(struct clone_info *ci, struct dm_target *ti,
unsigned int target_bio_nr, unsigned int *len, gfp_t gfp_mask)
{
struct mapped_device *md = ci->io->md;
struct dm_target_io *tio;
struct bio *clone;
if (!ci->io->tio.io) {
/* the dm_target_io embedded in ci->io is available */
tio = &ci->io->tio;
/* alloc_io() already initialized embedded clone */
clone = &tio->clone;
} else {
clone = bio_alloc_clone(NULL, ci->bio, gfp_mask,
&md->mempools->bs);
if (!clone)
return NULL;
/* REQ_DM_POLL_LIST shouldn't be inherited */
clone->bi_opf &= ~REQ_DM_POLL_LIST;
tio = clone_to_tio(clone);
tio->flags = 0; /* also clears DM_TIO_INSIDE_DM_IO */
}
tio->magic = DM_TIO_MAGIC;
tio->io = ci->io;
tio->ti = ti;
tio->target_bio_nr = target_bio_nr;
tio->len_ptr = len;
tio->old_sector = 0;
/* Set default bdev, but target must bio_set_dev() before issuing IO */
clone->bi_bdev = md->disk->part0;
if (unlikely(ti->needs_bio_set_dev))
bio_set_dev(clone, md->disk->part0);
if (len) {
clone->bi_iter.bi_size = to_bytes(*len);
if (bio_integrity(clone))
bio_integrity_trim(clone);
}
return clone;
}
static void free_tio(struct bio *clone)
{
if (dm_tio_flagged(clone_to_tio(clone), DM_TIO_INSIDE_DM_IO))
return;
bio_put(clone);
}
/*
* Add the bio to the list of deferred io.
*/
static void queue_io(struct mapped_device *md, struct bio *bio)
{
unsigned long flags;
spin_lock_irqsave(&md->deferred_lock, flags);
bio_list_add(&md->deferred, bio);
spin_unlock_irqrestore(&md->deferred_lock, flags);
queue_work(md->wq, &md->work);
}
/*
* Everyone (including functions in this file), should use this
* function to access the md->map field, and make sure they call
* dm_put_live_table() when finished.
*/
struct dm_table *dm_get_live_table(struct mapped_device *md,
int *srcu_idx) __acquires(md->io_barrier)
{
*srcu_idx = srcu_read_lock(&md->io_barrier);
return srcu_dereference(md->map, &md->io_barrier);
}
void dm_put_live_table(struct mapped_device *md,
int srcu_idx) __releases(md->io_barrier)
{
srcu_read_unlock(&md->io_barrier, srcu_idx);
}
void dm_sync_table(struct mapped_device *md)
{
synchronize_srcu(&md->io_barrier);
synchronize_rcu_expedited();
}
/*
* A fast alternative to dm_get_live_table/dm_put_live_table.
* The caller must not block between these two functions.
*/
static struct dm_table *dm_get_live_table_fast(struct mapped_device *md) __acquires(RCU)
{
rcu_read_lock();
return rcu_dereference(md->map);
}
static void dm_put_live_table_fast(struct mapped_device *md) __releases(RCU)
{
rcu_read_unlock();
}
static char *_dm_claim_ptr = "I belong to device-mapper";
/*
* Open a table device so we can use it as a map destination.
*/
static struct table_device *open_table_device(struct mapped_device *md,
dev_t dev, blk_mode_t mode)
{
struct table_device *td;
struct block_device *bdev;
u64 part_off;
int r;
td = kmalloc_node(sizeof(*td), GFP_KERNEL, md->numa_node_id);
if (!td)
return ERR_PTR(-ENOMEM);
refcount_set(&td->count, 1);
bdev = blkdev_get_by_dev(dev, mode, _dm_claim_ptr, NULL);
if (IS_ERR(bdev)) {
r = PTR_ERR(bdev);
goto out_free_td;
}
/*
* We can be called before the dm disk is added. In that case we can't
* register the holder relation here. It will be done once add_disk() has
* been called.
*/
if (md->disk->slave_dir) {
r = bd_link_disk_holder(bdev, md->disk);
if (r)
goto out_blkdev_put;
}
td->dm_dev.mode = mode;
td->dm_dev.bdev = bdev;
td->dm_dev.dax_dev = fs_dax_get_by_bdev(bdev, &part_off, NULL, NULL);
format_dev_t(td->dm_dev.name, dev);
list_add(&td->list, &md->table_devices);
return td;
out_blkdev_put:
blkdev_put(bdev, _dm_claim_ptr);
out_free_td:
kfree(td);
return ERR_PTR(r);
}
/*
* Close a table device that we've been using.
*/
static void close_table_device(struct table_device *td, struct mapped_device *md)
{
if (md->disk->slave_dir)
bd_unlink_disk_holder(td->dm_dev.bdev, md->disk);
blkdev_put(td->dm_dev.bdev, _dm_claim_ptr);
put_dax(td->dm_dev.dax_dev);
list_del(&td->list);
kfree(td);
}
static struct table_device *find_table_device(struct list_head *l, dev_t dev,
blk_mode_t mode)
{
struct table_device *td;
list_for_each_entry(td, l, list)
if (td->dm_dev.bdev->bd_dev == dev && td->dm_dev.mode == mode)
return td;
return NULL;
}
int dm_get_table_device(struct mapped_device *md, dev_t dev, blk_mode_t mode,
struct dm_dev **result)
{
struct table_device *td;
mutex_lock(&md->table_devices_lock);
td = find_table_device(&md->table_devices, dev, mode);
if (!td) {
td = open_table_device(md, dev, mode);
if (IS_ERR(td)) {
mutex_unlock(&md->table_devices_lock);
return PTR_ERR(td);
}
} else {
refcount_inc(&td->count);
}
mutex_unlock(&md->table_devices_lock);
*result = &td->dm_dev;
return 0;
}
void dm_put_table_device(struct mapped_device *md, struct dm_dev *d)
{
struct table_device *td = container_of(d, struct table_device, dm_dev);
mutex_lock(&md->table_devices_lock);
if (refcount_dec_and_test(&td->count))
close_table_device(td, md);
mutex_unlock(&md->table_devices_lock);
}
/*
* Get the geometry associated with a dm device
*/
int dm_get_geometry(struct mapped_device *md, struct hd_geometry *geo)
{
*geo = md->geometry;
return 0;
}
/*
* Set the geometry of a device.
*/
int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo)
{
sector_t sz = (sector_t)geo->cylinders * geo->heads * geo->sectors;
if (geo->start > sz) {
DMERR("Start sector is beyond the geometry limits.");
return -EINVAL;
}
md->geometry = *geo;
return 0;
}
static int __noflush_suspending(struct mapped_device *md)
{
return test_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
}
static void dm_requeue_add_io(struct dm_io *io, bool first_stage)
{
struct mapped_device *md = io->md;
if (first_stage) {
struct dm_io *next = md->requeue_list;
md->requeue_list = io;
io->next = next;
} else {
bio_list_add_head(&md->deferred, io->orig_bio);
}
}
static void dm_kick_requeue(struct mapped_device *md, bool first_stage)
{
if (first_stage)
queue_work(md->wq, &md->requeue_work);
else
queue_work(md->wq, &md->work);
}
/*
* Return true if the dm_io's original bio is requeued.
* io->status is updated with error if requeue disallowed.
*/
static bool dm_handle_requeue(struct dm_io *io, bool first_stage)
{
struct bio *bio = io->orig_bio;
bool handle_requeue = (io->status == BLK_STS_DM_REQUEUE);
bool handle_polled_eagain = ((io->status == BLK_STS_AGAIN) &&
(bio->bi_opf & REQ_POLLED));
struct mapped_device *md = io->md;
bool requeued = false;
if (handle_requeue || handle_polled_eagain) {
unsigned long flags;
if (bio->bi_opf & REQ_POLLED) {
/*
* Upper layer won't help us poll split bio
* (io->orig_bio may only reflect a subset of the
* pre-split original) so clear REQ_POLLED.
*/
bio_clear_polled(bio);
}
/*
* Target requested pushing back the I/O or
* polled IO hit BLK_STS_AGAIN.
*/
spin_lock_irqsave(&md->deferred_lock, flags);
if ((__noflush_suspending(md) &&
!WARN_ON_ONCE(dm_is_zone_write(md, bio))) ||
handle_polled_eagain || first_stage) {
dm_requeue_add_io(io, first_stage);
requeued = true;
} else {
/*
* noflush suspend was interrupted or this is
* a write to a zoned target.
*/
io->status = BLK_STS_IOERR;
}
spin_unlock_irqrestore(&md->deferred_lock, flags);
}
if (requeued)
dm_kick_requeue(md, first_stage);
return requeued;
}
static void __dm_io_complete(struct dm_io *io, bool first_stage)
{
struct bio *bio = io->orig_bio;
struct mapped_device *md = io->md;
blk_status_t io_error;
bool requeued;
requeued = dm_handle_requeue(io, first_stage);
if (requeued && first_stage)
return;
io_error = io->status;
if (dm_io_flagged(io, DM_IO_ACCOUNTED))
dm_end_io_acct(io);
else if (!io_error) {
/*
* Must handle a target that returned DM_MAPIO_SUBMITTED only to
* then call bio_endio() itself rather than dm_submit_bio_remap()
*/
__dm_start_io_acct(io);
dm_end_io_acct(io);
}
free_io(io);
smp_wmb();
this_cpu_dec(*md->pending_io);
/* nudge anyone waiting on suspend queue */
if (unlikely(wq_has_sleeper(&md->wait)))
wake_up(&md->wait);
/* Return early if the original bio was requeued */
if (requeued)
return;
if (bio_is_flush_with_data(bio)) {
/*
* Preflush done for flush with data, reissue
* without REQ_PREFLUSH.
*/
bio->bi_opf &= ~REQ_PREFLUSH;
queue_io(md, bio);
} else {
/* done with normal IO or empty flush */
if (io_error)
bio->bi_status = io_error;
bio_endio(bio);
}
}
static void dm_wq_requeue_work(struct work_struct *work)
{
struct mapped_device *md = container_of(work, struct mapped_device,
requeue_work);
unsigned long flags;
struct dm_io *io;
/* reuse deferred lock to simplify dm_handle_requeue */
spin_lock_irqsave(&md->deferred_lock, flags);
io = md->requeue_list;
md->requeue_list = NULL;
spin_unlock_irqrestore(&md->deferred_lock, flags);
while (io) {
struct dm_io *next = io->next;
dm_io_rewind(io, &md->disk->bio_split);
io->next = NULL;
__dm_io_complete(io, false);
io = next;
cond_resched();
}
}
/*
* Two staged requeue:
*
* 1) io->orig_bio points to the real original bio, and the part mapped to
* this io must be requeued, instead of other parts of the original bio.
*
* 2) io->orig_bio points to new cloned bio which matches the requeued dm_io.
*/
static void dm_io_complete(struct dm_io *io)
{
bool first_requeue;
/*
* Only a dm_io that has been split needs the two stage requeue;
* otherwise we may run into a long bio clone chain during suspend and
* OOM could be triggered.
*
* Also, flush-with-data dm_io won't be marked as DM_IO_WAS_SPLIT, so it
* isn't handled via the first stage requeue either.
*/
if (dm_io_flagged(io, DM_IO_WAS_SPLIT))
first_requeue = true;
else
first_requeue = false;
__dm_io_complete(io, first_requeue);
}
/*
* Decrements the number of outstanding ios that a bio has been
* cloned into, completing the original io if necessary.
*/
static inline void __dm_io_dec_pending(struct dm_io *io)
{
if (atomic_dec_and_test(&io->io_count))
dm_io_complete(io);
}
static void dm_io_set_error(struct dm_io *io, blk_status_t error)
{
unsigned long flags;
/* Push-back supersedes any I/O errors */
spin_lock_irqsave(&io->lock, flags);
if (!(io->status == BLK_STS_DM_REQUEUE &&
__noflush_suspending(io->md))) {
io->status = error;
}
spin_unlock_irqrestore(&io->lock, flags);
}
static void dm_io_dec_pending(struct dm_io *io, blk_status_t error)
{
if (unlikely(error))
dm_io_set_error(io, error);
__dm_io_dec_pending(io);
}
/*
* The queue_limits are only valid as long as you have a reference
* count on 'md'. We deliberately do not verify this, to avoid an atomic_read().
*/
static inline struct queue_limits *dm_get_queue_limits(struct mapped_device *md)
{
return &md->queue->limits;
}
void disable_discard(struct mapped_device *md)
{
struct queue_limits *limits = dm_get_queue_limits(md);
/* device doesn't really support DISCARD, disable it */
limits->max_discard_sectors = 0;
}
void disable_write_zeroes(struct mapped_device *md)
{
struct queue_limits *limits = dm_get_queue_limits(md);
/* device doesn't really support WRITE ZEROES, disable it */
limits->max_write_zeroes_sectors = 0;
}
static bool swap_bios_limit(struct dm_target *ti, struct bio *bio)
{
return unlikely((bio->bi_opf & REQ_SWAP) != 0) && unlikely(ti->limit_swap_bios);
}
static void clone_endio(struct bio *bio)
{
blk_status_t error = bio->bi_status;
struct dm_target_io *tio = clone_to_tio(bio);
struct dm_target *ti = tio->ti;
dm_endio_fn endio = ti->type->end_io;
struct dm_io *io = tio->io;
struct mapped_device *md = io->md;
if (unlikely(error == BLK_STS_TARGET)) {
if (bio_op(bio) == REQ_OP_DISCARD &&
!bdev_max_discard_sectors(bio->bi_bdev))
disable_discard(md);
else if (bio_op(bio) == REQ_OP_WRITE_ZEROES &&
!bdev_write_zeroes_sectors(bio->bi_bdev))
disable_write_zeroes(md);
}
if (static_branch_unlikely(&zoned_enabled) &&
unlikely(bdev_is_zoned(bio->bi_bdev)))
dm_zone_endio(io, bio);
if (endio) {
int r = endio(ti, bio, &error);
switch (r) {
case DM_ENDIO_REQUEUE:
if (static_branch_unlikely(&zoned_enabled)) {
/*
* Requeuing writes to a sequential zone of a zoned
* target will break the sequential write pattern:
* fail such IO.
*/
if (WARN_ON_ONCE(dm_is_zone_write(md, bio)))
error = BLK_STS_IOERR;
else
error = BLK_STS_DM_REQUEUE;
} else
error = BLK_STS_DM_REQUEUE;
fallthrough;
case DM_ENDIO_DONE:
break;
case DM_ENDIO_INCOMPLETE:
/* The target will handle the io */
return;
default:
DMCRIT("unimplemented target endio return value: %d", r);
BUG();
}
}
if (static_branch_unlikely(&swap_bios_enabled) &&
unlikely(swap_bios_limit(ti, bio)))
up(&md->swap_bios_semaphore);
free_tio(bio);
dm_io_dec_pending(io, error);
}
/*
* Return maximum size of I/O possible at the supplied sector up to the current
* target boundary.
*/
static inline sector_t max_io_len_target_boundary(struct dm_target *ti,
sector_t target_offset)
{
return ti->len - target_offset;
}
static sector_t __max_io_len(struct dm_target *ti, sector_t sector,
unsigned int max_granularity,
unsigned int max_sectors)
{
sector_t target_offset = dm_target_offset(ti, sector);
sector_t len = max_io_len_target_boundary(ti, target_offset);
/*
* Does the target need to split IO even further?
* - varied (per target) IO splitting is a tenet of DM; this
* explains why stacked chunk_sectors based splitting via
* bio_split_to_limits() isn't possible here.
*/
if (!max_granularity)
return len;
return min_t(sector_t, len,
min(max_sectors ? : queue_max_sectors(ti->table->md->queue),
blk_chunk_sectors_left(target_offset, max_granularity)));
}
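/*
 * Worked example (illustrative numbers only): with target_offset = 100,
 * max_granularity = 64 and max_sectors = 0, blk_chunk_sectors_left(100, 64)
 * returns 64 - (100 & 63) = 28 sectors of room before the next 64-sector
 * boundary, so the IO is capped at min(len, min(queue_max_sectors(), 28)).
 */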
static inline sector_t max_io_len(struct dm_target *ti, sector_t sector)
{
return __max_io_len(ti, sector, ti->max_io_len, 0);
}
int dm_set_target_max_io_len(struct dm_target *ti, sector_t len)
{
if (len > UINT_MAX) {
DMERR("Specified maximum size of target IO (%llu) exceeds limit (%u)",
(unsigned long long)len, UINT_MAX);
ti->error = "Maximum size of target IO is too large";
return -EINVAL;
}
ti->max_io_len = (uint32_t) len;
return 0;
}
EXPORT_SYMBOL_GPL(dm_set_target_max_io_len);
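/*
 * Example (a hypothetical sketch, not taken from any in-tree target): a
 * target constructor would typically derive the limit from its on-disk
 * layout and register it before returning, e.g.:
 *
 *	static int example_ctr(struct dm_target *ti, unsigned int argc,
 *			       char **argv)
 *	{
 *		int r = dm_set_target_max_io_len(ti, 128);
 *
 *		if (r)
 *			return r;
 *		return 0;
 *	}
 *
 * DM core then splits incoming IO at 128-sector boundaries relative to the
 * start of the target (see max_io_len() above).
 */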
static struct dm_target *dm_dax_get_live_target(struct mapped_device *md,
sector_t sector, int *srcu_idx)
__acquires(md->io_barrier)
{
struct dm_table *map;
struct dm_target *ti;
map = dm_get_live_table(md, srcu_idx);
if (!map)
return NULL;
ti = dm_table_find_target(map, sector);
if (!ti)
return NULL;
return ti;
}
static long dm_dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff,
long nr_pages, enum dax_access_mode mode, void **kaddr,
pfn_t *pfn)
{
struct mapped_device *md = dax_get_private(dax_dev);
sector_t sector = pgoff * PAGE_SECTORS;
struct dm_target *ti;
long len, ret = -EIO;
int srcu_idx;
ti = dm_dax_get_live_target(md, sector, &srcu_idx);
if (!ti)
goto out;
if (!ti->type->direct_access)
goto out;
len = max_io_len(ti, sector) / PAGE_SECTORS;
if (len < 1)
goto out;
nr_pages = min(len, nr_pages);
ret = ti->type->direct_access(ti, pgoff, nr_pages, mode, kaddr, pfn);
out:
dm_put_live_table(md, srcu_idx);
return ret;
}
static int dm_dax_zero_page_range(struct dax_device *dax_dev, pgoff_t pgoff,
size_t nr_pages)
{
struct mapped_device *md = dax_get_private(dax_dev);
sector_t sector = pgoff * PAGE_SECTORS;
struct dm_target *ti;
int ret = -EIO;
int srcu_idx;
ti = dm_dax_get_live_target(md, sector, &srcu_idx);
if (!ti)
goto out;
if (WARN_ON(!ti->type->dax_zero_page_range)) {
/*
* ->zero_page_range() is mandatory dax operation. If we are
* here, something is wrong.
*/
goto out;
}
ret = ti->type->dax_zero_page_range(ti, pgoff, nr_pages);
out:
dm_put_live_table(md, srcu_idx);
return ret;
}
static size_t dm_dax_recovery_write(struct dax_device *dax_dev, pgoff_t pgoff,
void *addr, size_t bytes, struct iov_iter *i)
{
struct mapped_device *md = dax_get_private(dax_dev);
sector_t sector = pgoff * PAGE_SECTORS;
struct dm_target *ti;
int srcu_idx;
long ret = 0;
ti = dm_dax_get_live_target(md, sector, &srcu_idx);
if (!ti || !ti->type->dax_recovery_write)
goto out;
ret = ti->type->dax_recovery_write(ti, pgoff, addr, bytes, i);
out:
dm_put_live_table(md, srcu_idx);
return ret;
}
/*
* A target may call dm_accept_partial_bio only from the map routine. It is
* allowed for all bio types except REQ_PREFLUSH, REQ_OP_ZONE_* zone management
* operations, REQ_OP_ZONE_APPEND (zone append writes) and any bio serviced by
* __send_duplicate_bios().
*
 * dm_accept_partial_bio informs DM core that the target only wants to
 * process an additional n_sectors sectors of the bio and that the rest of
 * the data should be sent in a subsequent bio.
*
 * A diagram that explains the arithmetic:
* +--------------------+---------------+-------+
* | 1 | 2 | 3 |
* +--------------------+---------------+-------+
*
* <-------------- *tio->len_ptr --------------->
* <----- bio_sectors ----->
* <-- n_sectors -->
*
* Region 1 was already iterated over with bio_advance or similar function.
* (it may be empty if the target doesn't use bio_advance)
* Region 2 is the remaining bio size that the target wants to process.
* (it may be empty if region 1 is non-empty, although there is no reason
* to make it empty)
* The target requires that region 3 is to be sent in the next bio.
*
* If the target wants to receive multiple copies of the bio (via num_*bios, etc),
* the partially processed part (the sum of regions 1+2) must be the same for all
* copies of the bio.
*/
void dm_accept_partial_bio(struct bio *bio, unsigned int n_sectors)
{
struct dm_target_io *tio = clone_to_tio(bio);
struct dm_io *io = tio->io;
unsigned int bio_sectors = bio_sectors(bio);
BUG_ON(dm_tio_flagged(tio, DM_TIO_IS_DUPLICATE_BIO));
BUG_ON(op_is_zone_mgmt(bio_op(bio)));
BUG_ON(bio_op(bio) == REQ_OP_ZONE_APPEND);
BUG_ON(bio_sectors > *tio->len_ptr);
BUG_ON(n_sectors > bio_sectors);
*tio->len_ptr -= bio_sectors - n_sectors;
bio->bi_iter.bi_size = n_sectors << SECTOR_SHIFT;
/*
 * __split_and_process_bio() may have already saved the mapped part
 * for accounting, but it is being reduced here, so update accordingly.
*/
dm_io_set_flag(io, DM_IO_WAS_SPLIT);
io->sectors = n_sectors;
io->sector_offset = bio_sectors(io->orig_bio);
}
EXPORT_SYMBOL_GPL(dm_accept_partial_bio);
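/*
 * Example (a hypothetical sketch, not taken from any in-tree target): a
 * ->map method that only wants to handle the first part of each bio can
 * accept a partial region and let DM core resubmit the remainder:
 *
 *	static int example_map(struct dm_target *ti, struct bio *bio)
 *	{
 *		struct example_ctx *ec = ti->private;
 *
 *		if (bio_sectors(bio) > ec->chunk_sectors)
 *			dm_accept_partial_bio(bio, ec->chunk_sectors);
 *		bio_set_dev(bio, ec->dev->bdev);
 *		return DM_MAPIO_REMAPPED;
 *	}
 *
 * "struct example_ctx" and its fields are made up for illustration; the
 * sectors beyond ec->chunk_sectors come back to ->map in a follow-up bio.
 */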
/*
* @clone: clone bio that DM core passed to target's .map function
* @tgt_clone: clone of @clone bio that target needs submitted
*
* Targets should use this interface to submit bios they take
* ownership of when returning DM_MAPIO_SUBMITTED.
*
* Target should also enable ti->accounts_remapped_io
*/
void dm_submit_bio_remap(struct bio *clone, struct bio *tgt_clone)
{
struct dm_target_io *tio = clone_to_tio(clone);
struct dm_io *io = tio->io;
/* establish bio that will get submitted */
if (!tgt_clone)
tgt_clone = clone;
/*
 * Account io->orig_bio to DM dev on behalf of target
* that took ownership of IO with DM_MAPIO_SUBMITTED.
*/
dm_start_io_acct(io, clone);
trace_block_bio_remap(tgt_clone, disk_devt(io->md->disk),
tio->old_sector);
submit_bio_noacct(tgt_clone);
}
EXPORT_SYMBOL_GPL(dm_submit_bio_remap);
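/*
 * Example (a hypothetical sketch, not taken from any in-tree target): a
 * target that queues the clone to a worker and returns DM_MAPIO_SUBMITTED
 * from ->map would set ti->accounts_remapped_io in its constructor and,
 * once the worker is ready to issue the IO, call:
 *
 *	dm_submit_bio_remap(clone, NULL);
 *
 * Passing NULL submits the clone DM core handed to ->map; a target that
 * built its own clone of that bio passes it as the second argument instead.
 */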
static noinline void __set_swap_bios_limit(struct mapped_device *md, int latch)
{
mutex_lock(&md->swap_bios_lock);
while (latch < md->swap_bios) {
cond_resched();
down(&md->swap_bios_semaphore);
md->swap_bios--;
}
while (latch > md->swap_bios) {
cond_resched();
up(&md->swap_bios_semaphore);
md->swap_bios++;
}
mutex_unlock(&md->swap_bios_lock);
}
static void __map_bio(struct bio *clone)
{
struct dm_target_io *tio = clone_to_tio(clone);
struct dm_target *ti = tio->ti;
struct dm_io *io = tio->io;
struct mapped_device *md = io->md;
int r;
clone->bi_end_io = clone_endio;
/*
* Map the clone.
*/
tio->old_sector = clone->bi_iter.bi_sector;
if (static_branch_unlikely(&swap_bios_enabled) &&
unlikely(swap_bios_limit(ti, clone))) {
int latch = get_swap_bios();
if (unlikely(latch != md->swap_bios))
__set_swap_bios_limit(md, latch);
down(&md->swap_bios_semaphore);
}
if (static_branch_unlikely(&zoned_enabled)) {
/*
* Check if the IO needs a special mapping due to zone append
 * emulation on a zoned target. In this case, dm_zone_map_bio()
* calls the target map operation.
*/
if (unlikely(dm_emulate_zone_append(md)))
r = dm_zone_map_bio(tio);
else
r = ti->type->map(ti, clone);
} else
r = ti->type->map(ti, clone);
switch (r) {
case DM_MAPIO_SUBMITTED:
/* target has assumed ownership of this io */
if (!ti->accounts_remapped_io)
dm_start_io_acct(io, clone);
break;
case DM_MAPIO_REMAPPED:
dm_submit_bio_remap(clone, NULL);
break;
case DM_MAPIO_KILL:
case DM_MAPIO_REQUEUE:
if (static_branch_unlikely(&swap_bios_enabled) &&
unlikely(swap_bios_limit(ti, clone)))
up(&md->swap_bios_semaphore);
free_tio(clone);
if (r == DM_MAPIO_KILL)
dm_io_dec_pending(io, BLK_STS_IOERR);
else
dm_io_dec_pending(io, BLK_STS_DM_REQUEUE);
break;
default:
DMCRIT("unimplemented target map return value: %d", r);
BUG();
}
}
static void setup_split_accounting(struct clone_info *ci, unsigned int len)
{
struct dm_io *io = ci->io;
if (ci->sector_count > len) {
/*
* Split needed, save the mapped part for accounting.
* NOTE: dm_accept_partial_bio() will update accordingly.
*/
dm_io_set_flag(io, DM_IO_WAS_SPLIT);
io->sectors = len;
io->sector_offset = bio_sectors(ci->bio);
}
}
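/*
 * Allocate num_bios tios up front. The first pass uses GFP_NOWAIT so the
 * common case never sleeps; if any allocation fails, everything taken so
 * far is freed and a second pass retries with GFP_NOIO under
 * md->table_devices_lock, so the sleeping mempool allocations are
 * serialised and two submitters cannot each hold part of the pool while
 * waiting for the rest.
 */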
static void alloc_multiple_bios(struct bio_list *blist, struct clone_info *ci,
struct dm_target *ti, unsigned int num_bios,
unsigned *len)
{
struct bio *bio;
int try;
for (try = 0; try < 2; try++) {
int bio_nr;
if (try)
mutex_lock(&ci->io->md->table_devices_lock);
for (bio_nr = 0; bio_nr < num_bios; bio_nr++) {
bio = alloc_tio(ci, ti, bio_nr, len,
try ? GFP_NOIO : GFP_NOWAIT);
if (!bio)
break;
bio_list_add(blist, bio);
}
if (try)
mutex_unlock(&ci->io->md->table_devices_lock);
if (bio_nr == num_bios)
return;
while ((bio = bio_list_pop(blist)))
free_tio(bio);
}
}
static int __send_duplicate_bios(struct clone_info *ci, struct dm_target *ti,
unsigned int num_bios, unsigned int *len)
{
struct bio_list blist = BIO_EMPTY_LIST;
struct bio *clone;
unsigned int ret = 0;
switch (num_bios) {
case 0:
break;
case 1:
if (len)
setup_split_accounting(ci, *len);
clone = alloc_tio(ci, ti, 0, len, GFP_NOIO);
__map_bio(clone);
ret = 1;
break;
default:
if (len)
setup_split_accounting(ci, *len);
/* dm_accept_partial_bio() is not supported with shared tio->len_ptr */
alloc_multiple_bios(&blist, ci, ti, num_bios, len);
while ((clone = bio_list_pop(&blist))) {
dm_tio_set_flag(clone_to_tio(clone), DM_TIO_IS_DUPLICATE_BIO);
__map_bio(clone);
ret += 1;
}
break;
}
return ret;
}
static void __send_empty_flush(struct clone_info *ci)
{
struct dm_table *t = ci->map;
struct bio flush_bio;
/*
 * Use an on-stack bio for this; it's safe since we don't
 * need to reference it after submit. It's just used as
* the basis for the clone(s).
*/
bio_init(&flush_bio, ci->io->md->disk->part0, NULL, 0,
REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC);
ci->bio = &flush_bio;
ci->sector_count = 0;
ci->io->tio.clone.bi_iter.bi_size = 0;
for (unsigned int i = 0; i < t->num_targets; i++) {
unsigned int bios;
struct dm_target *ti = dm_table_get_target(t, i);
atomic_add(ti->num_flush_bios, &ci->io->io_count);
bios = __send_duplicate_bios(ci, ti, ti->num_flush_bios, NULL);
atomic_sub(ti->num_flush_bios - bios, &ci->io->io_count);
}
/*
* alloc_io() takes one extra reference for submission, so the
* reference won't reach 0 without the following subtraction
*/
atomic_sub(1, &ci->io->io_count);
bio_uninit(ci->bio);
}
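/*
 * Reference-count walk-through (numbers for illustration only): with two
 * targets that each have num_flush_bios = 1, io_count is bumped by one per
 * target before cloning and corrected back down by (num_flush_bios - bios)
 * for any clones that could not be sent. If both clones are issued, that
 * is +2 and -0; the final atomic_sub(1) then drops the extra submission
 * reference taken by alloc_io(), so the flush completes once both clones do.
 */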
static void __send_changing_extent_only(struct clone_info *ci, struct dm_target *ti,
unsigned int num_bios,
unsigned int max_granularity,
unsigned int max_sectors)
{
unsigned int len, bios;
len = min_t(sector_t, ci->sector_count,
__max_io_len(ti, ci->sector, max_granularity, max_sectors));
atomic_add(num_bios, &ci->io->io_count);
bios = __send_duplicate_bios(ci, ti, num_bios, &len);
/*
* alloc_io() takes one extra reference for submission, so the
* reference won't reach 0 without the following (+1) subtraction
*/
atomic_sub(num_bios - bios + 1, &ci->io->io_count);
ci->sector += len;
ci->sector_count -= len;
}
static bool is_abnormal_io(struct bio *bio)
{
enum req_op op = bio_op(bio);
if (op != REQ_OP_READ && op != REQ_OP_WRITE && op != REQ_OP_FLUSH) {
switch (op) {
case REQ_OP_DISCARD:
case REQ_OP_SECURE_ERASE:
case REQ_OP_WRITE_ZEROES:
return true;
default:
break;
}
}
return false;
}
static blk_status_t __process_abnormal_io(struct clone_info *ci,
struct dm_target *ti)
{
unsigned int num_bios = 0;
unsigned int max_granularity = 0;
unsigned int max_sectors = 0;
struct queue_limits *limits = dm_get_queue_limits(ti->table->md);
switch (bio_op(ci->bio)) {
case REQ_OP_DISCARD:
num_bios = ti->num_discard_bios;
max_sectors = limits->max_discard_sectors;
if (ti->max_discard_granularity)
max_granularity = max_sectors;
break;
case REQ_OP_SECURE_ERASE:
num_bios = ti->num_secure_erase_bios;
max_sectors = limits->max_secure_erase_sectors;
if (ti->max_secure_erase_granularity)
max_granularity = max_sectors;
break;
case REQ_OP_WRITE_ZEROES:
num_bios = ti->num_write_zeroes_bios;
max_sectors = limits->max_write_zeroes_sectors;
if (ti->max_write_zeroes_granularity)
max_granularity = max_sectors;
break;
default:
break;
}
/*
* Even though the device advertised support for this type of
* request, that does not mean every target supports it, and
* reconfiguration might also have changed that since the
* check was performed.
*/
if (unlikely(!num_bios))
return BLK_STS_NOTSUPP;
__send_changing_extent_only(ci, ti, num_bios,
max_granularity, max_sectors);
return BLK_STS_OK;
}
/*
* Reuse ->bi_private as dm_io list head for storing all dm_io instances
* associated with this bio, and this bio's bi_private needs to be
* stored in dm_io->data before the reuse.
*
* bio->bi_private is owned by fs or upper layer, so block layer won't
 * touch it after splitting. Meanwhile, it won't be changed by anyone after
 * the bio is submitted. So this reuse is safe.
*/
static inline struct dm_io **dm_poll_list_head(struct bio *bio)
{
return (struct dm_io **)&bio->bi_private;
}
static void dm_queue_poll_io(struct bio *bio, struct dm_io *io)
{
struct dm_io **head = dm_poll_list_head(bio);
if (!(bio->bi_opf & REQ_DM_POLL_LIST)) {
bio->bi_opf |= REQ_DM_POLL_LIST;
/*
* Save .bi_private into dm_io, so that we can reuse
* .bi_private as dm_io list head for storing dm_io list
*/
io->data = bio->bi_private;
/* tell block layer to poll for completion */
bio->bi_cookie = ~BLK_QC_T_NONE;
io->next = NULL;
} else {
/*
* bio recursed due to split, reuse original poll list,
* and save bio->bi_private too.
*/
io->data = (*head)->data;
io->next = *head;
}
*head = io;
}
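/*
 * Shape of the poll list after one split (illustration only):
 *
 *	bio->bi_private -> io2 -> io1 -> NULL
 *
 * where io2->data == io1->data == the original bi_private. dm_poll_bio()
 * walks this list, restores ->bi_private from ->data, and re-threads any
 * dm_io that is still in flight.
 */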
/*
* Select the correct strategy for processing a non-flush bio.
*/
static blk_status_t __split_and_process_bio(struct clone_info *ci)
{
struct bio *clone;
struct dm_target *ti;
unsigned int len;
ti = dm_table_find_target(ci->map, ci->sector);
if (unlikely(!ti))
return BLK_STS_IOERR;
if (unlikely((ci->bio->bi_opf & REQ_NOWAIT) != 0) &&
unlikely(!dm_target_supports_nowait(ti->type)))
return BLK_STS_NOTSUPP;
if (unlikely(ci->is_abnormal_io))
return __process_abnormal_io(ci, ti);
/*
* Only support bio polling for normal IO, and the target io is
* exactly inside the dm_io instance (verified in dm_poll_dm_io)
*/
ci->submit_as_polled = !!(ci->bio->bi_opf & REQ_POLLED);
len = min_t(sector_t, max_io_len(ti, ci->sector), ci->sector_count);
setup_split_accounting(ci, len);
clone = alloc_tio(ci, ti, 0, &len, GFP_NOIO);
__map_bio(clone);
ci->sector += len;
ci->sector_count -= len;
return BLK_STS_OK;
}
static void init_clone_info(struct clone_info *ci, struct mapped_device *md,
struct dm_table *map, struct bio *bio, bool is_abnormal)
{
ci->map = map;
ci->io = alloc_io(md, bio);
ci->bio = bio;
ci->is_abnormal_io = is_abnormal;
ci->submit_as_polled = false;
ci->sector = bio->bi_iter.bi_sector;
ci->sector_count = bio_sectors(bio);
/* Shouldn't happen but sector_count was being set to 0 so... */
if (static_branch_unlikely(&zoned_enabled) &&
WARN_ON_ONCE(op_is_zone_mgmt(bio_op(bio)) && ci->sector_count))
ci->sector_count = 0;
}
/*
* Entry point to split a bio into clones and submit them to the targets.
*/
static void dm_split_and_process_bio(struct mapped_device *md,
struct dm_table *map, struct bio *bio)
{
struct clone_info ci;
struct dm_io *io;
blk_status_t error = BLK_STS_OK;
bool is_abnormal;
is_abnormal = is_abnormal_io(bio);
if (unlikely(is_abnormal)) {
/*
* Use bio_split_to_limits() for abnormal IO (e.g. discard, etc)
* otherwise associated queue_limits won't be imposed.
*/
bio = bio_split_to_limits(bio);
if (!bio)
return;
}
init_clone_info(&ci, md, map, bio, is_abnormal);
io = ci.io;
if (bio->bi_opf & REQ_PREFLUSH) {
__send_empty_flush(&ci);
/* dm_io_complete submits any data associated with flush */
goto out;
}
error = __split_and_process_bio(&ci);
if (error || !ci.sector_count)
goto out;
/*
* Remainder must be passed to submit_bio_noacct() so it gets handled
* *after* bios already submitted have been completely processed.
*/
bio_trim(bio, io->sectors, ci.sector_count);
trace_block_split(bio, bio->bi_iter.bi_sector);
bio_inc_remaining(bio);
submit_bio_noacct(bio);
out:
/*
* Drop the extra reference count for non-POLLED bio, and hold one
* reference for POLLED bio, which will be released in dm_poll_bio
*
* Add every dm_io instance into the dm_io list head which is stored
* in bio->bi_private, so that dm_poll_bio can poll them all.
*/
if (error || !ci.submit_as_polled) {
/*
* In case of submission failure, the extra reference for
* submitting io isn't consumed yet
*/
if (error)
atomic_dec(&io->io_count);
dm_io_dec_pending(io, error);
} else
dm_queue_poll_io(bio, io);
}
static void dm_submit_bio(struct bio *bio)
{
struct mapped_device *md = bio->bi_bdev->bd_disk->private_data;
int srcu_idx;
struct dm_table *map;
map = dm_get_live_table(md, &srcu_idx);
/* If suspended, or map not yet available, queue this IO for later */
if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) ||
unlikely(!map)) {
if (bio->bi_opf & REQ_NOWAIT)
bio_wouldblock_error(bio);
else if (bio->bi_opf & REQ_RAHEAD)
bio_io_error(bio);
else
queue_io(md, bio);
goto out;
}
dm_split_and_process_bio(md, map, bio);
out:
dm_put_live_table(md, srcu_idx);
}
static bool dm_poll_dm_io(struct dm_io *io, struct io_comp_batch *iob,
unsigned int flags)
{
WARN_ON_ONCE(!dm_tio_is_normal(&io->tio));
/* don't poll if the mapped io is done */
if (atomic_read(&io->io_count) > 1)
bio_poll(&io->tio.clone, iob, flags);
/* bio_poll holds the last reference */
return atomic_read(&io->io_count) == 1;
}
static int dm_poll_bio(struct bio *bio, struct io_comp_batch *iob,
unsigned int flags)
{
struct dm_io **head = dm_poll_list_head(bio);
struct dm_io *list = *head;
struct dm_io *tmp = NULL;
struct dm_io *curr, *next;
/* Only poll normal bio which was marked as REQ_DM_POLL_LIST */
if (!(bio->bi_opf & REQ_DM_POLL_LIST))
return 0;
WARN_ON_ONCE(!list);
/*
* Restore .bi_private before possibly completing dm_io.
*
* bio_poll() is only possible once @bio has been completely
* submitted via submit_bio_noacct()'s depth-first submission.
* So there is no dm_queue_poll_io() race associated with
* clearing REQ_DM_POLL_LIST here.
*/
bio->bi_opf &= ~REQ_DM_POLL_LIST;
bio->bi_private = list->data;
for (curr = list, next = curr->next; curr; curr = next, next =
curr ? curr->next : NULL) {
if (dm_poll_dm_io(curr, iob, flags)) {
/*
* clone_endio() has already occurred, so no
* error handling is needed here.
*/
__dm_io_dec_pending(curr);
} else {
curr->next = tmp;
tmp = curr;
}
}
/* Not done? */
if (tmp) {
bio->bi_opf |= REQ_DM_POLL_LIST;
/* Reset bio->bi_private to dm_io list head */
*head = tmp;
return 0;
}
return 1;
}
/*
*---------------------------------------------------------------
* An IDR is used to keep track of allocated minor numbers.
*---------------------------------------------------------------
*/
static void free_minor(int minor)
{
spin_lock(&_minor_lock);
idr_remove(&_minor_idr, minor);
spin_unlock(&_minor_lock);
}
/*
* See if the device with a specific minor # is free.
*/
static int specific_minor(int minor)
{
int r;
if (minor >= (1 << MINORBITS))
return -EINVAL;
idr_preload(GFP_KERNEL);
spin_lock(&_minor_lock);
r = idr_alloc(&_minor_idr, MINOR_ALLOCED, minor, minor + 1, GFP_NOWAIT);
spin_unlock(&_minor_lock);
idr_preload_end();
if (r < 0)
return r == -ENOSPC ? -EBUSY : r;
return 0;
}
static int next_free_minor(int *minor)
{
int r;
idr_preload(GFP_KERNEL);
spin_lock(&_minor_lock);
r = idr_alloc(&_minor_idr, MINOR_ALLOCED, 0, 1 << MINORBITS, GFP_NOWAIT);
spin_unlock(&_minor_lock);
idr_preload_end();
if (r < 0)
return r;
*minor = r;
return 0;
}
static const struct block_device_operations dm_blk_dops;
static const struct block_device_operations dm_rq_blk_dops;
static const struct dax_operations dm_dax_ops;
static void dm_wq_work(struct work_struct *work);
#ifdef CONFIG_BLK_INLINE_ENCRYPTION
static void dm_queue_destroy_crypto_profile(struct request_queue *q)
{
dm_destroy_crypto_profile(q->crypto_profile);
}
#else /* CONFIG_BLK_INLINE_ENCRYPTION */
static inline void dm_queue_destroy_crypto_profile(struct request_queue *q)
{
}
#endif /* !CONFIG_BLK_INLINE_ENCRYPTION */
static void cleanup_mapped_device(struct mapped_device *md)
{
if (md->wq)
destroy_workqueue(md->wq);
dm_free_md_mempools(md->mempools);
if (md->dax_dev) {
dax_remove_host(md->disk);
kill_dax(md->dax_dev);
put_dax(md->dax_dev);
md->dax_dev = NULL;
}
dm_cleanup_zoned_dev(md);
if (md->disk) {
spin_lock(&_minor_lock);
md->disk->private_data = NULL;
spin_unlock(&_minor_lock);
if (dm_get_md_type(md) != DM_TYPE_NONE) {
struct table_device *td;
dm_sysfs_exit(md);
list_for_each_entry(td, &md->table_devices, list) {
bd_unlink_disk_holder(td->dm_dev.bdev,
md->disk);
}
/*
 * Hold the lock to make sure del_gendisk() won't race
 * with open/close_table_device().
*/
mutex_lock(&md->table_devices_lock);
del_gendisk(md->disk);
mutex_unlock(&md->table_devices_lock);
}
dm_queue_destroy_crypto_profile(md->queue);
put_disk(md->disk);
}
if (md->pending_io) {
free_percpu(md->pending_io);
md->pending_io = NULL;
}
cleanup_srcu_struct(&md->io_barrier);
mutex_destroy(&md->suspend_lock);
mutex_destroy(&md->type_lock);
mutex_destroy(&md->table_devices_lock);
mutex_destroy(&md->swap_bios_lock);
dm_mq_cleanup_mapped_device(md);
}
/*
* Allocate and initialise a blank device with a given minor.
*/
static struct mapped_device *alloc_dev(int minor)
{
int r, numa_node_id = dm_get_numa_node();
struct mapped_device *md;
void *old_md;
md = kvzalloc_node(sizeof(*md), GFP_KERNEL, numa_node_id);
if (!md) {
DMERR("unable to allocate device, out of memory.");
return NULL;
}
if (!try_module_get(THIS_MODULE))
goto bad_module_get;
/* get a minor number for the dev */
if (minor == DM_ANY_MINOR)
r = next_free_minor(&minor);
else
r = specific_minor(minor);
if (r < 0)
goto bad_minor;
r = init_srcu_struct(&md->io_barrier);
if (r < 0)
goto bad_io_barrier;
md->numa_node_id = numa_node_id;
md->init_tio_pdu = false;
md->type = DM_TYPE_NONE;
mutex_init(&md->suspend_lock);
mutex_init(&md->type_lock);
mutex_init(&md->table_devices_lock);
spin_lock_init(&md->deferred_lock);
atomic_set(&md->holders, 1);
atomic_set(&md->open_count, 0);
atomic_set(&md->event_nr, 0);
atomic_set(&md->uevent_seq, 0);
INIT_LIST_HEAD(&md->uevent_list);
INIT_LIST_HEAD(&md->table_devices);
spin_lock_init(&md->uevent_lock);
/*
 * Default to bio-based until a DM table is loaded and md->type is
 * established. If a request-based table is loaded, blk-mq will
 * override accordingly.
*/
md->disk = blk_alloc_disk(md->numa_node_id);
if (!md->disk)
goto bad;
md->queue = md->disk->queue;
init_waitqueue_head(&md->wait);
INIT_WORK(&md->work, dm_wq_work);
INIT_WORK(&md->requeue_work, dm_wq_requeue_work);
init_waitqueue_head(&md->eventq);
init_completion(&md->kobj_holder.completion);
md->requeue_list = NULL;
md->swap_bios = get_swap_bios();
sema_init(&md->swap_bios_semaphore, md->swap_bios);
mutex_init(&md->swap_bios_lock);
md->disk->major = _major;
md->disk->first_minor = minor;
md->disk->minors = 1;
md->disk->flags |= GENHD_FL_NO_PART;
md->disk->fops = &dm_blk_dops;
md->disk->private_data = md;
sprintf(md->disk->disk_name, "dm-%d", minor);
if (IS_ENABLED(CONFIG_FS_DAX)) {
md->dax_dev = alloc_dax(md, &dm_dax_ops);
if (IS_ERR(md->dax_dev)) {
md->dax_dev = NULL;
goto bad;
}
set_dax_nocache(md->dax_dev);
set_dax_nomc(md->dax_dev);
if (dax_add_host(md->dax_dev, md->disk))
goto bad;
}
format_dev_t(md->name, MKDEV(_major, minor));
md->wq = alloc_workqueue("kdmflush/%s", WQ_MEM_RECLAIM, 0, md->name);
if (!md->wq)
goto bad;
md->pending_io = alloc_percpu(unsigned long);
if (!md->pending_io)
goto bad;
r = dm_stats_init(&md->stats);
if (r < 0)
goto bad;
/* Populate the mapping, nobody knows we exist yet */
spin_lock(&_minor_lock);
old_md = idr_replace(&_minor_idr, md, minor);
spin_unlock(&_minor_lock);
BUG_ON(old_md != MINOR_ALLOCED);
return md;
bad:
cleanup_mapped_device(md);
bad_io_barrier:
free_minor(minor);
bad_minor:
module_put(THIS_MODULE);
bad_module_get:
kvfree(md);
return NULL;
}
static void unlock_fs(struct mapped_device *md);
static void free_dev(struct mapped_device *md)
{
int minor = MINOR(disk_devt(md->disk));
unlock_fs(md);
cleanup_mapped_device(md);
WARN_ON_ONCE(!list_empty(&md->table_devices));
dm_stats_cleanup(&md->stats);
free_minor(minor);
module_put(THIS_MODULE);
kvfree(md);
}
/*
* Bind a table to the device.
*/
static void event_callback(void *context)
{
unsigned long flags;
LIST_HEAD(uevents);
struct mapped_device *md = context;
spin_lock_irqsave(&md->uevent_lock, flags);
list_splice_init(&md->uevent_list, &uevents);
spin_unlock_irqrestore(&md->uevent_lock, flags);
dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
atomic_inc(&md->event_nr);
wake_up(&md->eventq);
dm_issue_global_event();
}
/*
* Returns old map, which caller must destroy.
*/
static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t,
struct queue_limits *limits)
{
struct dm_table *old_map;
sector_t size;
int ret;
lockdep_assert_held(&md->suspend_lock);
size = dm_table_get_size(t);
/*
* Wipe any geometry if the size of the table changed.
*/
if (size != dm_get_size(md))
memset(&md->geometry, 0, sizeof(md->geometry));
set_capacity(md->disk, size);
dm_table_event_callback(t, event_callback, md);
if (dm_table_request_based(t)) {
/*
* Leverage the fact that request-based DM targets are
* immutable singletons - used to optimize dm_mq_queue_rq.
*/
md->immutable_target = dm_table_get_immutable_target(t);
/*
* There is no need to reload with request-based dm because the
* size of front_pad doesn't change.
*
* Note for future: If you are to reload bioset, prep-ed
* requests in the queue may refer to bio from the old bioset,
* so you must walk through the queue to unprep.
*/
if (!md->mempools) {
md->mempools = t->mempools;
t->mempools = NULL;
}
} else {
/*
* The md may already have mempools that need changing.
* If so, reload bioset because front_pad may have changed
* because a different table was loaded.
*/
dm_free_md_mempools(md->mempools);
md->mempools = t->mempools;
t->mempools = NULL;
}
ret = dm_table_set_restrictions(t, md->queue, limits);
if (ret) {
old_map = ERR_PTR(ret);
goto out;
}
old_map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock));
rcu_assign_pointer(md->map, (void *)t);
md->immutable_target_type = dm_table_get_immutable_target_type(t);
if (old_map)
dm_sync_table(md);
out:
return old_map;
}
/*
* Returns unbound table for the caller to free.
*/
static struct dm_table *__unbind(struct mapped_device *md)
{
struct dm_table *map = rcu_dereference_protected(md->map, 1);
if (!map)
return NULL;
dm_table_event_callback(map, NULL, NULL);
RCU_INIT_POINTER(md->map, NULL);
dm_sync_table(md);
return map;
}
/*
* Constructor for a new device.
*/
int dm_create(int minor, struct mapped_device **result)
{
struct mapped_device *md;
md = alloc_dev(minor);
if (!md)
return -ENXIO;
dm_ima_reset_data(md);
*result = md;
return 0;
}
/*
* Functions to manage md->type.
* All are required to hold md->type_lock.
*/
void dm_lock_md_type(struct mapped_device *md)
{
mutex_lock(&md->type_lock);
}
void dm_unlock_md_type(struct mapped_device *md)
{
mutex_unlock(&md->type_lock);
}
void dm_set_md_type(struct mapped_device *md, enum dm_queue_mode type)
{
BUG_ON(!mutex_is_locked(&md->type_lock));
md->type = type;
}
enum dm_queue_mode dm_get_md_type(struct mapped_device *md)
{
return md->type;
}
struct target_type *dm_get_immutable_target_type(struct mapped_device *md)
{
return md->immutable_target_type;
}
/*
* Setup the DM device's queue based on md's type
*/
int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t)
{
enum dm_queue_mode type = dm_table_get_type(t);
struct queue_limits limits;
struct table_device *td;
int r;
switch (type) {
case DM_TYPE_REQUEST_BASED:
md->disk->fops = &dm_rq_blk_dops;
r = dm_mq_init_request_queue(md, t);
if (r) {
DMERR("Cannot initialize queue for request-based dm mapped device");
return r;
}
break;
case DM_TYPE_BIO_BASED:
case DM_TYPE_DAX_BIO_BASED:
blk_queue_flag_set(QUEUE_FLAG_IO_STAT, md->queue);
break;
case DM_TYPE_NONE:
WARN_ON_ONCE(true);
break;
}
r = dm_calculate_queue_limits(t, &limits);
if (r) {
DMERR("Cannot calculate initial queue limits");
return r;
}
r = dm_table_set_restrictions(t, md->queue, &limits);
if (r)
return r;
/*
 * Hold the lock to make sure add_disk() and del_gendisk() won't race
* with open_table_device() and close_table_device().
*/
mutex_lock(&md->table_devices_lock);
r = add_disk(md->disk);
mutex_unlock(&md->table_devices_lock);
if (r)
return r;
/*
* Register the holder relationship for devices added before the disk
* was live.
*/
list_for_each_entry(td, &md->table_devices, list) {
r = bd_link_disk_holder(td->dm_dev.bdev, md->disk);
if (r)
goto out_undo_holders;
}
r = dm_sysfs_init(md);
if (r)
goto out_undo_holders;
md->type = type;
return 0;
out_undo_holders:
list_for_each_entry_continue_reverse(td, &md->table_devices, list)
bd_unlink_disk_holder(td->dm_dev.bdev, md->disk);
mutex_lock(&md->table_devices_lock);
del_gendisk(md->disk);
mutex_unlock(&md->table_devices_lock);
return r;
}
struct mapped_device *dm_get_md(dev_t dev)
{
struct mapped_device *md;
unsigned int minor = MINOR(dev);
if (MAJOR(dev) != _major || minor >= (1 << MINORBITS))
return NULL;
spin_lock(&_minor_lock);
md = idr_find(&_minor_idr, minor);
if (!md || md == MINOR_ALLOCED || (MINOR(disk_devt(dm_disk(md))) != minor) ||
test_bit(DMF_FREEING, &md->flags) || dm_deleting_md(md)) {
md = NULL;
goto out;
}
dm_get(md);
out:
spin_unlock(&_minor_lock);
return md;
}
EXPORT_SYMBOL_GPL(dm_get_md);
void *dm_get_mdptr(struct mapped_device *md)
{
return md->interface_ptr;
}
void dm_set_mdptr(struct mapped_device *md, void *ptr)
{
md->interface_ptr = ptr;
}
void dm_get(struct mapped_device *md)
{
atomic_inc(&md->holders);
BUG_ON(test_bit(DMF_FREEING, &md->flags));
}
int dm_hold(struct mapped_device *md)
{
spin_lock(&_minor_lock);
if (test_bit(DMF_FREEING, &md->flags)) {
spin_unlock(&_minor_lock);
return -EBUSY;
}
dm_get(md);
spin_unlock(&_minor_lock);
return 0;
}
EXPORT_SYMBOL_GPL(dm_hold);
const char *dm_device_name(struct mapped_device *md)
{
return md->name;
}
EXPORT_SYMBOL_GPL(dm_device_name);
static void __dm_destroy(struct mapped_device *md, bool wait)
{
struct dm_table *map;
int srcu_idx;
might_sleep();
spin_lock(&_minor_lock);
idr_replace(&_minor_idr, MINOR_ALLOCED, MINOR(disk_devt(dm_disk(md))));
set_bit(DMF_FREEING, &md->flags);
spin_unlock(&_minor_lock);
blk_mark_disk_dead(md->disk);
/*
* Take suspend_lock so that presuspend and postsuspend methods
* do not race with internal suspend.
*/
mutex_lock(&md->suspend_lock);
map = dm_get_live_table(md, &srcu_idx);
if (!dm_suspended_md(md)) {
dm_table_presuspend_targets(map);
set_bit(DMF_SUSPENDED, &md->flags);
set_bit(DMF_POST_SUSPENDING, &md->flags);
dm_table_postsuspend_targets(map);
}
/* dm_put_live_table must be before fsleep, otherwise deadlock is possible */
dm_put_live_table(md, srcu_idx);
mutex_unlock(&md->suspend_lock);
/*
 * Rare, but there may still be I/O requests in flight that need
 * to complete. Wait for all references to disappear.
 * No one should increment the reference count of the mapped_device
 * after the mapped_device state becomes DMF_FREEING.
*/
if (wait)
while (atomic_read(&md->holders))
fsleep(1000);
else if (atomic_read(&md->holders))
DMWARN("%s: Forcibly removing mapped_device still in use! (%d users)",
dm_device_name(md), atomic_read(&md->holders));
dm_table_destroy(__unbind(md));
free_dev(md);
}
void dm_destroy(struct mapped_device *md)
{
__dm_destroy(md, true);
}
void dm_destroy_immediate(struct mapped_device *md)
{
__dm_destroy(md, false);
}
void dm_put(struct mapped_device *md)
{
atomic_dec(&md->holders);
}
EXPORT_SYMBOL_GPL(dm_put);
static bool dm_in_flight_bios(struct mapped_device *md)
{
int cpu;
unsigned long sum = 0;
for_each_possible_cpu(cpu)
sum += *per_cpu_ptr(md->pending_io, cpu);
return sum != 0;
}
static int dm_wait_for_bios_completion(struct mapped_device *md, unsigned int task_state)
{
int r = 0;
DEFINE_WAIT(wait);
while (true) {
prepare_to_wait(&md->wait, &wait, task_state);
if (!dm_in_flight_bios(md))
break;
if (signal_pending_state(task_state, current)) {
r = -EINTR;
break;
}
io_schedule();
}
finish_wait(&md->wait, &wait);
smp_rmb();
return r;
}
static int dm_wait_for_completion(struct mapped_device *md, unsigned int task_state)
{
int r = 0;
if (!queue_is_mq(md->queue))
return dm_wait_for_bios_completion(md, task_state);
while (true) {
if (!blk_mq_queue_inflight(md->queue))
break;
if (signal_pending_state(task_state, current)) {
r = -EINTR;
break;
}
fsleep(5000);
}
return r;
}
/*
* Process the deferred bios
*/
static void dm_wq_work(struct work_struct *work)
{
struct mapped_device *md = container_of(work, struct mapped_device, work);
struct bio *bio;
while (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) {
spin_lock_irq(&md->deferred_lock);
bio = bio_list_pop(&md->deferred);
spin_unlock_irq(&md->deferred_lock);
if (!bio)
break;
submit_bio_noacct(bio);
cond_resched();
}
}
static void dm_queue_flush(struct mapped_device *md)
{
clear_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
smp_mb__after_atomic();
queue_work(md->wq, &md->work);
}
/*
* Swap in a new table, returning the old one for the caller to destroy.
*/
struct dm_table *dm_swap_table(struct mapped_device *md, struct dm_table *table)
{
struct dm_table *live_map = NULL, *map = ERR_PTR(-EINVAL);
struct queue_limits limits;
int r;
mutex_lock(&md->suspend_lock);
/* device must be suspended */
if (!dm_suspended_md(md))
goto out;
/*
* If the new table has no data devices, retain the existing limits.
* This helps multipath with queue_if_no_path if all paths disappear,
* then new I/O is queued based on these limits, and then some paths
* reappear.
*/
if (dm_table_has_no_data_devices(table)) {
live_map = dm_get_live_table_fast(md);
if (live_map)
limits = md->queue->limits;
dm_put_live_table_fast(md);
}
if (!live_map) {
r = dm_calculate_queue_limits(table, &limits);
if (r) {
map = ERR_PTR(r);
goto out;
}
}
map = __bind(md, table, &limits);
dm_issue_global_event();
out:
mutex_unlock(&md->suspend_lock);
return map;
}
/*
* Functions to lock and unlock any filesystem running on the
* device.
*/
static int lock_fs(struct mapped_device *md)
{
int r;
WARN_ON(test_bit(DMF_FROZEN, &md->flags));
r = freeze_bdev(md->disk->part0);
if (!r)
set_bit(DMF_FROZEN, &md->flags);
return r;
}
static void unlock_fs(struct mapped_device *md)
{
if (!test_bit(DMF_FROZEN, &md->flags))
return;
thaw_bdev(md->disk->part0);
clear_bit(DMF_FROZEN, &md->flags);
}
/*
* @suspend_flags: DM_SUSPEND_LOCKFS_FLAG and/or DM_SUSPEND_NOFLUSH_FLAG
* @task_state: e.g. TASK_INTERRUPTIBLE or TASK_UNINTERRUPTIBLE
* @dmf_suspended_flag: DMF_SUSPENDED or DMF_SUSPENDED_INTERNALLY
*
* If __dm_suspend returns 0, the device is completely quiescent
* now. There is no request-processing activity. All new requests
* are being added to md->deferred list.
*/
static int __dm_suspend(struct mapped_device *md, struct dm_table *map,
unsigned int suspend_flags, unsigned int task_state,
int dmf_suspended_flag)
{
bool do_lockfs = suspend_flags & DM_SUSPEND_LOCKFS_FLAG;
bool noflush = suspend_flags & DM_SUSPEND_NOFLUSH_FLAG;
int r;
lockdep_assert_held(&md->suspend_lock);
/*
* DMF_NOFLUSH_SUSPENDING must be set before presuspend.
* This flag is cleared before dm_suspend returns.
*/
if (noflush)
set_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
else
DMDEBUG("%s: suspending with flush", dm_device_name(md));
/*
* This gets reverted if there's an error later and the targets
* provide the .presuspend_undo hook.
*/
dm_table_presuspend_targets(map);
/*
* Flush I/O to the device.
* Any I/O submitted after lock_fs() may not be flushed.
* noflush takes precedence over do_lockfs.
* (lock_fs() flushes I/Os and waits for them to complete.)
*/
if (!noflush && do_lockfs) {
r = lock_fs(md);
if (r) {
dm_table_presuspend_undo_targets(map);
return r;
}
}
/*
* Here we must make sure that no processes are submitting requests
* to target drivers i.e. no one may be executing
* dm_split_and_process_bio from dm_submit_bio.
*
* To get all processes out of dm_split_and_process_bio in dm_submit_bio,
* we take the write lock. To prevent any process from reentering
* dm_split_and_process_bio from dm_submit_bio and quiesce the thread
* (dm_wq_work), we set DMF_BLOCK_IO_FOR_SUSPEND and call
* flush_workqueue(md->wq).
*/
set_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
if (map)
synchronize_srcu(&md->io_barrier);
/*
* Stop md->queue before flushing md->wq in case request-based
* dm defers requests to md->wq from md->queue.
*/
if (dm_request_based(md))
dm_stop_queue(md->queue);
flush_workqueue(md->wq);
/*
* At this point no more requests are entering target request routines.
* We call dm_wait_for_completion to wait for all existing requests
* to finish.
*/
r = dm_wait_for_completion(md, task_state);
if (!r)
set_bit(dmf_suspended_flag, &md->flags);
if (noflush)
clear_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
if (map)
synchronize_srcu(&md->io_barrier);
/* were we interrupted ? */
if (r < 0) {
dm_queue_flush(md);
if (dm_request_based(md))
dm_start_queue(md->queue);
unlock_fs(md);
dm_table_presuspend_undo_targets(map);
/* pushback list is already flushed, so skip flush */
}
return r;
}
/*
* We need to be able to change a mapping table under a mounted
* filesystem. For example we might want to move some data in
* the background. Before the table can be swapped with
* dm_bind_table, dm_suspend must be called to flush any in
* flight bios and ensure that any further io gets deferred.
*/
/*
* Suspend mechanism in request-based dm.
*
* 1. Flush all I/Os by lock_fs() if needed.
* 2. Stop dispatching any I/O by stopping the request_queue.
* 3. Wait for all in-flight I/Os to be completed or requeued.
*
* To abort suspend, start the request_queue.
*/
int dm_suspend(struct mapped_device *md, unsigned int suspend_flags)
{
struct dm_table *map = NULL;
int r = 0;
retry:
mutex_lock_nested(&md->suspend_lock, SINGLE_DEPTH_NESTING);
if (dm_suspended_md(md)) {
r = -EINVAL;
goto out_unlock;
}
if (dm_suspended_internally_md(md)) {
/* already internally suspended, wait for internal resume */
mutex_unlock(&md->suspend_lock);
r = wait_on_bit(&md->flags, DMF_SUSPENDED_INTERNALLY, TASK_INTERRUPTIBLE);
if (r)
return r;
goto retry;
}
map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock));
if (!map) {
/* avoid deadlock with fs/namespace.c:do_mount() */
suspend_flags &= ~DM_SUSPEND_LOCKFS_FLAG;
}
r = __dm_suspend(md, map, suspend_flags, TASK_INTERRUPTIBLE, DMF_SUSPENDED);
if (r)
goto out_unlock;
set_bit(DMF_POST_SUSPENDING, &md->flags);
dm_table_postsuspend_targets(map);
clear_bit(DMF_POST_SUSPENDING, &md->flags);
out_unlock:
mutex_unlock(&md->suspend_lock);
return r;
}
static int __dm_resume(struct mapped_device *md, struct dm_table *map)
{
if (map) {
int r = dm_table_resume_targets(map);
if (r)
return r;
}
dm_queue_flush(md);
/*
* Flushing deferred I/Os must be done after targets are resumed
* so that mapping of targets can work correctly.
 * Request-based dm queues the deferred I/Os in its request_queue.
*/
if (dm_request_based(md))
dm_start_queue(md->queue);
unlock_fs(md);
return 0;
}
int dm_resume(struct mapped_device *md)
{
int r;
struct dm_table *map = NULL;
retry:
r = -EINVAL;
mutex_lock_nested(&md->suspend_lock, SINGLE_DEPTH_NESTING);
if (!dm_suspended_md(md))
goto out;
if (dm_suspended_internally_md(md)) {
/* already internally suspended, wait for internal resume */
mutex_unlock(&md->suspend_lock);
r = wait_on_bit(&md->flags, DMF_SUSPENDED_INTERNALLY, TASK_INTERRUPTIBLE);
if (r)
return r;
goto retry;
}
map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock));
if (!map || !dm_table_get_size(map))
goto out;
r = __dm_resume(md, map);
if (r)
goto out;
clear_bit(DMF_SUSPENDED, &md->flags);
out:
mutex_unlock(&md->suspend_lock);
return r;
}
/*
* Internal suspend/resume works like userspace-driven suspend. It waits
* until all bios finish and prevents issuing new bios to the target drivers.
* It may be used only from the kernel.
*/
static void __dm_internal_suspend(struct mapped_device *md, unsigned int suspend_flags)
{
struct dm_table *map = NULL;
lockdep_assert_held(&md->suspend_lock);
if (md->internal_suspend_count++)
return; /* nested internal suspend */
if (dm_suspended_md(md)) {
set_bit(DMF_SUSPENDED_INTERNALLY, &md->flags);
return; /* nest suspend */
}
map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock));
/*
* Using TASK_UNINTERRUPTIBLE because only NOFLUSH internal suspend is
* supported. Properly supporting a TASK_INTERRUPTIBLE internal suspend
* would require changing .presuspend to return an error -- avoid this
* until there is a need for more elaborate variants of internal suspend.
*/
(void) __dm_suspend(md, map, suspend_flags, TASK_UNINTERRUPTIBLE,
DMF_SUSPENDED_INTERNALLY);
set_bit(DMF_POST_SUSPENDING, &md->flags);
dm_table_postsuspend_targets(map);
clear_bit(DMF_POST_SUSPENDING, &md->flags);
}
static void __dm_internal_resume(struct mapped_device *md)
{
BUG_ON(!md->internal_suspend_count);
if (--md->internal_suspend_count)
return; /* resume from nested internal suspend */
if (dm_suspended_md(md))
goto done; /* resume from nested suspend */
/*
* NOTE: existing callers don't need to call dm_table_resume_targets
* (which may fail -- so best to avoid it for now by passing NULL map)
*/
(void) __dm_resume(md, NULL);
done:
clear_bit(DMF_SUSPENDED_INTERNALLY, &md->flags);
smp_mb__after_atomic();
wake_up_bit(&md->flags, DMF_SUSPENDED_INTERNALLY);
}
void dm_internal_suspend_noflush(struct mapped_device *md)
{
mutex_lock(&md->suspend_lock);
__dm_internal_suspend(md, DM_SUSPEND_NOFLUSH_FLAG);
mutex_unlock(&md->suspend_lock);
}
EXPORT_SYMBOL_GPL(dm_internal_suspend_noflush);
void dm_internal_resume(struct mapped_device *md)
{
mutex_lock(&md->suspend_lock);
__dm_internal_resume(md);
mutex_unlock(&md->suspend_lock);
}
EXPORT_SYMBOL_GPL(dm_internal_resume);
/*
* Fast variants of internal suspend/resume hold md->suspend_lock,
* which prevents interaction with userspace-driven suspend.
*/
void dm_internal_suspend_fast(struct mapped_device *md)
{
mutex_lock(&md->suspend_lock);
if (dm_suspended_md(md) || dm_suspended_internally_md(md))
return;
set_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
synchronize_srcu(&md->io_barrier);
flush_workqueue(md->wq);
dm_wait_for_completion(md, TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL_GPL(dm_internal_suspend_fast);
void dm_internal_resume_fast(struct mapped_device *md)
{
if (dm_suspended_md(md) || dm_suspended_internally_md(md))
goto done;
dm_queue_flush(md);
done:
mutex_unlock(&md->suspend_lock);
}
EXPORT_SYMBOL_GPL(dm_internal_resume_fast);
/*
*---------------------------------------------------------------
* Event notification.
*---------------------------------------------------------------
*/
int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
unsigned int cookie, bool need_resize_uevent)
{
int r;
unsigned int noio_flag;
char udev_cookie[DM_COOKIE_LENGTH];
char *envp[3] = { NULL, NULL, NULL };
char **envpp = envp;
if (cookie) {
snprintf(udev_cookie, DM_COOKIE_LENGTH, "%s=%u",
DM_COOKIE_ENV_VAR_NAME, cookie);
*envpp++ = udev_cookie;
}
if (need_resize_uevent) {
*envpp++ = "RESIZE=1";
}
noio_flag = memalloc_noio_save();
r = kobject_uevent_env(&disk_to_dev(md->disk)->kobj, action, envp);
memalloc_noio_restore(noio_flag);
return r;
}
uint32_t dm_next_uevent_seq(struct mapped_device *md)
{
return atomic_add_return(1, &md->uevent_seq);
}
uint32_t dm_get_event_nr(struct mapped_device *md)
{
return atomic_read(&md->event_nr);
}
int dm_wait_event(struct mapped_device *md, int event_nr)
{
return wait_event_interruptible(md->eventq,
(event_nr != atomic_read(&md->event_nr)));
}
void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
{
unsigned long flags;
spin_lock_irqsave(&md->uevent_lock, flags);
list_add(elist, &md->uevent_list);
spin_unlock_irqrestore(&md->uevent_lock, flags);
}
/*
* The gendisk is only valid as long as you have a reference
* count on 'md'.
*/
struct gendisk *dm_disk(struct mapped_device *md)
{
return md->disk;
}
EXPORT_SYMBOL_GPL(dm_disk);
struct kobject *dm_kobject(struct mapped_device *md)
{
return &md->kobj_holder.kobj;
}
struct mapped_device *dm_get_from_kobject(struct kobject *kobj)
{
struct mapped_device *md;
md = container_of(kobj, struct mapped_device, kobj_holder.kobj);
spin_lock(&_minor_lock);
if (test_bit(DMF_FREEING, &md->flags) || dm_deleting_md(md)) {
md = NULL;
goto out;
}
dm_get(md);
out:
spin_unlock(&_minor_lock);
return md;
}
int dm_suspended_md(struct mapped_device *md)
{
return test_bit(DMF_SUSPENDED, &md->flags);
}
static int dm_post_suspending_md(struct mapped_device *md)
{
return test_bit(DMF_POST_SUSPENDING, &md->flags);
}
int dm_suspended_internally_md(struct mapped_device *md)
{
return test_bit(DMF_SUSPENDED_INTERNALLY, &md->flags);
}
int dm_test_deferred_remove_flag(struct mapped_device *md)
{
return test_bit(DMF_DEFERRED_REMOVE, &md->flags);
}
int dm_suspended(struct dm_target *ti)
{
return dm_suspended_md(ti->table->md);
}
EXPORT_SYMBOL_GPL(dm_suspended);
int dm_post_suspending(struct dm_target *ti)
{
return dm_post_suspending_md(ti->table->md);
}
EXPORT_SYMBOL_GPL(dm_post_suspending);
int dm_noflush_suspending(struct dm_target *ti)
{
return __noflush_suspending(ti->table->md);
}
EXPORT_SYMBOL_GPL(dm_noflush_suspending);
void dm_free_md_mempools(struct dm_md_mempools *pools)
{
if (!pools)
return;
bioset_exit(&pools->bs);
bioset_exit(&pools->io_bs);
kfree(pools);
}
struct dm_pr {
u64 old_key;
u64 new_key;
u32 flags;
bool abort;
bool fail_early;
int ret;
enum pr_type type;
struct pr_keys *read_keys;
struct pr_held_reservation *rsv;
};
static int dm_call_pr(struct block_device *bdev, iterate_devices_callout_fn fn,
struct dm_pr *pr)
{
struct mapped_device *md = bdev->bd_disk->private_data;
struct dm_table *table;
struct dm_target *ti;
int ret = -ENOTTY, srcu_idx;
table = dm_get_live_table(md, &srcu_idx);
if (!table || !dm_table_get_size(table))
goto out;
/* We only support devices that have a single target */
if (table->num_targets != 1)
goto out;
ti = dm_table_get_target(table, 0);
if (dm_suspended_md(md)) {
ret = -EAGAIN;
goto out;
}
ret = -EINVAL;
if (!ti->type->iterate_devices)
goto out;
ti->type->iterate_devices(ti, fn, pr);
ret = 0;
out:
dm_put_live_table(md, srcu_idx);
return ret;
}
/*
* For register / unregister we need to manually call out to every path.
*/
static int __dm_pr_register(struct dm_target *ti, struct dm_dev *dev,
sector_t start, sector_t len, void *data)
{
struct dm_pr *pr = data;
const struct pr_ops *ops = dev->bdev->bd_disk->fops->pr_ops;
int ret;
if (!ops || !ops->pr_register) {
pr->ret = -EOPNOTSUPP;
return -1;
}
ret = ops->pr_register(dev->bdev, pr->old_key, pr->new_key, pr->flags);
if (!ret)
return 0;
if (!pr->ret)
pr->ret = ret;
if (pr->fail_early)
return -1;
return 0;
}
static int dm_pr_register(struct block_device *bdev, u64 old_key, u64 new_key,
u32 flags)
{
struct dm_pr pr = {
.old_key = old_key,
.new_key = new_key,
.flags = flags,
.fail_early = true,
.ret = 0,
};
int ret;
ret = dm_call_pr(bdev, __dm_pr_register, &pr);
if (ret) {
/* Didn't even get to register a path */
return ret;
}
if (!pr.ret)
return 0;
ret = pr.ret;
if (!new_key)
return ret;
/* unregister all paths if we failed to register any path */
pr.old_key = new_key;
pr.new_key = 0;
pr.flags = 0;
pr.fail_early = false;
(void) dm_call_pr(bdev, __dm_pr_register, &pr);
return ret;
}
static int __dm_pr_reserve(struct dm_target *ti, struct dm_dev *dev,
sector_t start, sector_t len, void *data)
{
struct dm_pr *pr = data;
const struct pr_ops *ops = dev->bdev->bd_disk->fops->pr_ops;
if (!ops || !ops->pr_reserve) {
pr->ret = -EOPNOTSUPP;
return -1;
}
pr->ret = ops->pr_reserve(dev->bdev, pr->old_key, pr->type, pr->flags);
if (!pr->ret)
return -1;
return 0;
}
static int dm_pr_reserve(struct block_device *bdev, u64 key, enum pr_type type,
u32 flags)
{
struct dm_pr pr = {
.old_key = key,
.flags = flags,
.type = type,
.fail_early = false,
.ret = 0,
};
int ret;
ret = dm_call_pr(bdev, __dm_pr_reserve, &pr);
if (ret)
return ret;
return pr.ret;
}
/*
* If there is a non-All Registrants type of reservation, the release must be
 * sent down the holding path. In the cases where there is no reservation, or
 * the path is not the holder, the device will also return success, so we must
 * try each path to make sure we hit the correct one.
*/
static int __dm_pr_release(struct dm_target *ti, struct dm_dev *dev,
sector_t start, sector_t len, void *data)
{
struct dm_pr *pr = data;
const struct pr_ops *ops = dev->bdev->bd_disk->fops->pr_ops;
if (!ops || !ops->pr_release) {
pr->ret = -EOPNOTSUPP;
return -1;
}
pr->ret = ops->pr_release(dev->bdev, pr->old_key, pr->type);
if (pr->ret)
return -1;
return 0;
}
static int dm_pr_release(struct block_device *bdev, u64 key, enum pr_type type)
{
struct dm_pr pr = {
.old_key = key,
.type = type,
.fail_early = false,
};
int ret;
ret = dm_call_pr(bdev, __dm_pr_release, &pr);
if (ret)
return ret;
return pr.ret;
}
static int __dm_pr_preempt(struct dm_target *ti, struct dm_dev *dev,
sector_t start, sector_t len, void *data)
{
struct dm_pr *pr = data;
const struct pr_ops *ops = dev->bdev->bd_disk->fops->pr_ops;
if (!ops || !ops->pr_preempt) {
pr->ret = -EOPNOTSUPP;
return -1;
}
pr->ret = ops->pr_preempt(dev->bdev, pr->old_key, pr->new_key, pr->type,
pr->abort);
if (!pr->ret)
return -1;
return 0;
}
static int dm_pr_preempt(struct block_device *bdev, u64 old_key, u64 new_key,
enum pr_type type, bool abort)
{
struct dm_pr pr = {
.new_key = new_key,
.old_key = old_key,
.type = type,
.fail_early = false,
};
int ret;
ret = dm_call_pr(bdev, __dm_pr_preempt, &pr);
if (ret)
return ret;
return pr.ret;
}
static int dm_pr_clear(struct block_device *bdev, u64 key)
{
struct mapped_device *md = bdev->bd_disk->private_data;
const struct pr_ops *ops;
int r, srcu_idx;
r = dm_prepare_ioctl(md, &srcu_idx, &bdev);
if (r < 0)
goto out;
ops = bdev->bd_disk->fops->pr_ops;
if (ops && ops->pr_clear)
r = ops->pr_clear(bdev, key);
else
r = -EOPNOTSUPP;
out:
dm_unprepare_ioctl(md, srcu_idx);
return r;
}
static int __dm_pr_read_keys(struct dm_target *ti, struct dm_dev *dev,
sector_t start, sector_t len, void *data)
{
struct dm_pr *pr = data;
const struct pr_ops *ops = dev->bdev->bd_disk->fops->pr_ops;
if (!ops || !ops->pr_read_keys) {
pr->ret = -EOPNOTSUPP;
return -1;
}
pr->ret = ops->pr_read_keys(dev->bdev, pr->read_keys);
if (!pr->ret)
return -1;
return 0;
}
static int dm_pr_read_keys(struct block_device *bdev, struct pr_keys *keys)
{
struct dm_pr pr = {
.read_keys = keys,
};
int ret;
ret = dm_call_pr(bdev, __dm_pr_read_keys, &pr);
if (ret)
return ret;
return pr.ret;
}
static int __dm_pr_read_reservation(struct dm_target *ti, struct dm_dev *dev,
sector_t start, sector_t len, void *data)
{
struct dm_pr *pr = data;
const struct pr_ops *ops = dev->bdev->bd_disk->fops->pr_ops;
if (!ops || !ops->pr_read_reservation) {
pr->ret = -EOPNOTSUPP;
return -1;
}
pr->ret = ops->pr_read_reservation(dev->bdev, pr->rsv);
if (!pr->ret)
return -1;
return 0;
}
static int dm_pr_read_reservation(struct block_device *bdev,
struct pr_held_reservation *rsv)
{
struct dm_pr pr = {
.rsv = rsv,
};
int ret;
ret = dm_call_pr(bdev, __dm_pr_read_reservation, &pr);
if (ret)
return ret;
return pr.ret;
}
static const struct pr_ops dm_pr_ops = {
.pr_register = dm_pr_register,
.pr_reserve = dm_pr_reserve,
.pr_release = dm_pr_release,
.pr_preempt = dm_pr_preempt,
.pr_clear = dm_pr_clear,
.pr_read_keys = dm_pr_read_keys,
.pr_read_reservation = dm_pr_read_reservation,
};
static const struct block_device_operations dm_blk_dops = {
.submit_bio = dm_submit_bio,
.poll_bio = dm_poll_bio,
.open = dm_blk_open,
.release = dm_blk_close,
.ioctl = dm_blk_ioctl,
.getgeo = dm_blk_getgeo,
.report_zones = dm_blk_report_zones,
.pr_ops = &dm_pr_ops,
.owner = THIS_MODULE
};
static const struct block_device_operations dm_rq_blk_dops = {
.open = dm_blk_open,
.release = dm_blk_close,
.ioctl = dm_blk_ioctl,
.getgeo = dm_blk_getgeo,
.pr_ops = &dm_pr_ops,
.owner = THIS_MODULE
};
static const struct dax_operations dm_dax_ops = {
.direct_access = dm_dax_direct_access,
.zero_page_range = dm_dax_zero_page_range,
.recovery_write = dm_dax_recovery_write,
};
/*
* module hooks
*/
module_init(dm_init);
module_exit(dm_exit);
module_param(major, uint, 0);
MODULE_PARM_DESC(major, "The major number of the device mapper");
module_param(reserved_bio_based_ios, uint, 0644);
MODULE_PARM_DESC(reserved_bio_based_ios, "Reserved IOs in bio-based mempools");
module_param(dm_numa_node, int, 0644);
MODULE_PARM_DESC(dm_numa_node, "NUMA node for DM device memory allocations");
module_param(swap_bios, int, 0644);
MODULE_PARM_DESC(swap_bios, "Maximum allowed inflight swap IOs");
MODULE_DESCRIPTION(DM_NAME " driver");
MODULE_AUTHOR("Joe Thornber <[email protected]>");
MODULE_LICENSE("GPL");
| linux-master | drivers/md/dm.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2012 Red Hat, Inc.
*
* Author: Mikulas Patocka <[email protected]>
*
* Based on Chromium dm-verity driver (C) 2011 The Chromium OS Authors
*
 * In the file "/sys/module/dm_verity/parameters/prefetch_cluster" you can set
 * the default prefetch value. Data is read in "prefetch_cluster" chunks from the
* hash device. Setting this greatly improves performance when data and hash
* are on the same disk on different partitions on devices with poor random
* access behavior.
*/
#include "dm-verity.h"
#include "dm-verity-fec.h"
#include "dm-verity-verify-sig.h"
#include "dm-audit.h"
#include <linux/module.h>
#include <linux/reboot.h>
#include <linux/scatterlist.h>
#include <linux/string.h>
#include <linux/jump_label.h>
#define DM_MSG_PREFIX "verity"
#define DM_VERITY_ENV_LENGTH 42
#define DM_VERITY_ENV_VAR_NAME "DM_VERITY_ERR_BLOCK_NR"
#define DM_VERITY_DEFAULT_PREFETCH_SIZE 262144
#define DM_VERITY_MAX_CORRUPTED_ERRS 100
#define DM_VERITY_OPT_LOGGING "ignore_corruption"
#define DM_VERITY_OPT_RESTART "restart_on_corruption"
#define DM_VERITY_OPT_PANIC "panic_on_corruption"
#define DM_VERITY_OPT_IGN_ZEROES "ignore_zero_blocks"
#define DM_VERITY_OPT_AT_MOST_ONCE "check_at_most_once"
#define DM_VERITY_OPT_TASKLET_VERIFY "try_verify_in_tasklet"
#define DM_VERITY_OPTS_MAX (4 + DM_VERITY_OPTS_FEC + \
DM_VERITY_ROOT_HASH_VERIFICATION_OPTS)
static unsigned int dm_verity_prefetch_cluster = DM_VERITY_DEFAULT_PREFETCH_SIZE;
module_param_named(prefetch_cluster, dm_verity_prefetch_cluster, uint, 0644);
static DEFINE_STATIC_KEY_FALSE(use_tasklet_enabled);
struct dm_verity_prefetch_work {
struct work_struct work;
struct dm_verity *v;
sector_t block;
unsigned int n_blocks;
};
/*
* Auxiliary structure appended to each dm-bufio buffer. If the value
 * hash_verified is nonzero, the hash of the block has been verified.
*
* The variable hash_verified is set to 0 when allocating the buffer, then
* it can be changed to 1 and it is never reset to 0 again.
*
 * There is no lock around this value; at worst, a race condition can cause
 * multiple processes to verify the hash of the same buffer simultaneously
 * and write 1 to hash_verified simultaneously.
* This condition is harmless, so we don't need locking.
*/
struct buffer_aux {
int hash_verified;
};
/*
* Initialize struct buffer_aux for a freshly created buffer.
*/
static void dm_bufio_alloc_callback(struct dm_buffer *buf)
{
struct buffer_aux *aux = dm_bufio_get_aux_data(buf);
aux->hash_verified = 0;
}
/*
* Translate input sector number to the sector number on the target device.
*/
static sector_t verity_map_sector(struct dm_verity *v, sector_t bi_sector)
{
return v->data_start + dm_target_offset(v->ti, bi_sector);
}
/*
* Return hash position of a specified block at a specified tree level
* (0 is the lowest level).
 * The lowest "hash_per_block_bits" bits of the result denote the hash position
 * inside a hash block. The remaining bits denote the location of the hash block.
*/
static sector_t verity_position_at_level(struct dm_verity *v, sector_t block,
int level)
{
return block >> (level * v->hash_per_block_bits);
}
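/*
 * Worked example (illustrative numbers only): with 4096-byte hash blocks
 * and 32-byte digests there are 128 hashes per block, so
 * hash_per_block_bits is 7. For data block 1000 the level-0 position is
 * 1000, the level-1 position is 1000 >> 7 = 7, and so on up the tree.
 */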
static int verity_hash_update(struct dm_verity *v, struct ahash_request *req,
const u8 *data, size_t len,
struct crypto_wait *wait)
{
struct scatterlist sg;
if (likely(!is_vmalloc_addr(data))) {
sg_init_one(&sg, data, len);
ahash_request_set_crypt(req, &sg, NULL, len);
return crypto_wait_req(crypto_ahash_update(req), wait);
}
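	/*
	 * vmalloc memory is virtually contiguous but may be physically
	 * scattered, so it must be hashed one page at a time via
	 * vmalloc_to_page().
	 */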
do {
int r;
size_t this_step = min_t(size_t, len, PAGE_SIZE - offset_in_page(data));
flush_kernel_vmap_range((void *)data, this_step);
sg_init_table(&sg, 1);
sg_set_page(&sg, vmalloc_to_page(data), this_step, offset_in_page(data));
ahash_request_set_crypt(req, &sg, NULL, this_step);
r = crypto_wait_req(crypto_ahash_update(req), wait);
if (unlikely(r))
return r;
data += this_step;
len -= this_step;
} while (len);
return 0;
}
/*
* Wrapper for crypto_ahash_init, which handles verity salting.
*/
static int verity_hash_init(struct dm_verity *v, struct ahash_request *req,
struct crypto_wait *wait)
{
int r;
ahash_request_set_tfm(req, v->tfm);
ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP |
CRYPTO_TFM_REQ_MAY_BACKLOG,
crypto_req_done, (void *)wait);
crypto_init_wait(wait);
r = crypto_wait_req(crypto_ahash_init(req), wait);
if (unlikely(r < 0)) {
DMERR("crypto_ahash_init failed: %d", r);
return r;
}
if (likely(v->salt_size && (v->version >= 1)))
r = verity_hash_update(v, req, v->salt, v->salt_size, wait);
return r;
}
static int verity_hash_final(struct dm_verity *v, struct ahash_request *req,
u8 *digest, struct crypto_wait *wait)
{
int r;
if (unlikely(v->salt_size && (!v->version))) {
r = verity_hash_update(v, req, v->salt, v->salt_size, wait);
if (r < 0) {
DMERR("%s failed updating salt: %d", __func__, r);
goto out;
}
}
ahash_request_set_crypt(req, NULL, digest, 0);
r = crypto_wait_req(crypto_ahash_final(req), wait);
out:
return r;
}
int verity_hash(struct dm_verity *v, struct ahash_request *req,
const u8 *data, size_t len, u8 *digest)
{
int r;
struct crypto_wait wait;
r = verity_hash_init(v, req, &wait);
if (unlikely(r < 0))
goto out;
r = verity_hash_update(v, req, data, len, &wait);
if (unlikely(r < 0))
goto out;
r = verity_hash_final(v, req, digest, &wait);
out:
return r;
}
static void verity_hash_at_level(struct dm_verity *v, sector_t block, int level,
sector_t *hash_block, unsigned int *offset)
{
sector_t position = verity_position_at_level(v, block, level);
unsigned int idx;
*hash_block = v->hash_level_block[level] + (position >> v->hash_per_block_bits);
if (!offset)
return;
idx = position & ((1 << v->hash_per_block_bits) - 1);
if (!v->version)
*offset = idx * v->digest_size;
else
*offset = idx << (v->hash_dev_block_bits - v->hash_per_block_bits);
}
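/*
 * Illustrative arithmetic (assumed parameters, not taken from a live table):
 * with 4096-byte hash blocks and a 32-byte SHA-256 digest, 4096 / 32 == 128
 * hashes fit per block, so hash_per_block_bits == 7. For data block 1000000
 * at level 0 the position is 1000000, giving hash block
 * v->hash_level_block[0] + (1000000 >> 7) == ... + 7812 and, for format
 * version >= 1, offset (1000000 & 127) << (12 - 7) == 2048 bytes into that
 * block.
 */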
/*
* Handle verification errors.
*/
static int verity_handle_err(struct dm_verity *v, enum verity_block_type type,
unsigned long long block)
{
char verity_env[DM_VERITY_ENV_LENGTH];
char *envp[] = { verity_env, NULL };
const char *type_str = "";
struct mapped_device *md = dm_table_get_md(v->ti->table);
/* Corruption should be visible in device status in all modes */
v->hash_failed = true;
if (v->corrupted_errs >= DM_VERITY_MAX_CORRUPTED_ERRS)
goto out;
v->corrupted_errs++;
switch (type) {
case DM_VERITY_BLOCK_TYPE_DATA:
type_str = "data";
break;
case DM_VERITY_BLOCK_TYPE_METADATA:
type_str = "metadata";
break;
default:
BUG();
}
DMERR_LIMIT("%s: %s block %llu is corrupted", v->data_dev->name,
type_str, block);
if (v->corrupted_errs == DM_VERITY_MAX_CORRUPTED_ERRS) {
DMERR("%s: reached maximum errors", v->data_dev->name);
dm_audit_log_target(DM_MSG_PREFIX, "max-corrupted-errors", v->ti, 0);
}
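	/*
	 * Notify user space; the uevent carries e.g.
	 * "DM_VERITY_ERR_BLOCK_NR=0,12345" for a corrupted data block
	 * (the numeric type is the enum verity_block_type value).
	 */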
snprintf(verity_env, DM_VERITY_ENV_LENGTH, "%s=%d,%llu",
DM_VERITY_ENV_VAR_NAME, type, block);
kobject_uevent_env(&disk_to_dev(dm_disk(md))->kobj, KOBJ_CHANGE, envp);
out:
if (v->mode == DM_VERITY_MODE_LOGGING)
return 0;
if (v->mode == DM_VERITY_MODE_RESTART)
kernel_restart("dm-verity device corrupted");
if (v->mode == DM_VERITY_MODE_PANIC)
panic("dm-verity device corrupted");
return 1;
}
/*
* Verify hash of a metadata block pertaining to the specified data block
* ("block" argument) at a specified level ("level" argument).
*
* On successful return, verity_io_want_digest(v, io) contains the hash value
* for a lower tree level or for the data block (if we're at the lowest level).
*
* If "skip_unverified" is true, unverified buffer is skipped and 1 is returned.
* If "skip_unverified" is false, unverified buffer is hashed and verified
* against current value of verity_io_want_digest(v, io).
*/
static int verity_verify_level(struct dm_verity *v, struct dm_verity_io *io,
sector_t block, int level, bool skip_unverified,
u8 *want_digest)
{
struct dm_buffer *buf;
struct buffer_aux *aux;
u8 *data;
int r;
sector_t hash_block;
unsigned int offset;
verity_hash_at_level(v, block, level, &hash_block, &offset);
if (static_branch_unlikely(&use_tasklet_enabled) && io->in_tasklet) {
data = dm_bufio_get(v->bufio, hash_block, &buf);
if (data == NULL) {
/*
			 * We're in a tasklet and the hash was not in the
			 * bufio cache. Return early and resume execution
			 * from a work-queue to read the hash from disk.
*/
return -EAGAIN;
}
} else
data = dm_bufio_read(v->bufio, hash_block, &buf);
if (IS_ERR(data))
return PTR_ERR(data);
aux = dm_bufio_get_aux_data(buf);
if (!aux->hash_verified) {
if (skip_unverified) {
r = 1;
goto release_ret_r;
}
r = verity_hash(v, verity_io_hash_req(v, io),
data, 1 << v->hash_dev_block_bits,
verity_io_real_digest(v, io));
if (unlikely(r < 0))
goto release_ret_r;
if (likely(memcmp(verity_io_real_digest(v, io), want_digest,
v->digest_size) == 0))
aux->hash_verified = 1;
else if (static_branch_unlikely(&use_tasklet_enabled) &&
io->in_tasklet) {
/*
			 * Error handling code (FEC included) cannot be run
			 * in a tasklet since it may sleep, so fall back to
			 * the work-queue.
*/
r = -EAGAIN;
goto release_ret_r;
} else if (verity_fec_decode(v, io, DM_VERITY_BLOCK_TYPE_METADATA,
hash_block, data, NULL) == 0)
aux->hash_verified = 1;
else if (verity_handle_err(v,
DM_VERITY_BLOCK_TYPE_METADATA,
hash_block)) {
struct bio *bio =
dm_bio_from_per_bio_data(io,
v->ti->per_io_data_size);
dm_audit_log_bio(DM_MSG_PREFIX, "verify-metadata", bio,
block, 0);
r = -EIO;
goto release_ret_r;
}
}
data += offset;
memcpy(want_digest, data, v->digest_size);
r = 0;
release_ret_r:
dm_bufio_release(buf);
return r;
}
/*
* Find a hash for a given block, write it to digest and verify the integrity
* of the hash tree if necessary.
*/
int verity_hash_for_block(struct dm_verity *v, struct dm_verity_io *io,
sector_t block, u8 *digest, bool *is_zero)
{
int r = 0, i;
if (likely(v->levels)) {
/*
* First, we try to get the requested hash for
* the current block. If the hash block itself is
* verified, zero is returned. If it isn't, this
* function returns 1 and we fall back to whole
* chain verification.
*/
r = verity_verify_level(v, io, block, 0, true, digest);
if (likely(r <= 0))
goto out;
}
memcpy(digest, v->root_digest, v->digest_size);
for (i = v->levels - 1; i >= 0; i--) {
r = verity_verify_level(v, io, block, i, false, digest);
if (unlikely(r))
goto out;
}
out:
if (!r && v->zero_digest)
*is_zero = !memcmp(v->zero_digest, digest, v->digest_size);
else
*is_zero = false;
return r;
}
/*
* Calculates the digest for the given bio
*/
static int verity_for_io_block(struct dm_verity *v, struct dm_verity_io *io,
struct bvec_iter *iter, struct crypto_wait *wait)
{
unsigned int todo = 1 << v->data_dev_block_bits;
struct bio *bio = dm_bio_from_per_bio_data(io, v->ti->per_io_data_size);
struct scatterlist sg;
struct ahash_request *req = verity_io_hash_req(v, io);
do {
int r;
unsigned int len;
struct bio_vec bv = bio_iter_iovec(bio, *iter);
sg_init_table(&sg, 1);
len = bv.bv_len;
if (likely(len >= todo))
len = todo;
/*
		 * Operating on a single page at a time looks suboptimal
		 * until you consider that the typical block size is 4096
		 * bytes. Going through this loop twice should be very rare.
*/
sg_set_page(&sg, bv.bv_page, len, bv.bv_offset);
ahash_request_set_crypt(req, &sg, NULL, len);
r = crypto_wait_req(crypto_ahash_update(req), wait);
if (unlikely(r < 0)) {
DMERR("%s crypto op failed: %d", __func__, r);
return r;
}
bio_advance_iter(bio, iter, len);
todo -= len;
} while (todo);
return 0;
}
/*
* Calls function process for 1 << v->data_dev_block_bits bytes in the bio_vec
* starting from iter.
*/
int verity_for_bv_block(struct dm_verity *v, struct dm_verity_io *io,
struct bvec_iter *iter,
int (*process)(struct dm_verity *v,
struct dm_verity_io *io, u8 *data,
size_t len))
{
unsigned int todo = 1 << v->data_dev_block_bits;
struct bio *bio = dm_bio_from_per_bio_data(io, v->ti->per_io_data_size);
do {
int r;
u8 *page;
unsigned int len;
struct bio_vec bv = bio_iter_iovec(bio, *iter);
page = bvec_kmap_local(&bv);
len = bv.bv_len;
if (likely(len >= todo))
len = todo;
r = process(v, io, page, len);
kunmap_local(page);
if (r < 0)
return r;
bio_advance_iter(bio, iter, len);
todo -= len;
} while (todo);
return 0;
}
static int verity_bv_zero(struct dm_verity *v, struct dm_verity_io *io,
u8 *data, size_t len)
{
memset(data, 0, len);
return 0;
}
/*
* Moves the bio iter one data block forward.
*/
static inline void verity_bv_skip_block(struct dm_verity *v,
struct dm_verity_io *io,
struct bvec_iter *iter)
{
struct bio *bio = dm_bio_from_per_bio_data(io, v->ti->per_io_data_size);
bio_advance_iter(bio, iter, 1 << v->data_dev_block_bits);
}
/*
* Verify one "dm_verity_io" structure.
*/
static int verity_verify_io(struct dm_verity_io *io)
{
bool is_zero;
struct dm_verity *v = io->v;
#if defined(CONFIG_DM_VERITY_FEC)
struct bvec_iter start;
#endif
struct bvec_iter iter_copy;
struct bvec_iter *iter;
struct crypto_wait wait;
struct bio *bio = dm_bio_from_per_bio_data(io, v->ti->per_io_data_size);
unsigned int b;
if (static_branch_unlikely(&use_tasklet_enabled) && io->in_tasklet) {
/*
* Copy the iterator in case we need to restart
* verification in a work-queue.
*/
iter_copy = io->iter;
iter = &iter_copy;
} else
iter = &io->iter;
for (b = 0; b < io->n_blocks; b++) {
int r;
sector_t cur_block = io->block + b;
struct ahash_request *req = verity_io_hash_req(v, io);
if (v->validated_blocks && bio->bi_status == BLK_STS_OK &&
likely(test_bit(cur_block, v->validated_blocks))) {
verity_bv_skip_block(v, io, iter);
continue;
}
r = verity_hash_for_block(v, io, cur_block,
verity_io_want_digest(v, io),
&is_zero);
if (unlikely(r < 0))
return r;
if (is_zero) {
/*
* If we expect a zero block, don't validate, just
* return zeros.
*/
r = verity_for_bv_block(v, io, iter,
verity_bv_zero);
if (unlikely(r < 0))
return r;
continue;
}
r = verity_hash_init(v, req, &wait);
if (unlikely(r < 0))
return r;
#if defined(CONFIG_DM_VERITY_FEC)
if (verity_fec_is_enabled(v))
start = *iter;
#endif
r = verity_for_io_block(v, io, iter, &wait);
if (unlikely(r < 0))
return r;
r = verity_hash_final(v, req, verity_io_real_digest(v, io),
&wait);
if (unlikely(r < 0))
return r;
if (likely(memcmp(verity_io_real_digest(v, io),
verity_io_want_digest(v, io), v->digest_size) == 0)) {
if (v->validated_blocks)
set_bit(cur_block, v->validated_blocks);
continue;
} else if (static_branch_unlikely(&use_tasklet_enabled) &&
io->in_tasklet) {
/*
			 * Error handling code (FEC included) cannot be run
			 * in a tasklet since it may sleep, so fall back to
			 * the work-queue.
*/
return -EAGAIN;
#if defined(CONFIG_DM_VERITY_FEC)
} else if (verity_fec_decode(v, io, DM_VERITY_BLOCK_TYPE_DATA,
cur_block, NULL, &start) == 0) {
continue;
#endif
} else {
if (bio->bi_status) {
/*
				 * Error correction failed; just return the error.
*/
return -EIO;
}
if (verity_handle_err(v, DM_VERITY_BLOCK_TYPE_DATA,
cur_block)) {
dm_audit_log_bio(DM_MSG_PREFIX, "verify-data",
bio, cur_block, 0);
return -EIO;
}
}
}
return 0;
}
/*
* Skip verity work in response to I/O error when system is shutting down.
*/
static inline bool verity_is_system_shutting_down(void)
{
return system_state == SYSTEM_HALT || system_state == SYSTEM_POWER_OFF
|| system_state == SYSTEM_RESTART;
}
/*
* End one "io" structure with a given error.
*/
static void verity_finish_io(struct dm_verity_io *io, blk_status_t status)
{
struct dm_verity *v = io->v;
struct bio *bio = dm_bio_from_per_bio_data(io, v->ti->per_io_data_size);
bio->bi_end_io = io->orig_bi_end_io;
bio->bi_status = status;
if (!static_branch_unlikely(&use_tasklet_enabled) || !io->in_tasklet)
verity_fec_finish_io(io);
bio_endio(bio);
}
static void verity_work(struct work_struct *w)
{
struct dm_verity_io *io = container_of(w, struct dm_verity_io, work);
io->in_tasklet = false;
verity_fec_init_io(io);
verity_finish_io(io, errno_to_blk_status(verity_verify_io(io)));
}
static void verity_tasklet(unsigned long data)
{
struct dm_verity_io *io = (struct dm_verity_io *)data;
int err;
io->in_tasklet = true;
err = verity_verify_io(io);
if (err == -EAGAIN) {
		/* fall back to retrying via the work-queue */
INIT_WORK(&io->work, verity_work);
queue_work(io->v->verify_wq, &io->work);
return;
}
verity_finish_io(io, errno_to_blk_status(err));
}
static void verity_end_io(struct bio *bio)
{
struct dm_verity_io *io = bio->bi_private;
if (bio->bi_status &&
(!verity_fec_is_enabled(io->v) || verity_is_system_shutting_down())) {
verity_finish_io(io, bio->bi_status);
return;
}
if (static_branch_unlikely(&use_tasklet_enabled) && io->v->use_tasklet) {
tasklet_init(&io->tasklet, verity_tasklet, (unsigned long)io);
tasklet_schedule(&io->tasklet);
} else {
INIT_WORK(&io->work, verity_work);
queue_work(io->v->verify_wq, &io->work);
}
}
/*
* Prefetch buffers for the specified io.
 * The root buffer is not prefetched; it is assumed to be cached
 * all the time.
*/
static void verity_prefetch_io(struct work_struct *work)
{
struct dm_verity_prefetch_work *pw =
container_of(work, struct dm_verity_prefetch_work, work);
struct dm_verity *v = pw->v;
int i;
for (i = v->levels - 2; i >= 0; i--) {
sector_t hash_block_start;
sector_t hash_block_end;
verity_hash_at_level(v, pw->block, i, &hash_block_start, NULL);
verity_hash_at_level(v, pw->block + pw->n_blocks - 1, i, &hash_block_end, NULL);
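		/*
		 * For the lowest level, widen the range to the prefetch
		 * cluster: with the default 262144-byte cluster and 4 KiB
		 * data blocks, cluster >>= 12 yields 64 blocks, so the
		 * window is rounded out to 64-block alignment below.
		 */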
if (!i) {
unsigned int cluster = READ_ONCE(dm_verity_prefetch_cluster);
cluster >>= v->data_dev_block_bits;
if (unlikely(!cluster))
goto no_prefetch_cluster;
if (unlikely(cluster & (cluster - 1)))
cluster = 1 << __fls(cluster);
hash_block_start &= ~(sector_t)(cluster - 1);
hash_block_end |= cluster - 1;
if (unlikely(hash_block_end >= v->hash_blocks))
hash_block_end = v->hash_blocks - 1;
}
no_prefetch_cluster:
dm_bufio_prefetch(v->bufio, hash_block_start,
hash_block_end - hash_block_start + 1);
}
kfree(pw);
}
static void verity_submit_prefetch(struct dm_verity *v, struct dm_verity_io *io)
{
sector_t block = io->block;
unsigned int n_blocks = io->n_blocks;
struct dm_verity_prefetch_work *pw;
if (v->validated_blocks) {
while (n_blocks && test_bit(block, v->validated_blocks)) {
block++;
n_blocks--;
}
while (n_blocks && test_bit(block + n_blocks - 1,
v->validated_blocks))
n_blocks--;
if (!n_blocks)
return;
}
pw = kmalloc(sizeof(struct dm_verity_prefetch_work),
GFP_NOIO | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN);
if (!pw)
return;
INIT_WORK(&pw->work, verity_prefetch_io);
pw->v = v;
pw->block = block;
pw->n_blocks = n_blocks;
queue_work(v->verify_wq, &pw->work);
}
/*
 * Bio map function. It allocates and fills the dm_verity_io structure,
 * then issues the prefetches and the I/O.
*/
static int verity_map(struct dm_target *ti, struct bio *bio)
{
struct dm_verity *v = ti->private;
struct dm_verity_io *io;
bio_set_dev(bio, v->data_dev->bdev);
bio->bi_iter.bi_sector = verity_map_sector(v, bio->bi_iter.bi_sector);
if (((unsigned int)bio->bi_iter.bi_sector | bio_sectors(bio)) &
((1 << (v->data_dev_block_bits - SECTOR_SHIFT)) - 1)) {
DMERR_LIMIT("unaligned io");
return DM_MAPIO_KILL;
}
if (bio_end_sector(bio) >>
(v->data_dev_block_bits - SECTOR_SHIFT) > v->data_blocks) {
DMERR_LIMIT("io out of range");
return DM_MAPIO_KILL;
}
if (bio_data_dir(bio) == WRITE)
return DM_MAPIO_KILL;
io = dm_per_bio_data(bio, ti->per_io_data_size);
io->v = v;
io->orig_bi_end_io = bio->bi_end_io;
io->block = bio->bi_iter.bi_sector >> (v->data_dev_block_bits - SECTOR_SHIFT);
io->n_blocks = bio->bi_iter.bi_size >> v->data_dev_block_bits;
bio->bi_end_io = verity_end_io;
bio->bi_private = io;
io->iter = bio->bi_iter;
verity_submit_prefetch(v, io);
submit_bio_noacct(bio);
return DM_MAPIO_SUBMITTED;
}
/*
* Status: V (valid) or C (corruption found)
*/
static void verity_status(struct dm_target *ti, status_type_t type,
unsigned int status_flags, char *result, unsigned int maxlen)
{
struct dm_verity *v = ti->private;
unsigned int args = 0;
unsigned int sz = 0;
unsigned int x;
switch (type) {
case STATUSTYPE_INFO:
DMEMIT("%c", v->hash_failed ? 'C' : 'V');
break;
case STATUSTYPE_TABLE:
DMEMIT("%u %s %s %u %u %llu %llu %s ",
v->version,
v->data_dev->name,
v->hash_dev->name,
1 << v->data_dev_block_bits,
1 << v->hash_dev_block_bits,
(unsigned long long)v->data_blocks,
(unsigned long long)v->hash_start,
v->alg_name
);
for (x = 0; x < v->digest_size; x++)
DMEMIT("%02x", v->root_digest[x]);
DMEMIT(" ");
if (!v->salt_size)
DMEMIT("-");
else
for (x = 0; x < v->salt_size; x++)
DMEMIT("%02x", v->salt[x]);
if (v->mode != DM_VERITY_MODE_EIO)
args++;
if (verity_fec_is_enabled(v))
args += DM_VERITY_OPTS_FEC;
if (v->zero_digest)
args++;
if (v->validated_blocks)
args++;
if (v->use_tasklet)
args++;
if (v->signature_key_desc)
args += DM_VERITY_ROOT_HASH_VERIFICATION_OPTS;
if (!args)
return;
DMEMIT(" %u", args);
if (v->mode != DM_VERITY_MODE_EIO) {
DMEMIT(" ");
switch (v->mode) {
case DM_VERITY_MODE_LOGGING:
DMEMIT(DM_VERITY_OPT_LOGGING);
break;
case DM_VERITY_MODE_RESTART:
DMEMIT(DM_VERITY_OPT_RESTART);
break;
case DM_VERITY_MODE_PANIC:
DMEMIT(DM_VERITY_OPT_PANIC);
break;
default:
BUG();
}
}
if (v->zero_digest)
DMEMIT(" " DM_VERITY_OPT_IGN_ZEROES);
if (v->validated_blocks)
DMEMIT(" " DM_VERITY_OPT_AT_MOST_ONCE);
if (v->use_tasklet)
DMEMIT(" " DM_VERITY_OPT_TASKLET_VERIFY);
sz = verity_fec_status_table(v, sz, result, maxlen);
if (v->signature_key_desc)
DMEMIT(" " DM_VERITY_ROOT_HASH_VERIFICATION_OPT_SIG_KEY
" %s", v->signature_key_desc);
break;
case STATUSTYPE_IMA:
DMEMIT_TARGET_NAME_VERSION(ti->type);
DMEMIT(",hash_failed=%c", v->hash_failed ? 'C' : 'V');
DMEMIT(",verity_version=%u", v->version);
DMEMIT(",data_device_name=%s", v->data_dev->name);
DMEMIT(",hash_device_name=%s", v->hash_dev->name);
DMEMIT(",verity_algorithm=%s", v->alg_name);
DMEMIT(",root_digest=");
for (x = 0; x < v->digest_size; x++)
DMEMIT("%02x", v->root_digest[x]);
DMEMIT(",salt=");
if (!v->salt_size)
DMEMIT("-");
else
for (x = 0; x < v->salt_size; x++)
DMEMIT("%02x", v->salt[x]);
DMEMIT(",ignore_zero_blocks=%c", v->zero_digest ? 'y' : 'n');
DMEMIT(",check_at_most_once=%c", v->validated_blocks ? 'y' : 'n');
if (v->signature_key_desc)
DMEMIT(",root_hash_sig_key_desc=%s", v->signature_key_desc);
if (v->mode != DM_VERITY_MODE_EIO) {
DMEMIT(",verity_mode=");
switch (v->mode) {
case DM_VERITY_MODE_LOGGING:
DMEMIT(DM_VERITY_OPT_LOGGING);
break;
case DM_VERITY_MODE_RESTART:
DMEMIT(DM_VERITY_OPT_RESTART);
break;
case DM_VERITY_MODE_PANIC:
DMEMIT(DM_VERITY_OPT_PANIC);
break;
default:
DMEMIT("invalid");
}
}
DMEMIT(";");
break;
}
}
static int verity_prepare_ioctl(struct dm_target *ti, struct block_device **bdev)
{
struct dm_verity *v = ti->private;
*bdev = v->data_dev->bdev;
if (v->data_start || ti->len != bdev_nr_sectors(v->data_dev->bdev))
return 1;
return 0;
}
static int verity_iterate_devices(struct dm_target *ti,
iterate_devices_callout_fn fn, void *data)
{
struct dm_verity *v = ti->private;
return fn(ti, v->data_dev, v->data_start, ti->len, data);
}
static void verity_io_hints(struct dm_target *ti, struct queue_limits *limits)
{
struct dm_verity *v = ti->private;
if (limits->logical_block_size < 1 << v->data_dev_block_bits)
limits->logical_block_size = 1 << v->data_dev_block_bits;
if (limits->physical_block_size < 1 << v->data_dev_block_bits)
limits->physical_block_size = 1 << v->data_dev_block_bits;
blk_limits_io_min(limits, limits->logical_block_size);
}
static void verity_dtr(struct dm_target *ti)
{
struct dm_verity *v = ti->private;
if (v->verify_wq)
destroy_workqueue(v->verify_wq);
if (v->bufio)
dm_bufio_client_destroy(v->bufio);
kvfree(v->validated_blocks);
kfree(v->salt);
kfree(v->root_digest);
kfree(v->zero_digest);
if (v->tfm)
crypto_free_ahash(v->tfm);
kfree(v->alg_name);
if (v->hash_dev)
dm_put_device(ti, v->hash_dev);
if (v->data_dev)
dm_put_device(ti, v->data_dev);
verity_fec_dtr(v);
kfree(v->signature_key_desc);
if (v->use_tasklet)
static_branch_dec(&use_tasklet_enabled);
kfree(v);
dm_audit_log_dtr(DM_MSG_PREFIX, ti, 1);
}
static int verity_alloc_most_once(struct dm_verity *v)
{
struct dm_target *ti = v->ti;
/* the bitset can only handle INT_MAX blocks */
if (v->data_blocks > INT_MAX) {
ti->error = "device too large to use check_at_most_once";
return -E2BIG;
}
v->validated_blocks = kvcalloc(BITS_TO_LONGS(v->data_blocks),
sizeof(unsigned long),
GFP_KERNEL);
if (!v->validated_blocks) {
ti->error = "failed to allocate bitset for check_at_most_once";
return -ENOMEM;
}
return 0;
}
static int verity_alloc_zero_digest(struct dm_verity *v)
{
int r = -ENOMEM;
struct ahash_request *req;
u8 *zero_data;
v->zero_digest = kmalloc(v->digest_size, GFP_KERNEL);
if (!v->zero_digest)
return r;
req = kmalloc(v->ahash_reqsize, GFP_KERNEL);
if (!req)
return r; /* verity_dtr will free zero_digest */
zero_data = kzalloc(1 << v->data_dev_block_bits, GFP_KERNEL);
if (!zero_data)
goto out;
r = verity_hash(v, req, zero_data, 1 << v->data_dev_block_bits,
v->zero_digest);
out:
kfree(req);
kfree(zero_data);
return r;
}
static inline bool verity_is_verity_mode(const char *arg_name)
{
return (!strcasecmp(arg_name, DM_VERITY_OPT_LOGGING) ||
!strcasecmp(arg_name, DM_VERITY_OPT_RESTART) ||
!strcasecmp(arg_name, DM_VERITY_OPT_PANIC));
}
static int verity_parse_verity_mode(struct dm_verity *v, const char *arg_name)
{
if (v->mode)
return -EINVAL;
if (!strcasecmp(arg_name, DM_VERITY_OPT_LOGGING))
v->mode = DM_VERITY_MODE_LOGGING;
else if (!strcasecmp(arg_name, DM_VERITY_OPT_RESTART))
v->mode = DM_VERITY_MODE_RESTART;
else if (!strcasecmp(arg_name, DM_VERITY_OPT_PANIC))
v->mode = DM_VERITY_MODE_PANIC;
return 0;
}
static int verity_parse_opt_args(struct dm_arg_set *as, struct dm_verity *v,
struct dm_verity_sig_opts *verify_args,
bool only_modifier_opts)
{
int r = 0;
unsigned int argc;
struct dm_target *ti = v->ti;
const char *arg_name;
static const struct dm_arg _args[] = {
{0, DM_VERITY_OPTS_MAX, "Invalid number of feature args"},
};
r = dm_read_arg_group(_args, as, &argc, &ti->error);
if (r)
return -EINVAL;
if (!argc)
return 0;
do {
arg_name = dm_shift_arg(as);
argc--;
if (verity_is_verity_mode(arg_name)) {
if (only_modifier_opts)
continue;
r = verity_parse_verity_mode(v, arg_name);
if (r) {
ti->error = "Conflicting error handling parameters";
return r;
}
continue;
} else if (!strcasecmp(arg_name, DM_VERITY_OPT_IGN_ZEROES)) {
if (only_modifier_opts)
continue;
r = verity_alloc_zero_digest(v);
if (r) {
ti->error = "Cannot allocate zero digest";
return r;
}
continue;
} else if (!strcasecmp(arg_name, DM_VERITY_OPT_AT_MOST_ONCE)) {
if (only_modifier_opts)
continue;
r = verity_alloc_most_once(v);
if (r)
return r;
continue;
} else if (!strcasecmp(arg_name, DM_VERITY_OPT_TASKLET_VERIFY)) {
v->use_tasklet = true;
static_branch_inc(&use_tasklet_enabled);
continue;
} else if (verity_is_fec_opt_arg(arg_name)) {
if (only_modifier_opts)
continue;
r = verity_fec_parse_opt_args(as, v, &argc, arg_name);
if (r)
return r;
continue;
} else if (verity_verify_is_sig_opt_arg(arg_name)) {
if (only_modifier_opts)
continue;
r = verity_verify_sig_parse_opt_args(as, v,
verify_args,
&argc, arg_name);
if (r)
return r;
continue;
} else if (only_modifier_opts) {
/*
* Ignore unrecognized opt, could easily be an extra
* argument to an option whose parsing was skipped.
* Normal parsing (@only_modifier_opts=false) will
* properly parse all options (and their extra args).
*/
continue;
}
DMERR("Unrecognized verity feature request: %s", arg_name);
ti->error = "Unrecognized verity feature request";
return -EINVAL;
} while (argc && !r);
return r;
}
/*
* Target parameters:
 * <version> The current format is version 1.
 * Version 0 is compatible with the original Chromium OS releases.
* <data device>
* <hash device>
* <data block size>
* <hash block size>
* <the number of data blocks>
* <hash start block>
* <algorithm>
* <digest>
* <salt> Hex string or "-" if no salt.
*/
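/*
 * Illustrative table line (hypothetical devices, digest and salt; hash
 * start block 1 assumes a veritysetup superblock occupies hash block 0):
 *
 *   dmsetup create vroot --readonly --table \
 *     "0 2097152 verity 1 /dev/sda1 /dev/sda2 4096 4096 262144 1 sha256 \
 *      <64 hex digits of root digest> <hex salt or ->"
 *
 * 2097152 sectors == 262144 data blocks of 4 KiB each.
 */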
static int verity_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
struct dm_verity *v;
struct dm_verity_sig_opts verify_args = {0};
struct dm_arg_set as;
unsigned int num;
unsigned long long num_ll;
int r;
int i;
sector_t hash_position;
char dummy;
char *root_hash_digest_to_validate;
v = kzalloc(sizeof(struct dm_verity), GFP_KERNEL);
if (!v) {
ti->error = "Cannot allocate verity structure";
return -ENOMEM;
}
ti->private = v;
v->ti = ti;
r = verity_fec_ctr_alloc(v);
if (r)
goto bad;
if ((dm_table_get_mode(ti->table) & ~BLK_OPEN_READ)) {
ti->error = "Device must be readonly";
r = -EINVAL;
goto bad;
}
if (argc < 10) {
ti->error = "Not enough arguments";
r = -EINVAL;
goto bad;
}
/* Parse optional parameters that modify primary args */
if (argc > 10) {
as.argc = argc - 10;
as.argv = argv + 10;
r = verity_parse_opt_args(&as, v, &verify_args, true);
if (r < 0)
goto bad;
}
if (sscanf(argv[0], "%u%c", &num, &dummy) != 1 ||
num > 1) {
ti->error = "Invalid version";
r = -EINVAL;
goto bad;
}
v->version = num;
r = dm_get_device(ti, argv[1], BLK_OPEN_READ, &v->data_dev);
if (r) {
ti->error = "Data device lookup failed";
goto bad;
}
r = dm_get_device(ti, argv[2], BLK_OPEN_READ, &v->hash_dev);
if (r) {
ti->error = "Hash device lookup failed";
goto bad;
}
if (sscanf(argv[3], "%u%c", &num, &dummy) != 1 ||
!num || (num & (num - 1)) ||
num < bdev_logical_block_size(v->data_dev->bdev) ||
num > PAGE_SIZE) {
ti->error = "Invalid data device block size";
r = -EINVAL;
goto bad;
}
v->data_dev_block_bits = __ffs(num);
if (sscanf(argv[4], "%u%c", &num, &dummy) != 1 ||
!num || (num & (num - 1)) ||
num < bdev_logical_block_size(v->hash_dev->bdev) ||
num > INT_MAX) {
ti->error = "Invalid hash device block size";
r = -EINVAL;
goto bad;
}
v->hash_dev_block_bits = __ffs(num);
if (sscanf(argv[5], "%llu%c", &num_ll, &dummy) != 1 ||
(sector_t)(num_ll << (v->data_dev_block_bits - SECTOR_SHIFT))
>> (v->data_dev_block_bits - SECTOR_SHIFT) != num_ll) {
ti->error = "Invalid data blocks";
r = -EINVAL;
goto bad;
}
v->data_blocks = num_ll;
if (ti->len > (v->data_blocks << (v->data_dev_block_bits - SECTOR_SHIFT))) {
ti->error = "Data device is too small";
r = -EINVAL;
goto bad;
}
if (sscanf(argv[6], "%llu%c", &num_ll, &dummy) != 1 ||
(sector_t)(num_ll << (v->hash_dev_block_bits - SECTOR_SHIFT))
>> (v->hash_dev_block_bits - SECTOR_SHIFT) != num_ll) {
ti->error = "Invalid hash start";
r = -EINVAL;
goto bad;
}
v->hash_start = num_ll;
v->alg_name = kstrdup(argv[7], GFP_KERNEL);
if (!v->alg_name) {
ti->error = "Cannot allocate algorithm name";
r = -ENOMEM;
goto bad;
}
v->tfm = crypto_alloc_ahash(v->alg_name, 0,
v->use_tasklet ? CRYPTO_ALG_ASYNC : 0);
if (IS_ERR(v->tfm)) {
ti->error = "Cannot initialize hash function";
r = PTR_ERR(v->tfm);
v->tfm = NULL;
goto bad;
}
/*
* dm-verity performance can vary greatly depending on which hash
* algorithm implementation is used. Help people debug performance
* problems by logging the ->cra_driver_name.
*/
DMINFO("%s using implementation \"%s\"", v->alg_name,
crypto_hash_alg_common(v->tfm)->base.cra_driver_name);
v->digest_size = crypto_ahash_digestsize(v->tfm);
if ((1 << v->hash_dev_block_bits) < v->digest_size * 2) {
ti->error = "Digest size too big";
r = -EINVAL;
goto bad;
}
v->ahash_reqsize = sizeof(struct ahash_request) +
crypto_ahash_reqsize(v->tfm);
v->root_digest = kmalloc(v->digest_size, GFP_KERNEL);
if (!v->root_digest) {
ti->error = "Cannot allocate root digest";
r = -ENOMEM;
goto bad;
}
if (strlen(argv[8]) != v->digest_size * 2 ||
hex2bin(v->root_digest, argv[8], v->digest_size)) {
ti->error = "Invalid root digest";
r = -EINVAL;
goto bad;
}
root_hash_digest_to_validate = argv[8];
if (strcmp(argv[9], "-")) {
v->salt_size = strlen(argv[9]) / 2;
v->salt = kmalloc(v->salt_size, GFP_KERNEL);
if (!v->salt) {
ti->error = "Cannot allocate salt";
r = -ENOMEM;
goto bad;
}
if (strlen(argv[9]) != v->salt_size * 2 ||
hex2bin(v->salt, argv[9], v->salt_size)) {
ti->error = "Invalid salt";
r = -EINVAL;
goto bad;
}
}
argv += 10;
argc -= 10;
/* Optional parameters */
if (argc) {
as.argc = argc;
as.argv = argv;
r = verity_parse_opt_args(&as, v, &verify_args, false);
if (r < 0)
goto bad;
}
	/* Root hash signature is an optional parameter */
r = verity_verify_root_hash(root_hash_digest_to_validate,
strlen(root_hash_digest_to_validate),
verify_args.sig,
verify_args.sig_size);
if (r < 0) {
ti->error = "Root hash verification failed";
goto bad;
}
v->hash_per_block_bits =
__fls((1 << v->hash_dev_block_bits) / v->digest_size);
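	/*
	 * Illustrative sizing (assumed parameters): 4 KiB hash blocks with
	 * SHA-256 hold 4096 / 32 == 128 hashes, so hash_per_block_bits == 7
	 * and a 4 GiB data device (2^20 blocks of 4 KiB) needs three levels,
	 * since (2^20 - 1) >> (7 * 2) != 0 but (2^20 - 1) >> (7 * 3) == 0.
	 */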
v->levels = 0;
if (v->data_blocks)
while (v->hash_per_block_bits * v->levels < 64 &&
(unsigned long long)(v->data_blocks - 1) >>
(v->hash_per_block_bits * v->levels))
v->levels++;
if (v->levels > DM_VERITY_MAX_LEVELS) {
ti->error = "Too many tree levels";
r = -E2BIG;
goto bad;
}
hash_position = v->hash_start;
for (i = v->levels - 1; i >= 0; i--) {
sector_t s;
v->hash_level_block[i] = hash_position;
s = (v->data_blocks + ((sector_t)1 << ((i + 1) * v->hash_per_block_bits)) - 1)
>> ((i + 1) * v->hash_per_block_bits);
if (hash_position + s < hash_position) {
ti->error = "Hash device offset overflow";
r = -E2BIG;
goto bad;
}
hash_position += s;
}
v->hash_blocks = hash_position;
v->bufio = dm_bufio_client_create(v->hash_dev->bdev,
1 << v->hash_dev_block_bits, 1, sizeof(struct buffer_aux),
dm_bufio_alloc_callback, NULL,
v->use_tasklet ? DM_BUFIO_CLIENT_NO_SLEEP : 0);
if (IS_ERR(v->bufio)) {
ti->error = "Cannot initialize dm-bufio";
r = PTR_ERR(v->bufio);
v->bufio = NULL;
goto bad;
}
if (dm_bufio_get_device_size(v->bufio) < v->hash_blocks) {
ti->error = "Hash device is too small";
r = -E2BIG;
goto bad;
}
/*
* Using WQ_HIGHPRI improves throughput and completion latency by
* reducing wait times when reading from a dm-verity device.
*
 * Also, as required for the "try_verify_in_tasklet" feature: WQ_HIGHPRI
 * allows verify_wq to preempt softirq, since verification in a tasklet
 * falls back to the workqueue for error handling (or when the bufio cache
 * doesn't have the required hashes).
*/
v->verify_wq = alloc_workqueue("kverityd", WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
if (!v->verify_wq) {
ti->error = "Cannot allocate workqueue";
r = -ENOMEM;
goto bad;
}
ti->per_io_data_size = sizeof(struct dm_verity_io) +
v->ahash_reqsize + v->digest_size * 2;
r = verity_fec_ctr(v);
if (r)
goto bad;
ti->per_io_data_size = roundup(ti->per_io_data_size,
__alignof__(struct dm_verity_io));
verity_verify_sig_opts_cleanup(&verify_args);
dm_audit_log_ctr(DM_MSG_PREFIX, ti, 1);
return 0;
bad:
verity_verify_sig_opts_cleanup(&verify_args);
dm_audit_log_ctr(DM_MSG_PREFIX, ti, 0);
verity_dtr(ti);
return r;
}
/*
* Check whether a DM target is a verity target.
*/
bool dm_is_verity_target(struct dm_target *ti)
{
return ti->type->module == THIS_MODULE;
}
/*
* Get the verity mode (error behavior) of a verity target.
*
* Returns the verity mode of the target, or -EINVAL if 'ti' is not a verity
* target.
*/
int dm_verity_get_mode(struct dm_target *ti)
{
struct dm_verity *v = ti->private;
if (!dm_is_verity_target(ti))
return -EINVAL;
return v->mode;
}
/*
* Get the root digest of a verity target.
*
* Returns a copy of the root digest, the caller is responsible for
* freeing the memory of the digest.
*/
int dm_verity_get_root_digest(struct dm_target *ti, u8 **root_digest, unsigned int *digest_size)
{
struct dm_verity *v = ti->private;
if (!dm_is_verity_target(ti))
return -EINVAL;
*root_digest = kmemdup(v->root_digest, v->digest_size, GFP_KERNEL);
if (*root_digest == NULL)
return -ENOMEM;
*digest_size = v->digest_size;
return 0;
}
static struct target_type verity_target = {
.name = "verity",
.features = DM_TARGET_IMMUTABLE,
.version = {1, 9, 0},
.module = THIS_MODULE,
.ctr = verity_ctr,
.dtr = verity_dtr,
.map = verity_map,
.status = verity_status,
.prepare_ioctl = verity_prepare_ioctl,
.iterate_devices = verity_iterate_devices,
.io_hints = verity_io_hints,
};
module_dm(verity);
MODULE_AUTHOR("Mikulas Patocka <[email protected]>");
MODULE_AUTHOR("Mandeep Baines <[email protected]>");
MODULE_AUTHOR("Will Drewry <[email protected]>");
MODULE_DESCRIPTION(DM_NAME " target for transparent disk integrity checking");
MODULE_LICENSE("GPL");
| linux-master | drivers/md/dm-verity-target.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2012 Red Hat, Inc.
*
* This file is released under the GPL.
*/
#include "dm.h"
#include "dm-bio-prison-v1.h"
#include "dm-bio-prison-v2.h"
#include <linux/spinlock.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/slab.h>
/*----------------------------------------------------------------*/
#define MIN_CELLS 1024
struct prison_region {
spinlock_t lock;
struct rb_root cell;
} ____cacheline_aligned_in_smp;
struct dm_bio_prison {
mempool_t cell_pool;
unsigned int num_locks;
struct prison_region regions[];
};
static struct kmem_cache *_cell_cache;
/*----------------------------------------------------------------*/
/*
 * The cell pool is sized for the number of cells in use _concurrently_
 * (MIN_CELLS); don't confuse that with the number of distinct keys.
*/
struct dm_bio_prison *dm_bio_prison_create(void)
{
int ret;
unsigned int i, num_locks;
struct dm_bio_prison *prison;
num_locks = dm_num_hash_locks();
prison = kzalloc(struct_size(prison, regions, num_locks), GFP_KERNEL);
if (!prison)
return NULL;
prison->num_locks = num_locks;
for (i = 0; i < prison->num_locks; i++) {
spin_lock_init(&prison->regions[i].lock);
prison->regions[i].cell = RB_ROOT;
}
ret = mempool_init_slab_pool(&prison->cell_pool, MIN_CELLS, _cell_cache);
if (ret) {
kfree(prison);
return NULL;
}
return prison;
}
EXPORT_SYMBOL_GPL(dm_bio_prison_create);
void dm_bio_prison_destroy(struct dm_bio_prison *prison)
{
mempool_exit(&prison->cell_pool);
kfree(prison);
}
EXPORT_SYMBOL_GPL(dm_bio_prison_destroy);
struct dm_bio_prison_cell *dm_bio_prison_alloc_cell(struct dm_bio_prison *prison, gfp_t gfp)
{
return mempool_alloc(&prison->cell_pool, gfp);
}
EXPORT_SYMBOL_GPL(dm_bio_prison_alloc_cell);
void dm_bio_prison_free_cell(struct dm_bio_prison *prison,
struct dm_bio_prison_cell *cell)
{
mempool_free(cell, &prison->cell_pool);
}
EXPORT_SYMBOL_GPL(dm_bio_prison_free_cell);
static void __setup_new_cell(struct dm_cell_key *key,
struct bio *holder,
struct dm_bio_prison_cell *cell)
{
memcpy(&cell->key, key, sizeof(cell->key));
cell->holder = holder;
bio_list_init(&cell->bios);
}
static int cmp_keys(struct dm_cell_key *lhs,
struct dm_cell_key *rhs)
{
if (lhs->virtual < rhs->virtual)
return -1;
if (lhs->virtual > rhs->virtual)
return 1;
if (lhs->dev < rhs->dev)
return -1;
if (lhs->dev > rhs->dev)
return 1;
if (lhs->block_end <= rhs->block_begin)
return -1;
if (lhs->block_begin >= rhs->block_end)
return 1;
return 0;
}
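/*
 * Note that two keys on the same (virtual, dev) pair compare equal
 * whenever their [block_begin, block_end) ranges merely overlap, e.g.
 * [0, 8) and [4, 12): overlapping ranges land in the same cell, which is
 * what makes range locking work.
 */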
static inline unsigned int lock_nr(struct dm_cell_key *key, unsigned int num_locks)
{
return dm_hash_locks_index((key->block_begin >> BIO_PRISON_MAX_RANGE_SHIFT),
num_locks);
}
bool dm_cell_key_has_valid_range(struct dm_cell_key *key)
{
if (WARN_ON_ONCE(key->block_end - key->block_begin > BIO_PRISON_MAX_RANGE))
return false;
if (WARN_ON_ONCE((key->block_begin >> BIO_PRISON_MAX_RANGE_SHIFT) !=
(key->block_end - 1) >> BIO_PRISON_MAX_RANGE_SHIFT))
return false;
return true;
}
EXPORT_SYMBOL(dm_cell_key_has_valid_range);
static int __bio_detain(struct rb_root *root,
struct dm_cell_key *key,
struct bio *inmate,
struct dm_bio_prison_cell *cell_prealloc,
struct dm_bio_prison_cell **cell_result)
{
int r;
struct rb_node **new = &root->rb_node, *parent = NULL;
while (*new) {
struct dm_bio_prison_cell *cell =
rb_entry(*new, struct dm_bio_prison_cell, node);
r = cmp_keys(key, &cell->key);
parent = *new;
if (r < 0)
new = &((*new)->rb_left);
else if (r > 0)
new = &((*new)->rb_right);
else {
if (inmate)
bio_list_add(&cell->bios, inmate);
*cell_result = cell;
return 1;
}
}
__setup_new_cell(key, inmate, cell_prealloc);
*cell_result = cell_prealloc;
rb_link_node(&cell_prealloc->node, parent, new);
rb_insert_color(&cell_prealloc->node, root);
return 0;
}
static int bio_detain(struct dm_bio_prison *prison,
struct dm_cell_key *key,
struct bio *inmate,
struct dm_bio_prison_cell *cell_prealloc,
struct dm_bio_prison_cell **cell_result)
{
int r;
unsigned l = lock_nr(key, prison->num_locks);
spin_lock_irq(&prison->regions[l].lock);
r = __bio_detain(&prison->regions[l].cell, key, inmate, cell_prealloc, cell_result);
spin_unlock_irq(&prison->regions[l].lock);
return r;
}
int dm_bio_detain(struct dm_bio_prison *prison,
struct dm_cell_key *key,
struct bio *inmate,
struct dm_bio_prison_cell *cell_prealloc,
struct dm_bio_prison_cell **cell_result)
{
return bio_detain(prison, key, inmate, cell_prealloc, cell_result);
}
EXPORT_SYMBOL_GPL(dm_bio_detain);
int dm_get_cell(struct dm_bio_prison *prison,
struct dm_cell_key *key,
struct dm_bio_prison_cell *cell_prealloc,
struct dm_bio_prison_cell **cell_result)
{
return bio_detain(prison, key, NULL, cell_prealloc, cell_result);
}
EXPORT_SYMBOL_GPL(dm_get_cell);
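/*
 * Minimal usage sketch (hypothetical caller, compiled out): detain bios
 * against a key, do the guarded work as the holder, then release and
 * resubmit everything that queued up behind the cell.
 */
#if 0
static void example_prison_usage(struct dm_bio_prison *prison,
				 struct dm_cell_key *key, struct bio *bio)
{
	struct dm_bio_prison_cell *prealloc, *cell;
	struct bio_list bios;
	struct bio *b;

	prealloc = dm_bio_prison_alloc_cell(prison, GFP_NOIO);
	if (dm_bio_detain(prison, key, bio, prealloc, &cell)) {
		/* Cell already held: bio was queued inside it. */
		dm_bio_prison_free_cell(prison, prealloc);
		return;
	}
	/* We are the holder (cell == prealloc): do the guarded work... */
	bio_list_init(&bios);
	dm_cell_release(prison, cell, &bios);	/* holder + waiters */
	while ((b = bio_list_pop(&bios)))
		submit_bio_noacct(b);
	dm_bio_prison_free_cell(prison, cell);
}
#endif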
/*
* @inmates must have been initialised prior to this call
*/
static void __cell_release(struct rb_root *root,
struct dm_bio_prison_cell *cell,
struct bio_list *inmates)
{
rb_erase(&cell->node, root);
if (inmates) {
if (cell->holder)
bio_list_add(inmates, cell->holder);
bio_list_merge(inmates, &cell->bios);
}
}
void dm_cell_release(struct dm_bio_prison *prison,
struct dm_bio_prison_cell *cell,
struct bio_list *bios)
{
unsigned l = lock_nr(&cell->key, prison->num_locks);
spin_lock_irq(&prison->regions[l].lock);
__cell_release(&prison->regions[l].cell, cell, bios);
spin_unlock_irq(&prison->regions[l].lock);
}
EXPORT_SYMBOL_GPL(dm_cell_release);
/*
* Sometimes we don't want the holder, just the additional bios.
*/
static void __cell_release_no_holder(struct rb_root *root,
struct dm_bio_prison_cell *cell,
struct bio_list *inmates)
{
rb_erase(&cell->node, root);
bio_list_merge(inmates, &cell->bios);
}
void dm_cell_release_no_holder(struct dm_bio_prison *prison,
struct dm_bio_prison_cell *cell,
struct bio_list *inmates)
{
unsigned l = lock_nr(&cell->key, prison->num_locks);
unsigned long flags;
spin_lock_irqsave(&prison->regions[l].lock, flags);
__cell_release_no_holder(&prison->regions[l].cell, cell, inmates);
spin_unlock_irqrestore(&prison->regions[l].lock, flags);
}
EXPORT_SYMBOL_GPL(dm_cell_release_no_holder);
void dm_cell_error(struct dm_bio_prison *prison,
struct dm_bio_prison_cell *cell, blk_status_t error)
{
struct bio_list bios;
struct bio *bio;
bio_list_init(&bios);
dm_cell_release(prison, cell, &bios);
while ((bio = bio_list_pop(&bios))) {
bio->bi_status = error;
bio_endio(bio);
}
}
EXPORT_SYMBOL_GPL(dm_cell_error);
void dm_cell_visit_release(struct dm_bio_prison *prison,
void (*visit_fn)(void *, struct dm_bio_prison_cell *),
void *context,
struct dm_bio_prison_cell *cell)
{
unsigned l = lock_nr(&cell->key, prison->num_locks);
spin_lock_irq(&prison->regions[l].lock);
visit_fn(context, cell);
rb_erase(&cell->node, &prison->regions[l].cell);
spin_unlock_irq(&prison->regions[l].lock);
}
EXPORT_SYMBOL_GPL(dm_cell_visit_release);
static int __promote_or_release(struct rb_root *root,
struct dm_bio_prison_cell *cell)
{
if (bio_list_empty(&cell->bios)) {
rb_erase(&cell->node, root);
return 1;
}
cell->holder = bio_list_pop(&cell->bios);
return 0;
}
int dm_cell_promote_or_release(struct dm_bio_prison *prison,
struct dm_bio_prison_cell *cell)
{
int r;
unsigned l = lock_nr(&cell->key, prison->num_locks);
spin_lock_irq(&prison->regions[l].lock);
r = __promote_or_release(&prison->regions[l].cell, cell);
spin_unlock_irq(&prison->regions[l].lock);
return r;
}
EXPORT_SYMBOL_GPL(dm_cell_promote_or_release);
/*----------------------------------------------------------------*/
#define DEFERRED_SET_SIZE 64
struct dm_deferred_entry {
struct dm_deferred_set *ds;
unsigned int count;
struct list_head work_items;
};
struct dm_deferred_set {
spinlock_t lock;
unsigned int current_entry;
unsigned int sweeper;
struct dm_deferred_entry entries[DEFERRED_SET_SIZE];
};
struct dm_deferred_set *dm_deferred_set_create(void)
{
int i;
struct dm_deferred_set *ds;
ds = kmalloc(sizeof(*ds), GFP_KERNEL);
if (!ds)
return NULL;
spin_lock_init(&ds->lock);
ds->current_entry = 0;
ds->sweeper = 0;
for (i = 0; i < DEFERRED_SET_SIZE; i++) {
ds->entries[i].ds = ds;
ds->entries[i].count = 0;
INIT_LIST_HEAD(&ds->entries[i].work_items);
}
return ds;
}
EXPORT_SYMBOL_GPL(dm_deferred_set_create);
void dm_deferred_set_destroy(struct dm_deferred_set *ds)
{
kfree(ds);
}
EXPORT_SYMBOL_GPL(dm_deferred_set_destroy);
struct dm_deferred_entry *dm_deferred_entry_inc(struct dm_deferred_set *ds)
{
unsigned long flags;
struct dm_deferred_entry *entry;
spin_lock_irqsave(&ds->lock, flags);
entry = ds->entries + ds->current_entry;
entry->count++;
spin_unlock_irqrestore(&ds->lock, flags);
return entry;
}
EXPORT_SYMBOL_GPL(dm_deferred_entry_inc);
static unsigned int ds_next(unsigned int index)
{
return (index + 1) % DEFERRED_SET_SIZE;
}
static void __sweep(struct dm_deferred_set *ds, struct list_head *head)
{
while ((ds->sweeper != ds->current_entry) &&
!ds->entries[ds->sweeper].count) {
list_splice_init(&ds->entries[ds->sweeper].work_items, head);
ds->sweeper = ds_next(ds->sweeper);
}
if ((ds->sweeper == ds->current_entry) && !ds->entries[ds->sweeper].count)
list_splice_init(&ds->entries[ds->sweeper].work_items, head);
}
void dm_deferred_entry_dec(struct dm_deferred_entry *entry, struct list_head *head)
{
unsigned long flags;
spin_lock_irqsave(&entry->ds->lock, flags);
BUG_ON(!entry->count);
--entry->count;
__sweep(entry->ds, head);
spin_unlock_irqrestore(&entry->ds->lock, flags);
}
EXPORT_SYMBOL_GPL(dm_deferred_entry_dec);
/*
 * Returns 1 if the work was deferred, or 0 if there were no pending items
 * to delay the job.
*/
int dm_deferred_set_add_work(struct dm_deferred_set *ds, struct list_head *work)
{
int r = 1;
unsigned int next_entry;
spin_lock_irq(&ds->lock);
if ((ds->sweeper == ds->current_entry) &&
!ds->entries[ds->current_entry].count)
r = 0;
else {
list_add(work, &ds->entries[ds->current_entry].work_items);
next_entry = ds_next(ds->current_entry);
if (!ds->entries[next_entry].count)
ds->current_entry = next_entry;
}
spin_unlock_irq(&ds->lock);
return r;
}
EXPORT_SYMBOL_GPL(dm_deferred_set_add_work);
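/*
 * Minimal usage sketch (hypothetical caller, compiled out): each in-flight
 * item takes an entry; a job queued with dm_deferred_set_add_work() runs
 * only once everything that entered earlier has dropped its entry.
 */
#if 0
/* Per-I/O: take an entry while the I/O is in flight. */
static struct dm_deferred_entry *example_io_start(struct dm_deferred_set *ds)
{
	return dm_deferred_entry_inc(ds);
}

static void example_io_end(struct dm_deferred_entry *entry)
{
	LIST_HEAD(released);

	dm_deferred_entry_dec(entry, &released);
	/* Run any jobs on &released whose prerequisites just drained. */
}

/* Queue a job that must wait for all currently in-flight I/O. */
static void example_quiesce(struct dm_deferred_set *ds, struct list_head *job)
{
	if (!dm_deferred_set_add_work(ds, job)) {
		/* Nothing in flight: run the job immediately. */
	}
}
#endif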
/*----------------------------------------------------------------*/
static int __init dm_bio_prison_init_v1(void)
{
_cell_cache = KMEM_CACHE(dm_bio_prison_cell, 0);
if (!_cell_cache)
return -ENOMEM;
return 0;
}
static void dm_bio_prison_exit_v1(void)
{
kmem_cache_destroy(_cell_cache);
_cell_cache = NULL;
}
static int (*_inits[])(void) __initdata = {
dm_bio_prison_init_v1,
dm_bio_prison_init_v2,
};
static void (*_exits[])(void) = {
dm_bio_prison_exit_v1,
dm_bio_prison_exit_v2,
};
static int __init dm_bio_prison_init(void)
{
const int count = ARRAY_SIZE(_inits);
int r, i;
for (i = 0; i < count; i++) {
r = _inits[i]();
if (r)
goto bad;
}
return 0;
bad:
while (i--)
_exits[i]();
return r;
}
static void __exit dm_bio_prison_exit(void)
{
int i = ARRAY_SIZE(_exits);
while (i--)
_exits[i]();
}
/*
* module hooks
*/
module_init(dm_bio_prison_init);
module_exit(dm_bio_prison_exit);
MODULE_DESCRIPTION(DM_NAME " bio prison");
MODULE_AUTHOR("Joe Thornber <[email protected]>");
MODULE_LICENSE("GPL");
| linux-master | drivers/md/dm-bio-prison-v1.c |
// SPDX-License-Identifier: GPL-2.0
/* Maximum size of each resync request */
#define RESYNC_BLOCK_SIZE (64*1024)
#define RESYNC_PAGES ((RESYNC_BLOCK_SIZE + PAGE_SIZE-1) / PAGE_SIZE)
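/* e.g. with 4 KiB pages: (65536 + 4095) / 4096 == 16 pages per resync block */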
/*
* Number of guaranteed raid bios in case of extreme VM load:
*/
#define NR_RAID_BIOS 256
/*
 * When we get a read error on a read-only array, we redirect to another
 * device without failing the first device, and without trying to
 * overwrite to correct the read error. To keep track of bad blocks on a
 * per-bio level, we store IO_BLOCKED in the appropriate 'bios' pointer.
 */
#define IO_BLOCKED ((struct bio *)1)
/*
 * When we successfully write to a known bad block, we need to remove the
 * bad-block marking, which must be done from process context. So we record
 * the success by setting devs[n].bio to IO_MADE_GOOD.
 */
#define IO_MADE_GOOD ((struct bio *)2)
#define BIO_SPECIAL(bio) ((unsigned long)bio <= 2)
#define MAX_PLUG_BIO 32
/* for managing resync I/O pages */
struct resync_pages {
void *raid_bio;
struct page *pages[RESYNC_PAGES];
};
struct raid1_plug_cb {
struct blk_plug_cb cb;
struct bio_list pending;
unsigned int count;
};
static void rbio_pool_free(void *rbio, void *data)
{
kfree(rbio);
}
static inline int resync_alloc_pages(struct resync_pages *rp,
gfp_t gfp_flags)
{
int i;
for (i = 0; i < RESYNC_PAGES; i++) {
rp->pages[i] = alloc_page(gfp_flags);
if (!rp->pages[i])
goto out_free;
}
return 0;
out_free:
while (--i >= 0)
put_page(rp->pages[i]);
return -ENOMEM;
}
static inline void resync_free_pages(struct resync_pages *rp)
{
int i;
for (i = 0; i < RESYNC_PAGES; i++)
put_page(rp->pages[i]);
}
static inline void resync_get_all_pages(struct resync_pages *rp)
{
int i;
for (i = 0; i < RESYNC_PAGES; i++)
get_page(rp->pages[i]);
}
static inline struct page *resync_fetch_page(struct resync_pages *rp,
unsigned idx)
{
if (WARN_ON_ONCE(idx >= RESYNC_PAGES))
return NULL;
return rp->pages[idx];
}
/*
 * 'struct resync_pages' stores the actual pages used for doing the resync
 * I/O; it is per-bio, so .bi_private points to it.
*/
static inline struct resync_pages *get_resync_pages(struct bio *bio)
{
return bio->bi_private;
}
/* generally called after bio_reset() for resetting the bvec */
static void md_bio_reset_resync_pages(struct bio *bio, struct resync_pages *rp,
int size)
{
int idx = 0;
/* initialize bvec table again */
do {
struct page *page = resync_fetch_page(rp, idx);
int len = min_t(int, size, PAGE_SIZE);
if (WARN_ON(!bio_add_page(bio, page, len, 0))) {
bio->bi_status = BLK_STS_RESOURCE;
bio_endio(bio);
return;
}
size -= len;
} while (idx++ < RESYNC_PAGES && size > 0);
}
static inline void raid1_submit_write(struct bio *bio)
{
struct md_rdev *rdev = (void *)bio->bi_bdev;
bio->bi_next = NULL;
bio_set_dev(bio, rdev->bdev);
if (test_bit(Faulty, &rdev->flags))
bio_io_error(bio);
else if (unlikely(bio_op(bio) == REQ_OP_DISCARD &&
!bdev_max_discard_sectors(bio->bi_bdev)))
/* Just ignore it */
bio_endio(bio);
else
submit_bio_noacct(bio);
}
static inline bool raid1_add_bio_to_plug(struct mddev *mddev, struct bio *bio,
blk_plug_cb_fn unplug, int copies)
{
struct raid1_plug_cb *plug = NULL;
struct blk_plug_cb *cb;
/*
	 * If the bitmap is not enabled, it's safe to submit the I/O directly,
	 * and this gives optimal performance.
*/
if (!md_bitmap_enabled(mddev->bitmap)) {
raid1_submit_write(bio);
return true;
}
cb = blk_check_plugged(unplug, mddev, sizeof(*plug));
if (!cb)
return false;
plug = container_of(cb, struct raid1_plug_cb, cb);
bio_list_add(&plug->pending, bio);
if (++plug->count / MAX_PLUG_BIO >= copies) {
list_del(&cb->list);
cb->callback(cb, false);
}
return true;
}
/*
 * current->bio_list is set in submit_bio() context. In that case bitmap I/O
 * would be added to the list and wait for the current I/O submission to
 * finish, while the current I/O submission must wait for the bitmap I/O to
 * complete. To avoid this deadlock, submit the bitmap I/O asynchronously.
*/
static inline void raid1_prepare_flush_writes(struct bitmap *bitmap)
{
if (current->bio_list)
md_bitmap_unplug_async(bitmap);
else
md_bitmap_unplug(bitmap);
}
| linux-master | drivers/md/raid1-10.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2012 Red Hat, Inc.
*
* This file is released under the GPL.
*/
#include "dm-cache-metadata.h"
#include "persistent-data/dm-array.h"
#include "persistent-data/dm-bitset.h"
#include "persistent-data/dm-space-map.h"
#include "persistent-data/dm-space-map-disk.h"
#include "persistent-data/dm-transaction-manager.h"
#include <linux/device-mapper.h>
#include <linux/refcount.h>
/*----------------------------------------------------------------*/
#define DM_MSG_PREFIX "cache metadata"
#define CACHE_SUPERBLOCK_MAGIC 06142003
#define CACHE_SUPERBLOCK_LOCATION 0
/*
 * Defines the range of metadata versions that this module can handle.
*/
#define MIN_CACHE_VERSION 1
#define MAX_CACHE_VERSION 2
/*
* 3 for btree insert +
* 2 for btree lookup used within space map
*/
#define CACHE_MAX_CONCURRENT_LOCKS 5
#define SPACE_MAP_ROOT_SIZE 128
enum superblock_flag_bits {
/* for spotting crashes that would invalidate the dirty bitset */
CLEAN_SHUTDOWN,
/* metadata must be checked using the tools */
NEEDS_CHECK,
};
/*
* Each mapping from cache block -> origin block carries a set of flags.
*/
enum mapping_bits {
/*
	 * A valid mapping. Because we're using an array, we clear this
	 * flag for a nonexistent mapping.
*/
M_VALID = 1,
/*
* The data on the cache is different from that on the origin.
* This flag is only used by metadata format 1.
*/
M_DIRTY = 2
};
struct cache_disk_superblock {
__le32 csum;
__le32 flags;
__le64 blocknr;
__u8 uuid[16];
__le64 magic;
__le32 version;
__u8 policy_name[CACHE_POLICY_NAME_SIZE];
__le32 policy_hint_size;
__u8 metadata_space_map_root[SPACE_MAP_ROOT_SIZE];
__le64 mapping_root;
__le64 hint_root;
__le64 discard_root;
__le64 discard_block_size;
__le64 discard_nr_blocks;
__le32 data_block_size;
__le32 metadata_block_size;
__le32 cache_blocks;
__le32 compat_flags;
__le32 compat_ro_flags;
__le32 incompat_flags;
__le32 read_hits;
__le32 read_misses;
__le32 write_hits;
__le32 write_misses;
__le32 policy_version[CACHE_POLICY_VERSION_SIZE];
/*
* Metadata format 2 fields.
*/
__le64 dirty_root;
} __packed;
struct dm_cache_metadata {
refcount_t ref_count;
struct list_head list;
unsigned int version;
struct block_device *bdev;
struct dm_block_manager *bm;
struct dm_space_map *metadata_sm;
struct dm_transaction_manager *tm;
struct dm_array_info info;
struct dm_array_info hint_info;
struct dm_disk_bitset discard_info;
struct rw_semaphore root_lock;
unsigned long flags;
dm_block_t root;
dm_block_t hint_root;
dm_block_t discard_root;
sector_t discard_block_size;
dm_dblock_t discard_nr_blocks;
sector_t data_block_size;
dm_cblock_t cache_blocks;
bool changed:1;
bool clean_when_opened:1;
char policy_name[CACHE_POLICY_NAME_SIZE];
unsigned int policy_version[CACHE_POLICY_VERSION_SIZE];
size_t policy_hint_size;
struct dm_cache_statistics stats;
/*
* Reading the space map root can fail, so we read it into this
* buffer before the superblock is locked and updated.
*/
__u8 metadata_space_map_root[SPACE_MAP_ROOT_SIZE];
/*
* Set if a transaction has to be aborted but the attempt to roll
* back to the previous (good) transaction failed. The only
* metadata operation permissible in this state is the closing of
* the device.
*/
bool fail_io:1;
/*
* Metadata format 2 fields.
*/
dm_block_t dirty_root;
struct dm_disk_bitset dirty_info;
/*
* These structures are used when loading metadata. They're too
* big to put on the stack.
*/
struct dm_array_cursor mapping_cursor;
struct dm_array_cursor hint_cursor;
struct dm_bitset_cursor dirty_cursor;
};
/*
*-----------------------------------------------------------------
* superblock validator
*-----------------------------------------------------------------
*/
#define SUPERBLOCK_CSUM_XOR 9031977
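/*
 * The checksum covers everything after the csum field itself, i.e.
 * sb_block_size - sizeof(__le32) bytes starting at ->flags.
 */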
static void sb_prepare_for_write(struct dm_block_validator *v,
struct dm_block *b,
size_t sb_block_size)
{
struct cache_disk_superblock *disk_super = dm_block_data(b);
disk_super->blocknr = cpu_to_le64(dm_block_location(b));
disk_super->csum = cpu_to_le32(dm_bm_checksum(&disk_super->flags,
sb_block_size - sizeof(__le32),
SUPERBLOCK_CSUM_XOR));
}
static int check_metadata_version(struct cache_disk_superblock *disk_super)
{
uint32_t metadata_version = le32_to_cpu(disk_super->version);
if (metadata_version < MIN_CACHE_VERSION || metadata_version > MAX_CACHE_VERSION) {
DMERR("Cache metadata version %u found, but only versions between %u and %u supported.",
metadata_version, MIN_CACHE_VERSION, MAX_CACHE_VERSION);
return -EINVAL;
}
return 0;
}
static int sb_check(struct dm_block_validator *v,
struct dm_block *b,
size_t sb_block_size)
{
struct cache_disk_superblock *disk_super = dm_block_data(b);
__le32 csum_le;
if (dm_block_location(b) != le64_to_cpu(disk_super->blocknr)) {
DMERR("%s failed: blocknr %llu: wanted %llu",
__func__, le64_to_cpu(disk_super->blocknr),
(unsigned long long)dm_block_location(b));
return -ENOTBLK;
}
if (le64_to_cpu(disk_super->magic) != CACHE_SUPERBLOCK_MAGIC) {
DMERR("%s failed: magic %llu: wanted %llu",
__func__, le64_to_cpu(disk_super->magic),
(unsigned long long)CACHE_SUPERBLOCK_MAGIC);
return -EILSEQ;
}
csum_le = cpu_to_le32(dm_bm_checksum(&disk_super->flags,
sb_block_size - sizeof(__le32),
SUPERBLOCK_CSUM_XOR));
if (csum_le != disk_super->csum) {
DMERR("%s failed: csum %u: wanted %u",
__func__, le32_to_cpu(csum_le), le32_to_cpu(disk_super->csum));
return -EILSEQ;
}
return check_metadata_version(disk_super);
}
static struct dm_block_validator sb_validator = {
.name = "superblock",
.prepare_for_write = sb_prepare_for_write,
.check = sb_check
};
/*----------------------------------------------------------------*/
static int superblock_read_lock(struct dm_cache_metadata *cmd,
struct dm_block **sblock)
{
return dm_bm_read_lock(cmd->bm, CACHE_SUPERBLOCK_LOCATION,
&sb_validator, sblock);
}
static int superblock_lock_zero(struct dm_cache_metadata *cmd,
struct dm_block **sblock)
{
return dm_bm_write_lock_zero(cmd->bm, CACHE_SUPERBLOCK_LOCATION,
&sb_validator, sblock);
}
static int superblock_lock(struct dm_cache_metadata *cmd,
struct dm_block **sblock)
{
return dm_bm_write_lock(cmd->bm, CACHE_SUPERBLOCK_LOCATION,
&sb_validator, sblock);
}
/*----------------------------------------------------------------*/
static int __superblock_all_zeroes(struct dm_block_manager *bm, bool *result)
{
int r;
unsigned int i;
struct dm_block *b;
__le64 *data_le, zero = cpu_to_le64(0);
unsigned int sb_block_size = dm_bm_block_size(bm) / sizeof(__le64);
/*
* We can't use a validator here - it may be all zeroes.
*/
r = dm_bm_read_lock(bm, CACHE_SUPERBLOCK_LOCATION, NULL, &b);
if (r)
return r;
data_le = dm_block_data(b);
*result = true;
for (i = 0; i < sb_block_size; i++) {
if (data_le[i] != zero) {
*result = false;
break;
}
}
dm_bm_unlock(b);
return 0;
}
static void __setup_mapping_info(struct dm_cache_metadata *cmd)
{
struct dm_btree_value_type vt;
vt.context = NULL;
vt.size = sizeof(__le64);
vt.inc = NULL;
vt.dec = NULL;
vt.equal = NULL;
dm_array_info_init(&cmd->info, cmd->tm, &vt);
if (cmd->policy_hint_size) {
vt.size = sizeof(__le32);
dm_array_info_init(&cmd->hint_info, cmd->tm, &vt);
}
}
static int __save_sm_root(struct dm_cache_metadata *cmd)
{
int r;
size_t metadata_len;
r = dm_sm_root_size(cmd->metadata_sm, &metadata_len);
if (r < 0)
return r;
return dm_sm_copy_root(cmd->metadata_sm, &cmd->metadata_space_map_root,
metadata_len);
}
static void __copy_sm_root(struct dm_cache_metadata *cmd,
struct cache_disk_superblock *disk_super)
{
memcpy(&disk_super->metadata_space_map_root,
&cmd->metadata_space_map_root,
sizeof(cmd->metadata_space_map_root));
}
static bool separate_dirty_bits(struct dm_cache_metadata *cmd)
{
return cmd->version >= 2;
}
static int __write_initial_superblock(struct dm_cache_metadata *cmd)
{
int r;
struct dm_block *sblock;
struct cache_disk_superblock *disk_super;
sector_t bdev_size = bdev_nr_sectors(cmd->bdev);
/* FIXME: see if we can lose the max sectors limit */
if (bdev_size > DM_CACHE_METADATA_MAX_SECTORS)
bdev_size = DM_CACHE_METADATA_MAX_SECTORS;
r = dm_tm_pre_commit(cmd->tm);
if (r < 0)
return r;
/*
* dm_sm_copy_root() can fail. So we need to do it before we start
* updating the superblock.
*/
r = __save_sm_root(cmd);
if (r)
return r;
r = superblock_lock_zero(cmd, &sblock);
if (r)
return r;
disk_super = dm_block_data(sblock);
disk_super->flags = 0;
memset(disk_super->uuid, 0, sizeof(disk_super->uuid));
disk_super->magic = cpu_to_le64(CACHE_SUPERBLOCK_MAGIC);
disk_super->version = cpu_to_le32(cmd->version);
memset(disk_super->policy_name, 0, sizeof(disk_super->policy_name));
memset(disk_super->policy_version, 0, sizeof(disk_super->policy_version));
disk_super->policy_hint_size = cpu_to_le32(0);
__copy_sm_root(cmd, disk_super);
disk_super->mapping_root = cpu_to_le64(cmd->root);
disk_super->hint_root = cpu_to_le64(cmd->hint_root);
disk_super->discard_root = cpu_to_le64(cmd->discard_root);
disk_super->discard_block_size = cpu_to_le64(cmd->discard_block_size);
disk_super->discard_nr_blocks = cpu_to_le64(from_dblock(cmd->discard_nr_blocks));
disk_super->metadata_block_size = cpu_to_le32(DM_CACHE_METADATA_BLOCK_SIZE);
disk_super->data_block_size = cpu_to_le32(cmd->data_block_size);
disk_super->cache_blocks = cpu_to_le32(0);
disk_super->read_hits = cpu_to_le32(0);
disk_super->read_misses = cpu_to_le32(0);
disk_super->write_hits = cpu_to_le32(0);
disk_super->write_misses = cpu_to_le32(0);
if (separate_dirty_bits(cmd))
disk_super->dirty_root = cpu_to_le64(cmd->dirty_root);
return dm_tm_commit(cmd->tm, sblock);
}
static int __format_metadata(struct dm_cache_metadata *cmd)
{
int r;
r = dm_tm_create_with_sm(cmd->bm, CACHE_SUPERBLOCK_LOCATION,
&cmd->tm, &cmd->metadata_sm);
if (r < 0) {
DMERR("tm_create_with_sm failed");
return r;
}
__setup_mapping_info(cmd);
r = dm_array_empty(&cmd->info, &cmd->root);
if (r < 0)
goto bad;
if (separate_dirty_bits(cmd)) {
dm_disk_bitset_init(cmd->tm, &cmd->dirty_info);
r = dm_bitset_empty(&cmd->dirty_info, &cmd->dirty_root);
if (r < 0)
goto bad;
}
dm_disk_bitset_init(cmd->tm, &cmd->discard_info);
r = dm_bitset_empty(&cmd->discard_info, &cmd->discard_root);
if (r < 0)
goto bad;
cmd->discard_block_size = 0;
cmd->discard_nr_blocks = 0;
r = __write_initial_superblock(cmd);
if (r)
goto bad;
cmd->clean_when_opened = true;
return 0;
bad:
dm_tm_destroy(cmd->tm);
dm_sm_destroy(cmd->metadata_sm);
return r;
}
static int __check_incompat_features(struct cache_disk_superblock *disk_super,
struct dm_cache_metadata *cmd)
{
uint32_t incompat_flags, features;
incompat_flags = le32_to_cpu(disk_super->incompat_flags);
features = incompat_flags & ~DM_CACHE_FEATURE_INCOMPAT_SUPP;
if (features) {
DMERR("could not access metadata due to unsupported optional features (%lx).",
(unsigned long)features);
return -EINVAL;
}
/*
* Check for read-only metadata to skip the following RDWR checks.
*/
if (bdev_read_only(cmd->bdev))
return 0;
features = le32_to_cpu(disk_super->compat_ro_flags) & ~DM_CACHE_FEATURE_COMPAT_RO_SUPP;
if (features) {
DMERR("could not access metadata RDWR due to unsupported optional features (%lx).",
(unsigned long)features);
return -EINVAL;
}
return 0;
}
static int __open_metadata(struct dm_cache_metadata *cmd)
{
int r;
struct dm_block *sblock;
struct cache_disk_superblock *disk_super;
unsigned long sb_flags;
r = superblock_read_lock(cmd, &sblock);
if (r < 0) {
DMERR("couldn't read lock superblock");
return r;
}
disk_super = dm_block_data(sblock);
/* Verify the data block size hasn't changed */
if (le32_to_cpu(disk_super->data_block_size) != cmd->data_block_size) {
DMERR("changing the data block size (from %u to %llu) is not supported",
le32_to_cpu(disk_super->data_block_size),
(unsigned long long)cmd->data_block_size);
r = -EINVAL;
goto bad;
}
r = __check_incompat_features(disk_super, cmd);
if (r < 0)
goto bad;
r = dm_tm_open_with_sm(cmd->bm, CACHE_SUPERBLOCK_LOCATION,
disk_super->metadata_space_map_root,
sizeof(disk_super->metadata_space_map_root),
&cmd->tm, &cmd->metadata_sm);
if (r < 0) {
DMERR("tm_open_with_sm failed");
goto bad;
}
__setup_mapping_info(cmd);
dm_disk_bitset_init(cmd->tm, &cmd->dirty_info);
dm_disk_bitset_init(cmd->tm, &cmd->discard_info);
sb_flags = le32_to_cpu(disk_super->flags);
cmd->clean_when_opened = test_bit(CLEAN_SHUTDOWN, &sb_flags);
dm_bm_unlock(sblock);
return 0;
bad:
dm_bm_unlock(sblock);
return r;
}
static int __open_or_format_metadata(struct dm_cache_metadata *cmd,
bool format_device)
{
int r;
bool unformatted = false;
r = __superblock_all_zeroes(cmd->bm, &unformatted);
if (r)
return r;
if (unformatted)
return format_device ? __format_metadata(cmd) : -EPERM;
return __open_metadata(cmd);
}
static int __create_persistent_data_objects(struct dm_cache_metadata *cmd,
bool may_format_device)
{
int r;
cmd->bm = dm_block_manager_create(cmd->bdev, DM_CACHE_METADATA_BLOCK_SIZE << SECTOR_SHIFT,
CACHE_MAX_CONCURRENT_LOCKS);
if (IS_ERR(cmd->bm)) {
DMERR("could not create block manager");
r = PTR_ERR(cmd->bm);
cmd->bm = NULL;
return r;
}
r = __open_or_format_metadata(cmd, may_format_device);
if (r) {
dm_block_manager_destroy(cmd->bm);
cmd->bm = NULL;
}
return r;
}
static void __destroy_persistent_data_objects(struct dm_cache_metadata *cmd,
bool destroy_bm)
{
dm_sm_destroy(cmd->metadata_sm);
dm_tm_destroy(cmd->tm);
if (destroy_bm)
dm_block_manager_destroy(cmd->bm);
}
typedef unsigned long (*flags_mutator)(unsigned long);
static void update_flags(struct cache_disk_superblock *disk_super,
flags_mutator mutator)
{
uint32_t sb_flags = mutator(le32_to_cpu(disk_super->flags));
disk_super->flags = cpu_to_le32(sb_flags);
}
static unsigned long set_clean_shutdown(unsigned long flags)
{
set_bit(CLEAN_SHUTDOWN, &flags);
return flags;
}
static unsigned long clear_clean_shutdown(unsigned long flags)
{
clear_bit(CLEAN_SHUTDOWN, &flags);
return flags;
}
static void read_superblock_fields(struct dm_cache_metadata *cmd,
struct cache_disk_superblock *disk_super)
{
cmd->version = le32_to_cpu(disk_super->version);
cmd->flags = le32_to_cpu(disk_super->flags);
cmd->root = le64_to_cpu(disk_super->mapping_root);
cmd->hint_root = le64_to_cpu(disk_super->hint_root);
cmd->discard_root = le64_to_cpu(disk_super->discard_root);
cmd->discard_block_size = le64_to_cpu(disk_super->discard_block_size);
cmd->discard_nr_blocks = to_dblock(le64_to_cpu(disk_super->discard_nr_blocks));
cmd->data_block_size = le32_to_cpu(disk_super->data_block_size);
cmd->cache_blocks = to_cblock(le32_to_cpu(disk_super->cache_blocks));
strncpy(cmd->policy_name, disk_super->policy_name, sizeof(cmd->policy_name));
cmd->policy_version[0] = le32_to_cpu(disk_super->policy_version[0]);
cmd->policy_version[1] = le32_to_cpu(disk_super->policy_version[1]);
cmd->policy_version[2] = le32_to_cpu(disk_super->policy_version[2]);
cmd->policy_hint_size = le32_to_cpu(disk_super->policy_hint_size);
cmd->stats.read_hits = le32_to_cpu(disk_super->read_hits);
cmd->stats.read_misses = le32_to_cpu(disk_super->read_misses);
cmd->stats.write_hits = le32_to_cpu(disk_super->write_hits);
cmd->stats.write_misses = le32_to_cpu(disk_super->write_misses);
if (separate_dirty_bits(cmd))
cmd->dirty_root = le64_to_cpu(disk_super->dirty_root);
cmd->changed = false;
}
/*
* The mutator updates the superblock flags.
*/
static int __begin_transaction_flags(struct dm_cache_metadata *cmd,
flags_mutator mutator)
{
int r;
struct cache_disk_superblock *disk_super;
struct dm_block *sblock;
r = superblock_lock(cmd, &sblock);
if (r)
return r;
disk_super = dm_block_data(sblock);
update_flags(disk_super, mutator);
read_superblock_fields(cmd, disk_super);
dm_bm_unlock(sblock);
return dm_bm_flush(cmd->bm);
}
static int __begin_transaction(struct dm_cache_metadata *cmd)
{
int r;
struct cache_disk_superblock *disk_super;
struct dm_block *sblock;
/*
	 * We re-read the superblock every time, though we shouldn't really
	 * need to.
*/
r = superblock_read_lock(cmd, &sblock);
if (r)
return r;
disk_super = dm_block_data(sblock);
read_superblock_fields(cmd, disk_super);
dm_bm_unlock(sblock);
return 0;
}
static int __commit_transaction(struct dm_cache_metadata *cmd,
flags_mutator mutator)
{
int r;
struct cache_disk_superblock *disk_super;
struct dm_block *sblock;
/*
* We need to know if the cache_disk_superblock exceeds a 512-byte sector.
*/
BUILD_BUG_ON(sizeof(struct cache_disk_superblock) > 512);
if (separate_dirty_bits(cmd)) {
r = dm_bitset_flush(&cmd->dirty_info, cmd->dirty_root,
&cmd->dirty_root);
if (r)
return r;
}
r = dm_bitset_flush(&cmd->discard_info, cmd->discard_root,
&cmd->discard_root);
if (r)
return r;
r = dm_tm_pre_commit(cmd->tm);
if (r < 0)
return r;
r = __save_sm_root(cmd);
if (r)
return r;
r = superblock_lock(cmd, &sblock);
if (r)
return r;
disk_super = dm_block_data(sblock);
disk_super->flags = cpu_to_le32(cmd->flags);
if (mutator)
update_flags(disk_super, mutator);
disk_super->mapping_root = cpu_to_le64(cmd->root);
if (separate_dirty_bits(cmd))
disk_super->dirty_root = cpu_to_le64(cmd->dirty_root);
disk_super->hint_root = cpu_to_le64(cmd->hint_root);
disk_super->discard_root = cpu_to_le64(cmd->discard_root);
disk_super->discard_block_size = cpu_to_le64(cmd->discard_block_size);
disk_super->discard_nr_blocks = cpu_to_le64(from_dblock(cmd->discard_nr_blocks));
disk_super->cache_blocks = cpu_to_le32(from_cblock(cmd->cache_blocks));
strncpy(disk_super->policy_name, cmd->policy_name, sizeof(disk_super->policy_name));
disk_super->policy_version[0] = cpu_to_le32(cmd->policy_version[0]);
disk_super->policy_version[1] = cpu_to_le32(cmd->policy_version[1]);
disk_super->policy_version[2] = cpu_to_le32(cmd->policy_version[2]);
disk_super->policy_hint_size = cpu_to_le32(cmd->policy_hint_size);
disk_super->read_hits = cpu_to_le32(cmd->stats.read_hits);
disk_super->read_misses = cpu_to_le32(cmd->stats.read_misses);
disk_super->write_hits = cpu_to_le32(cmd->stats.write_hits);
disk_super->write_misses = cpu_to_le32(cmd->stats.write_misses);
__copy_sm_root(cmd, disk_super);
return dm_tm_commit(cmd->tm, sblock);
}
/*----------------------------------------------------------------*/
/*
* The mappings are held in a dm-array that has 64-bit values stored in
 * little-endian format. The index is the cblock, the high 48 bits of the
 * value are the oblock and the low 16 bits are the flags.
*/
#define FLAGS_MASK ((1 << 16) - 1)
static __le64 pack_value(dm_oblock_t block, unsigned int flags)
{
uint64_t value = from_oblock(block);
value <<= 16;
value = value | (flags & FLAGS_MASK);
return cpu_to_le64(value);
}
static void unpack_value(__le64 value_le, dm_oblock_t *block, unsigned int *flags)
{
uint64_t value = le64_to_cpu(value_le);
uint64_t b = value >> 16;
*block = to_oblock(b);
*flags = value & FLAGS_MASK;
}
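/*
 * A worked example of the packing above (illustrative numbers, and
 * assuming the usual flag values M_VALID == 1 and M_DIRTY == 2):
 *
 *	pack_value(to_oblock(5), M_VALID | M_DIRTY)
 *		== cpu_to_le64((5 << 16) | 0x3) == cpu_to_le64(0x50003)
 *
 * unpack_value() reverses this: block = 0x50003 >> 16 = 5 and
 * flags = 0x50003 & FLAGS_MASK = 0x3.
 */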
/*----------------------------------------------------------------*/
static struct dm_cache_metadata *metadata_open(struct block_device *bdev,
sector_t data_block_size,
bool may_format_device,
size_t policy_hint_size,
unsigned int metadata_version)
{
int r;
struct dm_cache_metadata *cmd;
cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
if (!cmd) {
DMERR("could not allocate metadata struct");
return ERR_PTR(-ENOMEM);
}
cmd->version = metadata_version;
refcount_set(&cmd->ref_count, 1);
init_rwsem(&cmd->root_lock);
cmd->bdev = bdev;
cmd->data_block_size = data_block_size;
cmd->cache_blocks = 0;
cmd->policy_hint_size = policy_hint_size;
cmd->changed = true;
cmd->fail_io = false;
r = __create_persistent_data_objects(cmd, may_format_device);
if (r) {
kfree(cmd);
return ERR_PTR(r);
}
r = __begin_transaction_flags(cmd, clear_clean_shutdown);
if (r < 0) {
dm_cache_metadata_close(cmd);
return ERR_PTR(r);
}
return cmd;
}
/*
 * We keep a little list of ref-counted metadata objects to prevent two
 * different target instances from creating separate bufio instances. This
 * is an issue if a table is reloaded before the suspend.
*/
static DEFINE_MUTEX(table_lock);
static LIST_HEAD(table);
static struct dm_cache_metadata *lookup(struct block_device *bdev)
{
struct dm_cache_metadata *cmd;
list_for_each_entry(cmd, &table, list)
if (cmd->bdev == bdev) {
refcount_inc(&cmd->ref_count);
return cmd;
}
return NULL;
}
static struct dm_cache_metadata *lookup_or_open(struct block_device *bdev,
sector_t data_block_size,
bool may_format_device,
size_t policy_hint_size,
unsigned int metadata_version)
{
struct dm_cache_metadata *cmd, *cmd2;
mutex_lock(&table_lock);
cmd = lookup(bdev);
mutex_unlock(&table_lock);
if (cmd)
return cmd;
cmd = metadata_open(bdev, data_block_size, may_format_device,
policy_hint_size, metadata_version);
if (!IS_ERR(cmd)) {
mutex_lock(&table_lock);
cmd2 = lookup(bdev);
if (cmd2) {
mutex_unlock(&table_lock);
__destroy_persistent_data_objects(cmd, true);
kfree(cmd);
return cmd2;
}
list_add(&cmd->list, &table);
mutex_unlock(&table_lock);
}
return cmd;
}
static bool same_params(struct dm_cache_metadata *cmd, sector_t data_block_size)
{
if (cmd->data_block_size != data_block_size) {
DMERR("data_block_size (%llu) different from that in metadata (%llu)",
(unsigned long long) data_block_size,
(unsigned long long) cmd->data_block_size);
return false;
}
return true;
}
struct dm_cache_metadata *dm_cache_metadata_open(struct block_device *bdev,
sector_t data_block_size,
bool may_format_device,
size_t policy_hint_size,
unsigned int metadata_version)
{
struct dm_cache_metadata *cmd = lookup_or_open(bdev, data_block_size, may_format_device,
policy_hint_size, metadata_version);
if (!IS_ERR(cmd) && !same_params(cmd, data_block_size)) {
dm_cache_metadata_close(cmd);
return ERR_PTR(-EINVAL);
}
return cmd;
}
void dm_cache_metadata_close(struct dm_cache_metadata *cmd)
{
if (refcount_dec_and_test(&cmd->ref_count)) {
mutex_lock(&table_lock);
list_del(&cmd->list);
mutex_unlock(&table_lock);
if (!cmd->fail_io)
__destroy_persistent_data_objects(cmd, true);
kfree(cmd);
}
}
/*
* Checks that the given cache block is either unmapped or clean.
*/
static int block_clean_combined_dirty(struct dm_cache_metadata *cmd, dm_cblock_t b,
bool *result)
{
int r;
__le64 value;
dm_oblock_t ob;
unsigned int flags;
r = dm_array_get_value(&cmd->info, cmd->root, from_cblock(b), &value);
if (r)
return r;
unpack_value(value, &ob, &flags);
*result = !((flags & M_VALID) && (flags & M_DIRTY));
return 0;
}
static int blocks_are_clean_combined_dirty(struct dm_cache_metadata *cmd,
dm_cblock_t begin, dm_cblock_t end,
bool *result)
{
int r;
*result = true;
while (begin != end) {
r = block_clean_combined_dirty(cmd, begin, result);
if (r) {
DMERR("block_clean_combined_dirty failed");
return r;
}
if (!*result) {
DMERR("cache block %llu is dirty",
(unsigned long long) from_cblock(begin));
return 0;
}
begin = to_cblock(from_cblock(begin) + 1);
}
return 0;
}
static int blocks_are_clean_separate_dirty(struct dm_cache_metadata *cmd,
dm_cblock_t begin, dm_cblock_t end,
bool *result)
{
int r;
bool dirty_flag;
*result = true;
if (from_cblock(cmd->cache_blocks) == 0)
/* Nothing to do */
return 0;
r = dm_bitset_cursor_begin(&cmd->dirty_info, cmd->dirty_root,
from_cblock(cmd->cache_blocks), &cmd->dirty_cursor);
if (r) {
DMERR("%s: dm_bitset_cursor_begin for dirty failed", __func__);
return r;
}
r = dm_bitset_cursor_skip(&cmd->dirty_cursor, from_cblock(begin));
if (r) {
DMERR("%s: dm_bitset_cursor_skip for dirty failed", __func__);
dm_bitset_cursor_end(&cmd->dirty_cursor);
return r;
}
while (begin != end) {
/*
* We assume that unmapped blocks have their dirty bit
* cleared.
*/
dirty_flag = dm_bitset_cursor_get_value(&cmd->dirty_cursor);
if (dirty_flag) {
DMERR("%s: cache block %llu is dirty", __func__,
(unsigned long long) from_cblock(begin));
dm_bitset_cursor_end(&cmd->dirty_cursor);
*result = false;
return 0;
}
begin = to_cblock(from_cblock(begin) + 1);
if (begin == end)
break;
r = dm_bitset_cursor_next(&cmd->dirty_cursor);
if (r) {
DMERR("%s: dm_bitset_cursor_next for dirty failed", __func__);
dm_bitset_cursor_end(&cmd->dirty_cursor);
return r;
}
}
dm_bitset_cursor_end(&cmd->dirty_cursor);
return 0;
}
static int blocks_are_unmapped_or_clean(struct dm_cache_metadata *cmd,
dm_cblock_t begin, dm_cblock_t end,
bool *result)
{
if (separate_dirty_bits(cmd))
return blocks_are_clean_separate_dirty(cmd, begin, end, result);
else
return blocks_are_clean_combined_dirty(cmd, begin, end, result);
}
static bool cmd_write_lock(struct dm_cache_metadata *cmd)
{
down_write(&cmd->root_lock);
if (cmd->fail_io || dm_bm_is_read_only(cmd->bm)) {
up_write(&cmd->root_lock);
return false;
}
return true;
}
#define WRITE_LOCK(cmd) \
do { \
if (!cmd_write_lock((cmd))) \
return -EINVAL; \
} while (0)
#define WRITE_LOCK_VOID(cmd) \
do { \
if (!cmd_write_lock((cmd))) \
return; \
} while (0)
#define WRITE_UNLOCK(cmd) \
up_write(&(cmd)->root_lock)
static bool cmd_read_lock(struct dm_cache_metadata *cmd)
{
down_read(&cmd->root_lock);
if (cmd->fail_io) {
up_read(&cmd->root_lock);
return false;
}
return true;
}
#define READ_LOCK(cmd) \
do { \
if (!cmd_read_lock((cmd))) \
return -EINVAL; \
} while (0)
#define READ_LOCK_VOID(cmd) \
do { \
if (!cmd_read_lock((cmd))) \
return; \
} while (0)
#define READ_UNLOCK(cmd) \
up_read(&(cmd)->root_lock)
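/*
 * A minimal usage sketch for the macros above (the function and helper
 * names here are hypothetical, not part of this file). The macros bail
 * out early when fail_io is set (or, for writes, when the block manager
 * is read-only), so the body never touches failed metadata:
 *
 *	int dm_cache_example_op(struct dm_cache_metadata *cmd)
 *	{
 *		int r;
 *
 *		WRITE_LOCK(cmd);	// returns -EINVAL on fail_io/read-only
 *		r = __example_op(cmd);	// hypothetical locked helper
 *		WRITE_UNLOCK(cmd);
 *		return r;
 *	}
 */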
int dm_cache_resize(struct dm_cache_metadata *cmd, dm_cblock_t new_cache_size)
{
int r;
bool clean;
__le64 null_mapping = pack_value(0, 0);
WRITE_LOCK(cmd);
__dm_bless_for_disk(&null_mapping);
if (from_cblock(new_cache_size) < from_cblock(cmd->cache_blocks)) {
r = blocks_are_unmapped_or_clean(cmd, new_cache_size, cmd->cache_blocks, &clean);
if (r) {
__dm_unbless_for_disk(&null_mapping);
goto out;
}
if (!clean) {
DMERR("unable to shrink cache due to dirty blocks");
r = -EINVAL;
__dm_unbless_for_disk(&null_mapping);
goto out;
}
}
r = dm_array_resize(&cmd->info, cmd->root, from_cblock(cmd->cache_blocks),
from_cblock(new_cache_size),
&null_mapping, &cmd->root);
if (r)
goto out;
if (separate_dirty_bits(cmd)) {
r = dm_bitset_resize(&cmd->dirty_info, cmd->dirty_root,
from_cblock(cmd->cache_blocks), from_cblock(new_cache_size),
false, &cmd->dirty_root);
if (r)
goto out;
}
cmd->cache_blocks = new_cache_size;
cmd->changed = true;
out:
WRITE_UNLOCK(cmd);
return r;
}
int dm_cache_discard_bitset_resize(struct dm_cache_metadata *cmd,
sector_t discard_block_size,
dm_dblock_t new_nr_entries)
{
int r;
WRITE_LOCK(cmd);
r = dm_bitset_resize(&cmd->discard_info,
cmd->discard_root,
from_dblock(cmd->discard_nr_blocks),
from_dblock(new_nr_entries),
false, &cmd->discard_root);
if (!r) {
cmd->discard_block_size = discard_block_size;
cmd->discard_nr_blocks = new_nr_entries;
}
cmd->changed = true;
WRITE_UNLOCK(cmd);
return r;
}
static int __set_discard(struct dm_cache_metadata *cmd, dm_dblock_t b)
{
return dm_bitset_set_bit(&cmd->discard_info, cmd->discard_root,
from_dblock(b), &cmd->discard_root);
}
static int __clear_discard(struct dm_cache_metadata *cmd, dm_dblock_t b)
{
return dm_bitset_clear_bit(&cmd->discard_info, cmd->discard_root,
from_dblock(b), &cmd->discard_root);
}
static int __discard(struct dm_cache_metadata *cmd,
dm_dblock_t dblock, bool discard)
{
int r;
r = (discard ? __set_discard : __clear_discard)(cmd, dblock);
if (r)
return r;
cmd->changed = true;
return 0;
}
int dm_cache_set_discard(struct dm_cache_metadata *cmd,
dm_dblock_t dblock, bool discard)
{
int r;
WRITE_LOCK(cmd);
r = __discard(cmd, dblock, discard);
WRITE_UNLOCK(cmd);
return r;
}
static int __load_discards(struct dm_cache_metadata *cmd,
load_discard_fn fn, void *context)
{
int r = 0;
uint32_t b;
struct dm_bitset_cursor c;
if (from_dblock(cmd->discard_nr_blocks) == 0)
/* nothing to do */
return 0;
if (cmd->clean_when_opened) {
r = dm_bitset_flush(&cmd->discard_info, cmd->discard_root, &cmd->discard_root);
if (r)
return r;
r = dm_bitset_cursor_begin(&cmd->discard_info, cmd->discard_root,
from_dblock(cmd->discard_nr_blocks), &c);
if (r)
return r;
for (b = 0; ; b++) {
r = fn(context, cmd->discard_block_size, to_dblock(b),
dm_bitset_cursor_get_value(&c));
if (r)
break;
if (b >= (from_dblock(cmd->discard_nr_blocks) - 1))
break;
r = dm_bitset_cursor_next(&c);
if (r)
break;
}
dm_bitset_cursor_end(&c);
} else {
for (b = 0; b < from_dblock(cmd->discard_nr_blocks); b++) {
r = fn(context, cmd->discard_block_size, to_dblock(b), false);
if (r)
return r;
}
}
return r;
}
int dm_cache_load_discards(struct dm_cache_metadata *cmd,
load_discard_fn fn, void *context)
{
int r;
READ_LOCK(cmd);
r = __load_discards(cmd, fn, context);
READ_UNLOCK(cmd);
return r;
}
int dm_cache_size(struct dm_cache_metadata *cmd, dm_cblock_t *result)
{
READ_LOCK(cmd);
*result = cmd->cache_blocks;
READ_UNLOCK(cmd);
return 0;
}
static int __remove(struct dm_cache_metadata *cmd, dm_cblock_t cblock)
{
int r;
__le64 value = pack_value(0, 0);
__dm_bless_for_disk(&value);
r = dm_array_set_value(&cmd->info, cmd->root, from_cblock(cblock),
&value, &cmd->root);
if (r)
return r;
cmd->changed = true;
return 0;
}
int dm_cache_remove_mapping(struct dm_cache_metadata *cmd, dm_cblock_t cblock)
{
int r;
WRITE_LOCK(cmd);
r = __remove(cmd, cblock);
WRITE_UNLOCK(cmd);
return r;
}
static int __insert(struct dm_cache_metadata *cmd,
dm_cblock_t cblock, dm_oblock_t oblock)
{
int r;
__le64 value = pack_value(oblock, M_VALID);
__dm_bless_for_disk(&value);
r = dm_array_set_value(&cmd->info, cmd->root, from_cblock(cblock),
&value, &cmd->root);
if (r)
return r;
cmd->changed = true;
return 0;
}
int dm_cache_insert_mapping(struct dm_cache_metadata *cmd,
dm_cblock_t cblock, dm_oblock_t oblock)
{
int r;
WRITE_LOCK(cmd);
r = __insert(cmd, cblock, oblock);
WRITE_UNLOCK(cmd);
return r;
}
struct thunk {
load_mapping_fn fn;
void *context;
struct dm_cache_metadata *cmd;
bool respect_dirty_flags;
bool hints_valid;
};
static bool policy_unchanged(struct dm_cache_metadata *cmd,
struct dm_cache_policy *policy)
{
const char *policy_name = dm_cache_policy_get_name(policy);
const unsigned int *policy_version = dm_cache_policy_get_version(policy);
size_t policy_hint_size = dm_cache_policy_get_hint_size(policy);
/*
* Ensure policy names match.
*/
if (strncmp(cmd->policy_name, policy_name, sizeof(cmd->policy_name)))
return false;
/*
* Ensure policy major versions match.
*/
if (cmd->policy_version[0] != policy_version[0])
return false;
/*
* Ensure policy hint sizes match.
*/
if (cmd->policy_hint_size != policy_hint_size)
return false;
return true;
}
static bool hints_array_initialized(struct dm_cache_metadata *cmd)
{
return cmd->hint_root && cmd->policy_hint_size;
}
static bool hints_array_available(struct dm_cache_metadata *cmd,
struct dm_cache_policy *policy)
{
return cmd->clean_when_opened && policy_unchanged(cmd, policy) &&
hints_array_initialized(cmd);
}
static int __load_mapping_v1(struct dm_cache_metadata *cmd,
uint64_t cb, bool hints_valid,
struct dm_array_cursor *mapping_cursor,
struct dm_array_cursor *hint_cursor,
load_mapping_fn fn, void *context)
{
int r = 0;
__le64 mapping;
__le32 hint = 0;
__le64 *mapping_value_le;
__le32 *hint_value_le;
dm_oblock_t oblock;
unsigned int flags;
bool dirty = true;
dm_array_cursor_get_value(mapping_cursor, (void **) &mapping_value_le);
memcpy(&mapping, mapping_value_le, sizeof(mapping));
unpack_value(mapping, &oblock, &flags);
if (flags & M_VALID) {
if (hints_valid) {
dm_array_cursor_get_value(hint_cursor, (void **) &hint_value_le);
memcpy(&hint, hint_value_le, sizeof(hint));
}
if (cmd->clean_when_opened)
dirty = flags & M_DIRTY;
r = fn(context, oblock, to_cblock(cb), dirty,
le32_to_cpu(hint), hints_valid);
if (r) {
DMERR("policy couldn't load cache block %llu",
(unsigned long long) from_cblock(to_cblock(cb)));
}
}
return r;
}
static int __load_mapping_v2(struct dm_cache_metadata *cmd,
uint64_t cb, bool hints_valid,
struct dm_array_cursor *mapping_cursor,
struct dm_array_cursor *hint_cursor,
struct dm_bitset_cursor *dirty_cursor,
load_mapping_fn fn, void *context)
{
int r = 0;
__le64 mapping;
__le32 hint = 0;
__le64 *mapping_value_le;
__le32 *hint_value_le;
dm_oblock_t oblock;
unsigned int flags;
bool dirty = true;
dm_array_cursor_get_value(mapping_cursor, (void **) &mapping_value_le);
memcpy(&mapping, mapping_value_le, sizeof(mapping));
unpack_value(mapping, &oblock, &flags);
if (flags & M_VALID) {
if (hints_valid) {
dm_array_cursor_get_value(hint_cursor, (void **) &hint_value_le);
memcpy(&hint, hint_value_le, sizeof(hint));
}
if (cmd->clean_when_opened)
dirty = dm_bitset_cursor_get_value(dirty_cursor);
r = fn(context, oblock, to_cblock(cb), dirty,
le32_to_cpu(hint), hints_valid);
if (r) {
DMERR("policy couldn't load cache block %llu",
(unsigned long long) from_cblock(to_cblock(cb)));
}
}
return r;
}
static int __load_mappings(struct dm_cache_metadata *cmd,
struct dm_cache_policy *policy,
load_mapping_fn fn, void *context)
{
int r;
uint64_t cb;
bool hints_valid = hints_array_available(cmd, policy);
if (from_cblock(cmd->cache_blocks) == 0)
/* Nothing to do */
return 0;
r = dm_array_cursor_begin(&cmd->info, cmd->root, &cmd->mapping_cursor);
if (r)
return r;
if (hints_valid) {
r = dm_array_cursor_begin(&cmd->hint_info, cmd->hint_root, &cmd->hint_cursor);
if (r) {
dm_array_cursor_end(&cmd->mapping_cursor);
return r;
}
}
if (separate_dirty_bits(cmd)) {
r = dm_bitset_cursor_begin(&cmd->dirty_info, cmd->dirty_root,
from_cblock(cmd->cache_blocks),
&cmd->dirty_cursor);
if (r) {
dm_array_cursor_end(&cmd->hint_cursor);
dm_array_cursor_end(&cmd->mapping_cursor);
return r;
}
}
for (cb = 0; ; cb++) {
if (separate_dirty_bits(cmd))
r = __load_mapping_v2(cmd, cb, hints_valid,
&cmd->mapping_cursor,
&cmd->hint_cursor,
&cmd->dirty_cursor,
fn, context);
else
r = __load_mapping_v1(cmd, cb, hints_valid,
&cmd->mapping_cursor, &cmd->hint_cursor,
fn, context);
if (r)
goto out;
/*
* We need to break out before we move the cursors.
*/
if (cb >= (from_cblock(cmd->cache_blocks) - 1))
break;
r = dm_array_cursor_next(&cmd->mapping_cursor);
if (r) {
DMERR("dm_array_cursor_next for mapping failed");
goto out;
}
if (hints_valid) {
r = dm_array_cursor_next(&cmd->hint_cursor);
if (r) {
dm_array_cursor_end(&cmd->hint_cursor);
hints_valid = false;
}
}
if (separate_dirty_bits(cmd)) {
r = dm_bitset_cursor_next(&cmd->dirty_cursor);
if (r) {
DMERR("dm_bitset_cursor_next for dirty failed");
goto out;
}
}
}
out:
dm_array_cursor_end(&cmd->mapping_cursor);
if (hints_valid)
dm_array_cursor_end(&cmd->hint_cursor);
if (separate_dirty_bits(cmd))
dm_bitset_cursor_end(&cmd->dirty_cursor);
return r;
}
int dm_cache_load_mappings(struct dm_cache_metadata *cmd,
struct dm_cache_policy *policy,
load_mapping_fn fn, void *context)
{
int r;
READ_LOCK(cmd);
r = __load_mappings(cmd, policy, fn, context);
READ_UNLOCK(cmd);
return r;
}
static int __dump_mapping(void *context, uint64_t cblock, void *leaf)
{
__le64 value;
dm_oblock_t oblock;
unsigned int flags;
memcpy(&value, leaf, sizeof(value));
unpack_value(value, &oblock, &flags);
return 0;
}
static int __dump_mappings(struct dm_cache_metadata *cmd)
{
return dm_array_walk(&cmd->info, cmd->root, __dump_mapping, NULL);
}
void dm_cache_dump(struct dm_cache_metadata *cmd)
{
READ_LOCK_VOID(cmd);
__dump_mappings(cmd);
READ_UNLOCK(cmd);
}
int dm_cache_changed_this_transaction(struct dm_cache_metadata *cmd)
{
int r;
READ_LOCK(cmd);
r = cmd->changed;
READ_UNLOCK(cmd);
return r;
}
static int __dirty(struct dm_cache_metadata *cmd, dm_cblock_t cblock, bool dirty)
{
int r;
unsigned int flags;
dm_oblock_t oblock;
__le64 value;
r = dm_array_get_value(&cmd->info, cmd->root, from_cblock(cblock), &value);
if (r)
return r;
unpack_value(value, &oblock, &flags);
if (((flags & M_DIRTY) && dirty) || (!(flags & M_DIRTY) && !dirty))
/* nothing to be done */
return 0;
value = pack_value(oblock, (flags & ~M_DIRTY) | (dirty ? M_DIRTY : 0));
__dm_bless_for_disk(&value);
r = dm_array_set_value(&cmd->info, cmd->root, from_cblock(cblock),
&value, &cmd->root);
if (r)
return r;
cmd->changed = true;
return 0;
}
static int __set_dirty_bits_v1(struct dm_cache_metadata *cmd, unsigned int nr_bits, unsigned long *bits)
{
int r;
unsigned int i;
for (i = 0; i < nr_bits; i++) {
r = __dirty(cmd, to_cblock(i), test_bit(i, bits));
if (r)
return r;
}
return 0;
}
static int is_dirty_callback(uint32_t index, bool *value, void *context)
{
unsigned long *bits = context;
*value = test_bit(index, bits);
return 0;
}
static int __set_dirty_bits_v2(struct dm_cache_metadata *cmd, unsigned int nr_bits, unsigned long *bits)
{
int r = 0;
/* nr_bits is really just a sanity check */
if (nr_bits != from_cblock(cmd->cache_blocks)) {
DMERR("dirty bitset is wrong size");
return -EINVAL;
}
r = dm_bitset_del(&cmd->dirty_info, cmd->dirty_root);
if (r)
return r;
cmd->changed = true;
return dm_bitset_new(&cmd->dirty_info, &cmd->dirty_root, nr_bits, is_dirty_callback, bits);
}
int dm_cache_set_dirty_bits(struct dm_cache_metadata *cmd,
unsigned int nr_bits,
unsigned long *bits)
{
int r;
WRITE_LOCK(cmd);
if (separate_dirty_bits(cmd))
r = __set_dirty_bits_v2(cmd, nr_bits, bits);
else
r = __set_dirty_bits_v1(cmd, nr_bits, bits);
WRITE_UNLOCK(cmd);
return r;
}
void dm_cache_metadata_get_stats(struct dm_cache_metadata *cmd,
struct dm_cache_statistics *stats)
{
READ_LOCK_VOID(cmd);
*stats = cmd->stats;
READ_UNLOCK(cmd);
}
void dm_cache_metadata_set_stats(struct dm_cache_metadata *cmd,
struct dm_cache_statistics *stats)
{
WRITE_LOCK_VOID(cmd);
cmd->stats = *stats;
WRITE_UNLOCK(cmd);
}
int dm_cache_commit(struct dm_cache_metadata *cmd, bool clean_shutdown)
{
int r = -EINVAL;
flags_mutator mutator = (clean_shutdown ? set_clean_shutdown :
clear_clean_shutdown);
WRITE_LOCK(cmd);
if (cmd->fail_io)
goto out;
r = __commit_transaction(cmd, mutator);
if (r)
goto out;
r = __begin_transaction(cmd);
out:
WRITE_UNLOCK(cmd);
return r;
}
int dm_cache_get_free_metadata_block_count(struct dm_cache_metadata *cmd,
dm_block_t *result)
{
int r = -EINVAL;
READ_LOCK(cmd);
if (!cmd->fail_io)
r = dm_sm_get_nr_free(cmd->metadata_sm, result);
READ_UNLOCK(cmd);
return r;
}
int dm_cache_get_metadata_dev_size(struct dm_cache_metadata *cmd,
dm_block_t *result)
{
int r = -EINVAL;
READ_LOCK(cmd);
if (!cmd->fail_io)
r = dm_sm_get_nr_blocks(cmd->metadata_sm, result);
READ_UNLOCK(cmd);
return r;
}
/*----------------------------------------------------------------*/
static int get_hint(uint32_t index, void *value_le, void *context)
{
uint32_t value;
struct dm_cache_policy *policy = context;
value = policy_get_hint(policy, to_cblock(index));
*((__le32 *) value_le) = cpu_to_le32(value);
return 0;
}
/*
 * It's quicker to always delete the hint array and recreate it with
 * dm_array_new().
*/
static int write_hints(struct dm_cache_metadata *cmd, struct dm_cache_policy *policy)
{
int r;
size_t hint_size;
const char *policy_name = dm_cache_policy_get_name(policy);
const unsigned int *policy_version = dm_cache_policy_get_version(policy);
if (!policy_name[0] ||
(strlen(policy_name) > sizeof(cmd->policy_name) - 1))
return -EINVAL;
strncpy(cmd->policy_name, policy_name, sizeof(cmd->policy_name));
memcpy(cmd->policy_version, policy_version, sizeof(cmd->policy_version));
hint_size = dm_cache_policy_get_hint_size(policy);
if (!hint_size)
return 0; /* short-circuit hints initialization */
cmd->policy_hint_size = hint_size;
if (cmd->hint_root) {
r = dm_array_del(&cmd->hint_info, cmd->hint_root);
if (r)
return r;
}
return dm_array_new(&cmd->hint_info, &cmd->hint_root,
from_cblock(cmd->cache_blocks),
get_hint, policy);
}
int dm_cache_write_hints(struct dm_cache_metadata *cmd, struct dm_cache_policy *policy)
{
int r;
WRITE_LOCK(cmd);
r = write_hints(cmd, policy);
WRITE_UNLOCK(cmd);
return r;
}
int dm_cache_metadata_all_clean(struct dm_cache_metadata *cmd, bool *result)
{
int r;
READ_LOCK(cmd);
r = blocks_are_unmapped_or_clean(cmd, 0, cmd->cache_blocks, result);
READ_UNLOCK(cmd);
return r;
}
void dm_cache_metadata_set_read_only(struct dm_cache_metadata *cmd)
{
WRITE_LOCK_VOID(cmd);
dm_bm_set_read_only(cmd->bm);
WRITE_UNLOCK(cmd);
}
void dm_cache_metadata_set_read_write(struct dm_cache_metadata *cmd)
{
WRITE_LOCK_VOID(cmd);
dm_bm_set_read_write(cmd->bm);
WRITE_UNLOCK(cmd);
}
int dm_cache_metadata_set_needs_check(struct dm_cache_metadata *cmd)
{
int r;
struct dm_block *sblock;
struct cache_disk_superblock *disk_super;
WRITE_LOCK(cmd);
set_bit(NEEDS_CHECK, &cmd->flags);
r = superblock_lock(cmd, &sblock);
if (r) {
DMERR("couldn't read superblock");
goto out;
}
disk_super = dm_block_data(sblock);
disk_super->flags = cpu_to_le32(cmd->flags);
dm_bm_unlock(sblock);
out:
WRITE_UNLOCK(cmd);
return r;
}
int dm_cache_metadata_needs_check(struct dm_cache_metadata *cmd, bool *result)
{
READ_LOCK(cmd);
*result = !!test_bit(NEEDS_CHECK, &cmd->flags);
READ_UNLOCK(cmd);
return 0;
}
int dm_cache_metadata_abort(struct dm_cache_metadata *cmd)
{
int r = -EINVAL;
struct dm_block_manager *old_bm = NULL, *new_bm = NULL;
/* fail_io is double-checked with cmd->root_lock held below */
if (unlikely(cmd->fail_io))
return r;
/*
	 * The replacement block manager (new_bm) is created, and old_bm destroyed,
	 * outside of cmd->root_lock to avoid the ABBA deadlock that would otherwise
	 * result (due to the life-cycle of the shrinker associated with the block
	 * manager's bufio client vs cmd->root_lock).
* - must take shrinker_rwsem without holding cmd->root_lock
*/
new_bm = dm_block_manager_create(cmd->bdev, DM_CACHE_METADATA_BLOCK_SIZE << SECTOR_SHIFT,
CACHE_MAX_CONCURRENT_LOCKS);
WRITE_LOCK(cmd);
if (cmd->fail_io) {
WRITE_UNLOCK(cmd);
goto out;
}
__destroy_persistent_data_objects(cmd, false);
old_bm = cmd->bm;
if (IS_ERR(new_bm)) {
DMERR("could not create block manager during abort");
cmd->bm = NULL;
r = PTR_ERR(new_bm);
goto out_unlock;
}
cmd->bm = new_bm;
r = __open_or_format_metadata(cmd, false);
if (r) {
cmd->bm = NULL;
goto out_unlock;
}
new_bm = NULL;
out_unlock:
if (r)
cmd->fail_io = true;
WRITE_UNLOCK(cmd);
dm_block_manager_destroy(old_bm);
out:
if (new_bm && !IS_ERR(new_bm))
dm_block_manager_destroy(new_bm);
return r;
}
| linux-master | drivers/md/dm-cache-metadata.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2001-2002 Sistina Software (UK) Limited.
* Copyright (C) 2006-2008 Red Hat GmbH
*
* This file is released under the GPL.
*/
#include "dm-exception-store.h"
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/vmalloc.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/dm-io.h>
#define DM_MSG_PREFIX "transient snapshot"
/*
*---------------------------------------------------------------
* Implementation of the store for non-persistent snapshots.
*---------------------------------------------------------------
*/
struct transient_c {
sector_t next_free;
};
static void transient_dtr(struct dm_exception_store *store)
{
kfree(store->context);
}
static int transient_read_metadata(struct dm_exception_store *store,
int (*callback)(void *callback_context,
chunk_t old, chunk_t new),
void *callback_context)
{
return 0;
}
static int transient_prepare_exception(struct dm_exception_store *store,
struct dm_exception *e)
{
struct transient_c *tc = store->context;
sector_t size = get_dev_size(dm_snap_cow(store->snap)->bdev);
if (size < (tc->next_free + store->chunk_size))
return -1;
e->new_chunk = sector_to_chunk(store, tc->next_free);
tc->next_free += store->chunk_size;
return 0;
}
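/*
 * Illustrative allocation pattern (numbers are examples only): with a
 * chunk_size of 32 sectors, successive exceptions consume the COW device
 * linearly -- next_free advances 0, 32, 64, ... and the returned chunks
 * are sector_to_chunk() of those, i.e. 0, 1, 2, ... The store returns -1
 * once next_free + chunk_size would run past the device size.
 */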
static void transient_commit_exception(struct dm_exception_store *store,
struct dm_exception *e, int valid,
void (*callback)(void *, int success),
void *callback_context)
{
/* Just succeed */
callback(callback_context, valid);
}
static void transient_usage(struct dm_exception_store *store,
sector_t *total_sectors,
sector_t *sectors_allocated,
sector_t *metadata_sectors)
{
*sectors_allocated = ((struct transient_c *) store->context)->next_free;
*total_sectors = get_dev_size(dm_snap_cow(store->snap)->bdev);
*metadata_sectors = 0;
}
static int transient_ctr(struct dm_exception_store *store, char *options)
{
struct transient_c *tc;
tc = kmalloc(sizeof(struct transient_c), GFP_KERNEL);
if (!tc)
return -ENOMEM;
tc->next_free = 0;
store->context = tc;
return 0;
}
static unsigned int transient_status(struct dm_exception_store *store,
status_type_t status, char *result,
unsigned int maxlen)
{
unsigned int sz = 0;
switch (status) {
case STATUSTYPE_INFO:
break;
case STATUSTYPE_TABLE:
DMEMIT(" N %llu", (unsigned long long)store->chunk_size);
break;
case STATUSTYPE_IMA:
*result = '\0';
break;
}
return sz;
}
static struct dm_exception_store_type _transient_type = {
.name = "transient",
.module = THIS_MODULE,
.ctr = transient_ctr,
.dtr = transient_dtr,
.read_metadata = transient_read_metadata,
.prepare_exception = transient_prepare_exception,
.commit_exception = transient_commit_exception,
.usage = transient_usage,
.status = transient_status,
};
static struct dm_exception_store_type _transient_compat_type = {
.name = "N",
.module = THIS_MODULE,
.ctr = transient_ctr,
.dtr = transient_dtr,
.read_metadata = transient_read_metadata,
.prepare_exception = transient_prepare_exception,
.commit_exception = transient_commit_exception,
.usage = transient_usage,
.status = transient_status,
};
int dm_transient_snapshot_init(void)
{
int r;
r = dm_exception_store_type_register(&_transient_type);
if (r) {
DMWARN("Unable to register transient exception store type");
return r;
}
r = dm_exception_store_type_register(&_transient_compat_type);
if (r) {
DMWARN("Unable to register old-style transient exception store type");
dm_exception_store_type_unregister(&_transient_type);
return r;
}
return r;
}
void dm_transient_snapshot_exit(void)
{
dm_exception_store_type_unregister(&_transient_type);
dm_exception_store_type_unregister(&_transient_compat_type);
}
| linux-master | drivers/md/dm-snap-transient.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2014 Facebook. All rights reserved.
*
* This file is released under the GPL.
*/
#include <linux/device-mapper.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/dax.h>
#include <linux/slab.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/uio.h>
#define DM_MSG_PREFIX "log-writes"
/*
* This target will sequentially log all writes to the target device onto the
* log device. This is helpful for replaying writes to check for fs consistency
* at all times. This target provides a mechanism to mark specific events to
* check data at a later time. So for example you would:
*
* write data
* fsync
* dmsetup message /dev/whatever mark mymark
* unmount /mnt/test
*
* Then replay the log up to mymark and check the contents of the replay to
* verify it matches what was written.
*
 * We log writes only after they have been flushed; this makes the log describe
 * close to the order in which the data hits the actual disk, not its cache. So,
 * for example, the following sequence (W means write, C means complete)
*
* Wa,Wb,Wc,Cc,Ca,FLUSH,FUAd,Cb,CFLUSH,CFUAd
*
* Would result in the log looking like this:
*
* c,a,b,flush,fuad,<other writes>,<next flush>
*
* This is meant to help expose problems where file systems do not properly wait
* on data being written before invoking a FLUSH. FUA bypasses cache so once it
* completes it is added to the log as it should be on disk.
*
* We treat DISCARDs as if they don't bypass cache so that they are logged in
* order of completion along with the normal writes. If we didn't do it this
* way we would process all the discards first and then write all the data, when
* in fact we want to do the data and the discard in the order that they
* completed.
*/
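/*
 * Illustrative setup, matching the "log-writes <dev_path> <log_dev_path>"
 * table format accepted by log_writes_ctr() below (device and mark names
 * are examples only):
 *
 *	dmsetup create logged --table \
 *		"0 $(blockdev --getsz /dev/sdb) log-writes /dev/sdb /dev/sdc"
 *	mkfs.ext4 /dev/mapper/logged
 *	dmsetup message logged 0 mark mkfs-done
 */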
#define LOG_FLUSH_FLAG (1 << 0)
#define LOG_FUA_FLAG (1 << 1)
#define LOG_DISCARD_FLAG (1 << 2)
#define LOG_MARK_FLAG (1 << 3)
#define LOG_METADATA_FLAG (1 << 4)
#define WRITE_LOG_VERSION 1ULL
#define WRITE_LOG_MAGIC 0x6a736677736872ULL
#define WRITE_LOG_SUPER_SECTOR 0
/*
* The disk format for this is braindead simple.
*
* At byte 0 we have our super, followed by the following sequence for
* nr_entries:
*
* [ 1 sector ][ entry->nr_sectors ]
* [log_write_entry][ data written ]
*
* The log_write_entry takes up a full sector so we can have arbitrary length
* marks and it leaves us room for extra content in the future.
*/
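/*
 * Worked layout example (assuming a 512-byte log sectorsize): the super
 * occupies bytes 0-511, the first entry's log_write_entry sector starts
 * at byte 512, its data (if any) starts at byte 1024 and runs for
 * nr_sectors * 512 bytes, and the next entry header follows immediately
 * after that.
 */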
/*
* Basic info about the log for userspace.
*/
struct log_write_super {
__le64 magic;
__le64 version;
__le64 nr_entries;
__le32 sectorsize;
};
/*
* sector - the sector we wrote.
* nr_sectors - the number of sectors we wrote.
* flags - flags for this log entry.
* data_len - the size of the data in this log entry, this is for private log
* entry stuff, the MARK data provided by userspace for example.
*/
struct log_write_entry {
__le64 sector;
__le64 nr_sectors;
__le64 flags;
__le64 data_len;
};
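/*
 * Example entry (illustrative): a "mark" for the string "mymark" is
 * logged with sector = 0, nr_sectors = 0, flags = LOG_MARK_FLAG and
 * data_len = 6, the string itself being stored in the remainder of the
 * entry's sector (see log_one_block()).
 */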
struct log_writes_c {
struct dm_dev *dev;
struct dm_dev *logdev;
u64 logged_entries;
u32 sectorsize;
u32 sectorshift;
atomic_t io_blocks;
atomic_t pending_blocks;
sector_t next_sector;
sector_t end_sector;
bool logging_enabled;
bool device_supports_discard;
spinlock_t blocks_lock;
struct list_head unflushed_blocks;
struct list_head logging_blocks;
wait_queue_head_t wait;
struct task_struct *log_kthread;
struct completion super_done;
};
struct pending_block {
int vec_cnt;
u64 flags;
sector_t sector;
sector_t nr_sectors;
char *data;
u32 datalen;
struct list_head list;
struct bio_vec vecs[];
};
struct per_bio_data {
struct pending_block *block;
};
static inline sector_t bio_to_dev_sectors(struct log_writes_c *lc,
sector_t sectors)
{
return sectors >> (lc->sectorshift - SECTOR_SHIFT);
}
static inline sector_t dev_to_bio_sectors(struct log_writes_c *lc,
sector_t sectors)
{
return sectors << (lc->sectorshift - SECTOR_SHIFT);
}
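/*
 * Example of the conversions above: with a 4096-byte sectorsize,
 * sectorshift is 12 and SECTOR_SHIFT is 9, so bio_to_dev_sectors(lc, 8)
 * == 1 and dev_to_bio_sectors(lc, 1) == 8; with a 512-byte sectorsize
 * both are identity conversions.
 */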
static void put_pending_block(struct log_writes_c *lc)
{
if (atomic_dec_and_test(&lc->pending_blocks)) {
smp_mb__after_atomic();
if (waitqueue_active(&lc->wait))
wake_up(&lc->wait);
}
}
static void put_io_block(struct log_writes_c *lc)
{
if (atomic_dec_and_test(&lc->io_blocks)) {
smp_mb__after_atomic();
if (waitqueue_active(&lc->wait))
wake_up(&lc->wait);
}
}
static void log_end_io(struct bio *bio)
{
struct log_writes_c *lc = bio->bi_private;
if (bio->bi_status) {
unsigned long flags;
DMERR("Error writing log block, error=%d", bio->bi_status);
spin_lock_irqsave(&lc->blocks_lock, flags);
lc->logging_enabled = false;
spin_unlock_irqrestore(&lc->blocks_lock, flags);
}
bio_free_pages(bio);
put_io_block(lc);
bio_put(bio);
}
static void log_end_super(struct bio *bio)
{
struct log_writes_c *lc = bio->bi_private;
complete(&lc->super_done);
log_end_io(bio);
}
/*
* Meant to be called if there is an error, it will free all the pages
* associated with the block.
*/
static void free_pending_block(struct log_writes_c *lc,
struct pending_block *block)
{
int i;
for (i = 0; i < block->vec_cnt; i++) {
if (block->vecs[i].bv_page)
__free_page(block->vecs[i].bv_page);
}
kfree(block->data);
kfree(block);
put_pending_block(lc);
}
static int write_metadata(struct log_writes_c *lc, void *entry,
size_t entrylen, void *data, size_t datalen,
sector_t sector)
{
struct bio *bio;
struct page *page;
void *ptr;
size_t ret;
bio = bio_alloc(lc->logdev->bdev, 1, REQ_OP_WRITE, GFP_KERNEL);
bio->bi_iter.bi_size = 0;
bio->bi_iter.bi_sector = sector;
bio->bi_end_io = (sector == WRITE_LOG_SUPER_SECTOR) ?
log_end_super : log_end_io;
bio->bi_private = lc;
page = alloc_page(GFP_KERNEL);
if (!page) {
DMERR("Couldn't alloc log page");
bio_put(bio);
goto error;
}
ptr = kmap_local_page(page);
memcpy(ptr, entry, entrylen);
if (datalen)
memcpy(ptr + entrylen, data, datalen);
memset(ptr + entrylen + datalen, 0,
lc->sectorsize - entrylen - datalen);
kunmap_local(ptr);
ret = bio_add_page(bio, page, lc->sectorsize, 0);
if (ret != lc->sectorsize) {
DMERR("Couldn't add page to the log block");
goto error_bio;
}
submit_bio(bio);
return 0;
error_bio:
bio_put(bio);
__free_page(page);
error:
put_io_block(lc);
return -1;
}
static int write_inline_data(struct log_writes_c *lc, void *entry,
size_t entrylen, void *data, size_t datalen,
sector_t sector)
{
int bio_pages, pg_datalen, pg_sectorlen, i;
struct page *page;
struct bio *bio;
size_t ret;
void *ptr;
while (datalen) {
bio_pages = bio_max_segs(DIV_ROUND_UP(datalen, PAGE_SIZE));
atomic_inc(&lc->io_blocks);
bio = bio_alloc(lc->logdev->bdev, bio_pages, REQ_OP_WRITE,
GFP_KERNEL);
bio->bi_iter.bi_size = 0;
bio->bi_iter.bi_sector = sector;
bio->bi_end_io = log_end_io;
bio->bi_private = lc;
for (i = 0; i < bio_pages; i++) {
pg_datalen = min_t(int, datalen, PAGE_SIZE);
pg_sectorlen = ALIGN(pg_datalen, lc->sectorsize);
page = alloc_page(GFP_KERNEL);
if (!page) {
DMERR("Couldn't alloc inline data page");
goto error_bio;
}
ptr = kmap_local_page(page);
memcpy(ptr, data, pg_datalen);
if (pg_sectorlen > pg_datalen)
memset(ptr + pg_datalen, 0, pg_sectorlen - pg_datalen);
kunmap_local(ptr);
ret = bio_add_page(bio, page, pg_sectorlen, 0);
if (ret != pg_sectorlen) {
DMERR("Couldn't add page of inline data");
__free_page(page);
goto error_bio;
}
datalen -= pg_datalen;
data += pg_datalen;
}
submit_bio(bio);
sector += bio_pages * PAGE_SECTORS;
}
return 0;
error_bio:
bio_free_pages(bio);
bio_put(bio);
put_io_block(lc);
return -1;
}
static int log_one_block(struct log_writes_c *lc,
struct pending_block *block, sector_t sector)
{
struct bio *bio;
struct log_write_entry entry;
size_t metadatalen, ret;
int i;
entry.sector = cpu_to_le64(block->sector);
entry.nr_sectors = cpu_to_le64(block->nr_sectors);
entry.flags = cpu_to_le64(block->flags);
entry.data_len = cpu_to_le64(block->datalen);
metadatalen = (block->flags & LOG_MARK_FLAG) ? block->datalen : 0;
if (write_metadata(lc, &entry, sizeof(entry), block->data,
metadatalen, sector)) {
free_pending_block(lc, block);
return -1;
}
sector += dev_to_bio_sectors(lc, 1);
if (block->datalen && metadatalen == 0) {
if (write_inline_data(lc, &entry, sizeof(entry), block->data,
block->datalen, sector)) {
free_pending_block(lc, block);
return -1;
}
/* we don't support both inline data & bio data */
goto out;
}
if (!block->vec_cnt)
goto out;
atomic_inc(&lc->io_blocks);
bio = bio_alloc(lc->logdev->bdev, bio_max_segs(block->vec_cnt),
REQ_OP_WRITE, GFP_KERNEL);
bio->bi_iter.bi_size = 0;
bio->bi_iter.bi_sector = sector;
bio->bi_end_io = log_end_io;
bio->bi_private = lc;
for (i = 0; i < block->vec_cnt; i++) {
/*
* The page offset is always 0 because we allocate a new page
		 * for every bvec in the original bio, for simplicity's sake.
*/
ret = bio_add_page(bio, block->vecs[i].bv_page,
block->vecs[i].bv_len, 0);
if (ret != block->vecs[i].bv_len) {
atomic_inc(&lc->io_blocks);
submit_bio(bio);
bio = bio_alloc(lc->logdev->bdev,
bio_max_segs(block->vec_cnt - i),
REQ_OP_WRITE, GFP_KERNEL);
bio->bi_iter.bi_size = 0;
bio->bi_iter.bi_sector = sector;
bio->bi_end_io = log_end_io;
bio->bi_private = lc;
ret = bio_add_page(bio, block->vecs[i].bv_page,
block->vecs[i].bv_len, 0);
if (ret != block->vecs[i].bv_len) {
DMERR("Couldn't add page on new bio?");
bio_put(bio);
goto error;
}
}
sector += block->vecs[i].bv_len >> SECTOR_SHIFT;
}
submit_bio(bio);
out:
kfree(block->data);
kfree(block);
put_pending_block(lc);
return 0;
error:
free_pending_block(lc, block);
put_io_block(lc);
return -1;
}
static int log_super(struct log_writes_c *lc)
{
struct log_write_super super;
super.magic = cpu_to_le64(WRITE_LOG_MAGIC);
super.version = cpu_to_le64(WRITE_LOG_VERSION);
super.nr_entries = cpu_to_le64(lc->logged_entries);
super.sectorsize = cpu_to_le32(lc->sectorsize);
if (write_metadata(lc, &super, sizeof(super), NULL, 0,
WRITE_LOG_SUPER_SECTOR)) {
DMERR("Couldn't write super");
return -1;
}
/*
	 * The super sector should be written in order, otherwise the
* nr_entries could be rewritten incorrectly by an old bio.
*/
wait_for_completion_io(&lc->super_done);
return 0;
}
static inline sector_t logdev_last_sector(struct log_writes_c *lc)
{
return bdev_nr_sectors(lc->logdev->bdev);
}
static int log_writes_kthread(void *arg)
{
struct log_writes_c *lc = arg;
sector_t sector = 0;
while (!kthread_should_stop()) {
bool super = false;
bool logging_enabled;
struct pending_block *block = NULL;
int ret;
spin_lock_irq(&lc->blocks_lock);
if (!list_empty(&lc->logging_blocks)) {
block = list_first_entry(&lc->logging_blocks,
struct pending_block, list);
list_del_init(&block->list);
if (!lc->logging_enabled)
goto next;
sector = lc->next_sector;
if (!(block->flags & LOG_DISCARD_FLAG))
lc->next_sector += dev_to_bio_sectors(lc, block->nr_sectors);
lc->next_sector += dev_to_bio_sectors(lc, 1);
/*
* Apparently the size of the device may not be known
* right away, so handle this properly.
*/
if (!lc->end_sector)
lc->end_sector = logdev_last_sector(lc);
if (lc->end_sector &&
lc->next_sector >= lc->end_sector) {
DMERR("Ran out of space on the logdev");
lc->logging_enabled = false;
goto next;
}
lc->logged_entries++;
atomic_inc(&lc->io_blocks);
super = (block->flags & (LOG_FUA_FLAG | LOG_MARK_FLAG));
if (super)
atomic_inc(&lc->io_blocks);
}
next:
logging_enabled = lc->logging_enabled;
spin_unlock_irq(&lc->blocks_lock);
if (block) {
if (logging_enabled) {
ret = log_one_block(lc, block, sector);
if (!ret && super)
ret = log_super(lc);
if (ret) {
spin_lock_irq(&lc->blocks_lock);
lc->logging_enabled = false;
spin_unlock_irq(&lc->blocks_lock);
}
} else
free_pending_block(lc, block);
continue;
}
if (!try_to_freeze()) {
set_current_state(TASK_INTERRUPTIBLE);
if (!kthread_should_stop() &&
list_empty(&lc->logging_blocks))
schedule();
__set_current_state(TASK_RUNNING);
}
}
return 0;
}
/*
* Construct a log-writes mapping:
* log-writes <dev_path> <log_dev_path>
*/
static int log_writes_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
struct log_writes_c *lc;
struct dm_arg_set as;
const char *devname, *logdevname;
int ret;
as.argc = argc;
as.argv = argv;
if (argc < 2) {
ti->error = "Invalid argument count";
return -EINVAL;
}
lc = kzalloc(sizeof(struct log_writes_c), GFP_KERNEL);
if (!lc) {
ti->error = "Cannot allocate context";
return -ENOMEM;
}
spin_lock_init(&lc->blocks_lock);
INIT_LIST_HEAD(&lc->unflushed_blocks);
INIT_LIST_HEAD(&lc->logging_blocks);
init_waitqueue_head(&lc->wait);
init_completion(&lc->super_done);
atomic_set(&lc->io_blocks, 0);
atomic_set(&lc->pending_blocks, 0);
devname = dm_shift_arg(&as);
ret = dm_get_device(ti, devname, dm_table_get_mode(ti->table), &lc->dev);
if (ret) {
ti->error = "Device lookup failed";
goto bad;
}
logdevname = dm_shift_arg(&as);
ret = dm_get_device(ti, logdevname, dm_table_get_mode(ti->table),
&lc->logdev);
if (ret) {
ti->error = "Log device lookup failed";
dm_put_device(ti, lc->dev);
goto bad;
}
lc->sectorsize = bdev_logical_block_size(lc->dev->bdev);
lc->sectorshift = ilog2(lc->sectorsize);
lc->log_kthread = kthread_run(log_writes_kthread, lc, "log-write");
if (IS_ERR(lc->log_kthread)) {
ret = PTR_ERR(lc->log_kthread);
ti->error = "Couldn't alloc kthread";
dm_put_device(ti, lc->dev);
dm_put_device(ti, lc->logdev);
goto bad;
}
/*
* next_sector is in 512b sectors to correspond to what bi_sector expects.
* The super starts at sector 0, and the next_sector is the next logical
* one based on the sectorsize of the device.
*/
lc->next_sector = lc->sectorsize >> SECTOR_SHIFT;
lc->logging_enabled = true;
lc->end_sector = logdev_last_sector(lc);
lc->device_supports_discard = true;
ti->num_flush_bios = 1;
ti->flush_supported = true;
ti->num_discard_bios = 1;
ti->discards_supported = true;
ti->per_io_data_size = sizeof(struct per_bio_data);
ti->private = lc;
return 0;
bad:
kfree(lc);
return ret;
}
static int log_mark(struct log_writes_c *lc, char *data)
{
struct pending_block *block;
size_t maxsize = lc->sectorsize - sizeof(struct log_write_entry);
block = kzalloc(sizeof(struct pending_block), GFP_KERNEL);
if (!block) {
DMERR("Error allocating pending block");
return -ENOMEM;
}
block->data = kstrndup(data, maxsize - 1, GFP_KERNEL);
if (!block->data) {
DMERR("Error copying mark data");
kfree(block);
return -ENOMEM;
}
atomic_inc(&lc->pending_blocks);
block->datalen = strlen(block->data);
block->flags |= LOG_MARK_FLAG;
spin_lock_irq(&lc->blocks_lock);
list_add_tail(&block->list, &lc->logging_blocks);
spin_unlock_irq(&lc->blocks_lock);
wake_up_process(lc->log_kthread);
return 0;
}
static void log_writes_dtr(struct dm_target *ti)
{
struct log_writes_c *lc = ti->private;
spin_lock_irq(&lc->blocks_lock);
list_splice_init(&lc->unflushed_blocks, &lc->logging_blocks);
spin_unlock_irq(&lc->blocks_lock);
/*
* This is just nice to have since it'll update the super to include the
	 * unflushed blocks; if it fails we don't really care.
*/
log_mark(lc, "dm-log-writes-end");
wake_up_process(lc->log_kthread);
wait_event(lc->wait, !atomic_read(&lc->io_blocks) &&
!atomic_read(&lc->pending_blocks));
kthread_stop(lc->log_kthread);
WARN_ON(!list_empty(&lc->logging_blocks));
WARN_ON(!list_empty(&lc->unflushed_blocks));
dm_put_device(ti, lc->dev);
dm_put_device(ti, lc->logdev);
kfree(lc);
}
static void normal_map_bio(struct dm_target *ti, struct bio *bio)
{
struct log_writes_c *lc = ti->private;
bio_set_dev(bio, lc->dev->bdev);
}
static int log_writes_map(struct dm_target *ti, struct bio *bio)
{
struct log_writes_c *lc = ti->private;
struct per_bio_data *pb = dm_per_bio_data(bio, sizeof(struct per_bio_data));
struct pending_block *block;
struct bvec_iter iter;
struct bio_vec bv;
size_t alloc_size;
int i = 0;
bool flush_bio = (bio->bi_opf & REQ_PREFLUSH);
bool fua_bio = (bio->bi_opf & REQ_FUA);
bool discard_bio = (bio_op(bio) == REQ_OP_DISCARD);
bool meta_bio = (bio->bi_opf & REQ_META);
pb->block = NULL;
/* Don't bother doing anything if logging has been disabled */
if (!lc->logging_enabled)
goto map_bio;
/*
* Map reads as normal.
*/
if (bio_data_dir(bio) == READ)
goto map_bio;
/* No sectors and not a flush? Don't care */
if (!bio_sectors(bio) && !flush_bio)
goto map_bio;
/*
* Discards will have bi_size set but there's no actual data, so just
* allocate the size of the pending block.
*/
if (discard_bio)
alloc_size = sizeof(struct pending_block);
else
alloc_size = struct_size(block, vecs, bio_segments(bio));
block = kzalloc(alloc_size, GFP_NOIO);
if (!block) {
DMERR("Error allocating pending block");
spin_lock_irq(&lc->blocks_lock);
lc->logging_enabled = false;
spin_unlock_irq(&lc->blocks_lock);
return DM_MAPIO_KILL;
}
INIT_LIST_HEAD(&block->list);
pb->block = block;
atomic_inc(&lc->pending_blocks);
if (flush_bio)
block->flags |= LOG_FLUSH_FLAG;
if (fua_bio)
block->flags |= LOG_FUA_FLAG;
if (discard_bio)
block->flags |= LOG_DISCARD_FLAG;
if (meta_bio)
block->flags |= LOG_METADATA_FLAG;
block->sector = bio_to_dev_sectors(lc, bio->bi_iter.bi_sector);
block->nr_sectors = bio_to_dev_sectors(lc, bio_sectors(bio));
/* We don't need the data, just submit */
if (discard_bio) {
WARN_ON(flush_bio || fua_bio);
if (lc->device_supports_discard)
goto map_bio;
bio_endio(bio);
return DM_MAPIO_SUBMITTED;
}
/* Flush bio, splice the unflushed blocks onto this list and submit */
if (flush_bio && !bio_sectors(bio)) {
spin_lock_irq(&lc->blocks_lock);
list_splice_init(&lc->unflushed_blocks, &block->list);
spin_unlock_irq(&lc->blocks_lock);
goto map_bio;
}
/*
* We will write this bio somewhere else way later so we need to copy
* the actual contents into new pages so we know the data will always be
* there.
*
* We do this because this could be a bio from O_DIRECT in which case we
* can't just hold onto the page until some later point, we have to
* manually copy the contents.
*/
bio_for_each_segment(bv, bio, iter) {
struct page *page;
void *dst;
page = alloc_page(GFP_NOIO);
if (!page) {
DMERR("Error allocing page");
free_pending_block(lc, block);
spin_lock_irq(&lc->blocks_lock);
lc->logging_enabled = false;
spin_unlock_irq(&lc->blocks_lock);
return DM_MAPIO_KILL;
}
dst = kmap_local_page(page);
memcpy_from_bvec(dst, &bv);
kunmap_local(dst);
block->vecs[i].bv_page = page;
block->vecs[i].bv_len = bv.bv_len;
block->vec_cnt++;
i++;
}
/* Had a flush with data in it, weird */
if (flush_bio) {
spin_lock_irq(&lc->blocks_lock);
list_splice_init(&lc->unflushed_blocks, &block->list);
spin_unlock_irq(&lc->blocks_lock);
}
map_bio:
normal_map_bio(ti, bio);
return DM_MAPIO_REMAPPED;
}
static int normal_end_io(struct dm_target *ti, struct bio *bio,
blk_status_t *error)
{
struct log_writes_c *lc = ti->private;
struct per_bio_data *pb = dm_per_bio_data(bio, sizeof(struct per_bio_data));
if (bio_data_dir(bio) == WRITE && pb->block) {
struct pending_block *block = pb->block;
unsigned long flags;
spin_lock_irqsave(&lc->blocks_lock, flags);
if (block->flags & LOG_FLUSH_FLAG) {
list_splice_tail_init(&block->list, &lc->logging_blocks);
list_add_tail(&block->list, &lc->logging_blocks);
wake_up_process(lc->log_kthread);
} else if (block->flags & LOG_FUA_FLAG) {
list_add_tail(&block->list, &lc->logging_blocks);
wake_up_process(lc->log_kthread);
} else
list_add_tail(&block->list, &lc->unflushed_blocks);
spin_unlock_irqrestore(&lc->blocks_lock, flags);
}
return DM_ENDIO_DONE;
}
/*
* INFO format: <logged entries> <highest allocated sector>
*/
static void log_writes_status(struct dm_target *ti, status_type_t type,
unsigned int status_flags, char *result,
unsigned int maxlen)
{
unsigned int sz = 0;
struct log_writes_c *lc = ti->private;
switch (type) {
case STATUSTYPE_INFO:
DMEMIT("%llu %llu", lc->logged_entries,
(unsigned long long)lc->next_sector - 1);
if (!lc->logging_enabled)
DMEMIT(" logging_disabled");
break;
case STATUSTYPE_TABLE:
DMEMIT("%s %s", lc->dev->name, lc->logdev->name);
break;
case STATUSTYPE_IMA:
*result = '\0';
break;
}
}
static int log_writes_prepare_ioctl(struct dm_target *ti,
struct block_device **bdev)
{
struct log_writes_c *lc = ti->private;
struct dm_dev *dev = lc->dev;
*bdev = dev->bdev;
/*
* Only pass ioctls through if the device sizes match exactly.
*/
if (ti->len != bdev_nr_sectors(dev->bdev))
return 1;
return 0;
}
static int log_writes_iterate_devices(struct dm_target *ti,
iterate_devices_callout_fn fn,
void *data)
{
struct log_writes_c *lc = ti->private;
return fn(ti, lc->dev, 0, ti->len, data);
}
/*
* Messages supported:
* mark <mark data> - specify the marked data.
*/
static int log_writes_message(struct dm_target *ti, unsigned int argc, char **argv,
char *result, unsigned int maxlen)
{
int r = -EINVAL;
struct log_writes_c *lc = ti->private;
if (argc != 2) {
DMWARN("Invalid log-writes message arguments, expect 2 arguments, got %d", argc);
return r;
}
if (!strcasecmp(argv[0], "mark"))
r = log_mark(lc, argv[1]);
else
DMWARN("Unrecognised log writes target message received: %s", argv[0]);
return r;
}
static void log_writes_io_hints(struct dm_target *ti, struct queue_limits *limits)
{
struct log_writes_c *lc = ti->private;
if (!bdev_max_discard_sectors(lc->dev->bdev)) {
lc->device_supports_discard = false;
limits->discard_granularity = lc->sectorsize;
limits->max_discard_sectors = (UINT_MAX >> SECTOR_SHIFT);
}
limits->logical_block_size = bdev_logical_block_size(lc->dev->bdev);
limits->physical_block_size = bdev_physical_block_size(lc->dev->bdev);
limits->io_min = limits->physical_block_size;
limits->dma_alignment = limits->logical_block_size - 1;
}
#if IS_ENABLED(CONFIG_FS_DAX)
static struct dax_device *log_writes_dax_pgoff(struct dm_target *ti,
pgoff_t *pgoff)
{
struct log_writes_c *lc = ti->private;
*pgoff += (get_start_sect(lc->dev->bdev) >> PAGE_SECTORS_SHIFT);
return lc->dev->dax_dev;
}
static long log_writes_dax_direct_access(struct dm_target *ti, pgoff_t pgoff,
long nr_pages, enum dax_access_mode mode, void **kaddr,
pfn_t *pfn)
{
struct dax_device *dax_dev = log_writes_dax_pgoff(ti, &pgoff);
return dax_direct_access(dax_dev, pgoff, nr_pages, mode, kaddr, pfn);
}
static int log_writes_dax_zero_page_range(struct dm_target *ti, pgoff_t pgoff,
size_t nr_pages)
{
struct dax_device *dax_dev = log_writes_dax_pgoff(ti, &pgoff);
return dax_zero_page_range(dax_dev, pgoff, nr_pages << PAGE_SHIFT);
}
static size_t log_writes_dax_recovery_write(struct dm_target *ti,
pgoff_t pgoff, void *addr, size_t bytes, struct iov_iter *i)
{
struct dax_device *dax_dev = log_writes_dax_pgoff(ti, &pgoff);
return dax_recovery_write(dax_dev, pgoff, addr, bytes, i);
}
#else
#define log_writes_dax_direct_access NULL
#define log_writes_dax_zero_page_range NULL
#define log_writes_dax_recovery_write NULL
#endif
static struct target_type log_writes_target = {
.name = "log-writes",
.version = {1, 1, 0},
.module = THIS_MODULE,
.ctr = log_writes_ctr,
.dtr = log_writes_dtr,
.map = log_writes_map,
.end_io = normal_end_io,
.status = log_writes_status,
.prepare_ioctl = log_writes_prepare_ioctl,
.message = log_writes_message,
.iterate_devices = log_writes_iterate_devices,
.io_hints = log_writes_io_hints,
.direct_access = log_writes_dax_direct_access,
.dax_zero_page_range = log_writes_dax_zero_page_range,
.dax_recovery_write = log_writes_dax_recovery_write,
};
module_dm(log_writes);
MODULE_DESCRIPTION(DM_NAME " log writes target");
MODULE_AUTHOR("Josef Bacik <[email protected]>");
MODULE_LICENSE("GPL");
| linux-master | drivers/md/dm-log-writes.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2015 Google, Inc.
*
* Author: Sami Tolvanen <[email protected]>
*/
#include "dm-verity-fec.h"
#include <linux/math64.h>
#define DM_MSG_PREFIX "verity-fec"
/*
* Returns true if error correction has been configured.
*/
bool verity_fec_is_enabled(struct dm_verity *v)
{
return v->fec && v->fec->dev;
}
/*
* Return a pointer to dm_verity_fec_io after dm_verity_io and its variable
* length fields.
*/
static inline struct dm_verity_fec_io *fec_io(struct dm_verity_io *io)
{
return (struct dm_verity_fec_io *) verity_io_digest_end(io->v, io);
}
/*
* Return an interleaved offset for a byte in RS block.
*/
static inline u64 fec_interleave(struct dm_verity *v, u64 offset)
{
u32 mod;
mod = do_div(offset, v->fec->rsn);
return offset + mod * (v->fec->rounds << v->data_dev_block_bits);
}
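/*
* Worked example (hypothetical small parameters, for illustration only):
* with rsn = 2, rounds = 3 and 4-byte data blocks (data_dev_block_bits = 2),
* byte offset 5 maps to
*
*   mod    = 5 % 2 = 1
*   offset = 5 / 2 = 2
*   result = 2 + 1 * (3 << 2) = 14
*
* i.e. consecutive bytes of one RS codeword land rounds blocks apart on
* disk, which limits the damage a localized burst of corruption can do to
* any single codeword.
*/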
/*
* Decode an RS block using Reed-Solomon.
*/
static int fec_decode_rs8(struct dm_verity *v, struct dm_verity_fec_io *fio,
u8 *data, u8 *fec, int neras)
{
int i;
uint16_t par[DM_VERITY_FEC_RSM - DM_VERITY_FEC_MIN_RSN];
for (i = 0; i < v->fec->roots; i++)
par[i] = fec[i];
return decode_rs8(fio->rs, data, par, v->fec->rsn, NULL, neras,
fio->erasures, 0, NULL);
}
/*
* Read error-correcting codes for the requested RS block. Returns a pointer
* to the buffer data (the parity bytes start at *offset). Caller is
* responsible for releasing buf.
*/
static u8 *fec_read_parity(struct dm_verity *v, u64 rsb, int index,
unsigned int *offset, struct dm_buffer **buf)
{
u64 position, block, rem;
u8 *res;
position = (index + rsb) * v->fec->roots;
block = div64_u64_rem(position, v->fec->io_size, &rem);
*offset = (unsigned int)rem;
res = dm_bufio_read(v->fec->bufio, block, buf);
if (IS_ERR(res)) {
DMERR("%s: FEC %llu: parity read failed (block %llu): %ld",
v->data_dev->name, (unsigned long long)rsb,
(unsigned long long)block, PTR_ERR(res));
*buf = NULL;
}
return res;
}
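/*
* Worked example (illustrative numbers): with roots = 2 and io_size = 4096,
* the parity for rsb = 100, index = 3 sits at byte position
* (3 + 100) * 2 = 206, which resolves to dm-bufio block 206 / 4096 = 0
* with *offset = 206.
*/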
/* Loop over each preallocated buffer slot. */
#define fec_for_each_prealloc_buffer(__i) \
for (__i = 0; __i < DM_VERITY_FEC_BUF_PREALLOC; __i++)
/* Loop over each extra buffer slot. */
#define fec_for_each_extra_buffer(io, __i) \
for (__i = DM_VERITY_FEC_BUF_PREALLOC; __i < DM_VERITY_FEC_BUF_MAX; __i++)
/* Loop over each allocated buffer. */
#define fec_for_each_buffer(io, __i) \
for (__i = 0; __i < (io)->nbufs; __i++)
/* Loop over each RS block in each allocated buffer. */
#define fec_for_each_buffer_rs_block(io, __i, __j) \
fec_for_each_buffer(io, __i) \
for (__j = 0; __j < 1 << DM_VERITY_FEC_BUF_RS_BITS; __j++)
/*
* Return a pointer to the current RS block when called inside
* fec_for_each_buffer_rs_block.
*/
static inline u8 *fec_buffer_rs_block(struct dm_verity *v,
struct dm_verity_fec_io *fio,
unsigned int i, unsigned int j)
{
return &fio->bufs[i][j * v->fec->rsn];
}
/*
* Return an index to the current RS block when called inside
* fec_for_each_buffer_rs_block.
*/
static inline unsigned int fec_buffer_rs_index(unsigned int i, unsigned int j)
{
return (i << DM_VERITY_FEC_BUF_RS_BITS) + j;
}
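/*
* For example, with DM_VERITY_FEC_BUF_RS_BITS = 4 (its value in
* dm-verity-fec.h at the time of writing), each buffer holds 16 RS blocks,
* so buffer i = 1, slot j = 3 yields index (1 << 4) + 3 = 19.
*/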
/*
* Decode all RS blocks from buffers and copy corrected bytes into fio->output
* starting from block_offset.
*/
static int fec_decode_bufs(struct dm_verity *v, struct dm_verity_fec_io *fio,
u64 rsb, int byte_index, unsigned int block_offset,
int neras)
{
int r, corrected = 0, res;
struct dm_buffer *buf;
unsigned int n, i, offset;
u8 *par, *block;
par = fec_read_parity(v, rsb, block_offset, &offset, &buf);
if (IS_ERR(par))
return PTR_ERR(par);
/*
* Decode the RS blocks we have in bufs. Each RS block results in
* one corrected target byte and consumes fec->roots parity bytes.
*/
fec_for_each_buffer_rs_block(fio, n, i) {
block = fec_buffer_rs_block(v, fio, n, i);
res = fec_decode_rs8(v, fio, block, &par[offset], neras);
if (res < 0) {
r = res;
goto error;
}
corrected += res;
fio->output[block_offset] = block[byte_index];
block_offset++;
if (block_offset >= 1 << v->data_dev_block_bits)
goto done;
/* read the next block when we run out of parity bytes */
offset += v->fec->roots;
if (offset >= v->fec->io_size) {
dm_bufio_release(buf);
par = fec_read_parity(v, rsb, block_offset, &offset, &buf);
if (IS_ERR(par))
return PTR_ERR(par);
}
}
done:
r = corrected;
error:
dm_bufio_release(buf);
if (r < 0 && neras)
DMERR_LIMIT("%s: FEC %llu: failed to correct: %d",
v->data_dev->name, (unsigned long long)rsb, r);
else if (r > 0)
DMWARN_LIMIT("%s: FEC %llu: corrected %d errors",
v->data_dev->name, (unsigned long long)rsb, r);
return r;
}
/*
* Locate data block erasures using verity hashes.
*/
static int fec_is_erasure(struct dm_verity *v, struct dm_verity_io *io,
u8 *want_digest, u8 *data)
{
if (unlikely(verity_hash(v, verity_io_hash_req(v, io),
data, 1 << v->data_dev_block_bits,
verity_io_real_digest(v, io))))
return 0;
return memcmp(verity_io_real_digest(v, io), want_digest,
v->digest_size) != 0;
}
/*
* Read data blocks that are part of the RS block and deinterleave as much as
* fits into buffers. Check for erasure locations if @neras is non-NULL.
*/
static int fec_read_bufs(struct dm_verity *v, struct dm_verity_io *io,
u64 rsb, u64 target, unsigned int block_offset,
int *neras)
{
bool is_zero;
int i, j, target_index = -1;
struct dm_buffer *buf;
struct dm_bufio_client *bufio;
struct dm_verity_fec_io *fio = fec_io(io);
u64 block, ileaved;
u8 *bbuf, *rs_block;
u8 want_digest[HASH_MAX_DIGESTSIZE];
unsigned int n, k;
if (neras)
*neras = 0;
if (WARN_ON(v->digest_size > sizeof(want_digest)))
return -EINVAL;
/*
* read each of the rsn data blocks that are part of the RS block, and
* deinterleave contents into available bufs
*/
for (i = 0; i < v->fec->rsn; i++) {
ileaved = fec_interleave(v, rsb * v->fec->rsn + i);
/*
* target is the data block we want to correct, target_index is
* the index of this block within the rsn RS blocks
*/
if (ileaved == target)
target_index = i;
block = ileaved >> v->data_dev_block_bits;
bufio = v->fec->data_bufio;
if (block >= v->data_blocks) {
block -= v->data_blocks;
/*
* blocks outside the area were assumed to contain
* zeros when encoding data was generated
*/
if (unlikely(block >= v->fec->hash_blocks))
continue;
block += v->hash_start;
bufio = v->bufio;
}
bbuf = dm_bufio_read(bufio, block, &buf);
if (IS_ERR(bbuf)) {
DMWARN_LIMIT("%s: FEC %llu: read failed (%llu): %ld",
v->data_dev->name,
(unsigned long long)rsb,
(unsigned long long)block, PTR_ERR(bbuf));
/* assume the block is corrupted */
if (neras && *neras <= v->fec->roots)
fio->erasures[(*neras)++] = i;
continue;
}
/* locate erasures if the block is on the data device */
if (bufio == v->fec->data_bufio &&
verity_hash_for_block(v, io, block, want_digest,
&is_zero) == 0) {
/* skip known zero blocks entirely */
if (is_zero)
goto done;
/*
* skip if we have already found the theoretical
* maximum number (i.e. fec->roots) of erasures
*/
if (neras && *neras <= v->fec->roots &&
fec_is_erasure(v, io, want_digest, bbuf))
fio->erasures[(*neras)++] = i;
}
/*
* deinterleave and copy the bytes that fit into bufs,
* starting from block_offset
*/
fec_for_each_buffer_rs_block(fio, n, j) {
k = fec_buffer_rs_index(n, j) + block_offset;
if (k >= 1 << v->data_dev_block_bits)
goto done;
rs_block = fec_buffer_rs_block(v, fio, n, j);
rs_block[i] = bbuf[k];
}
done:
dm_bufio_release(buf);
}
return target_index;
}
/*
* Allocate RS control structure and FEC buffers from preallocated mempools,
* and attempt to allocate as many extra buffers as available.
*/
static int fec_alloc_bufs(struct dm_verity *v, struct dm_verity_fec_io *fio)
{
unsigned int n;
if (!fio->rs)
fio->rs = mempool_alloc(&v->fec->rs_pool, GFP_NOIO);
fec_for_each_prealloc_buffer(n) {
if (fio->bufs[n])
continue;
fio->bufs[n] = mempool_alloc(&v->fec->prealloc_pool, GFP_NOWAIT);
if (unlikely(!fio->bufs[n])) {
DMERR("failed to allocate FEC buffer");
return -ENOMEM;
}
}
/* try to allocate the maximum number of buffers */
fec_for_each_extra_buffer(fio, n) {
if (fio->bufs[n])
continue;
fio->bufs[n] = mempool_alloc(&v->fec->extra_pool, GFP_NOWAIT);
/* we can manage with even one buffer if necessary */
if (unlikely(!fio->bufs[n]))
break;
}
fio->nbufs = n;
if (!fio->output)
fio->output = mempool_alloc(&v->fec->output_pool, GFP_NOIO);
return 0;
}
/*
* Initialize buffers and clear erasures. fec_read_bufs() assumes buffers are
* zeroed before deinterleaving.
*/
static void fec_init_bufs(struct dm_verity *v, struct dm_verity_fec_io *fio)
{
unsigned int n;
fec_for_each_buffer(fio, n)
memset(fio->bufs[n], 0, v->fec->rsn << DM_VERITY_FEC_BUF_RS_BITS);
memset(fio->erasures, 0, sizeof(fio->erasures));
}
/*
* Decode all RS blocks in a single data block and return the target block
* (indicated by @offset) in fio->output. If @use_erasures is true, uses
* hashes to locate erasures.
*/
static int fec_decode_rsb(struct dm_verity *v, struct dm_verity_io *io,
struct dm_verity_fec_io *fio, u64 rsb, u64 offset,
bool use_erasures)
{
int r, neras = 0;
unsigned int pos;
r = fec_alloc_bufs(v, fio);
if (unlikely(r < 0))
return r;
for (pos = 0; pos < 1 << v->data_dev_block_bits; ) {
fec_init_bufs(v, fio);
r = fec_read_bufs(v, io, rsb, offset, pos,
use_erasures ? &neras : NULL);
if (unlikely(r < 0))
return r;
r = fec_decode_bufs(v, fio, rsb, r, pos, neras);
if (r < 0)
return r;
pos += fio->nbufs << DM_VERITY_FEC_BUF_RS_BITS;
}
/* Always re-validate the corrected block against the expected hash */
r = verity_hash(v, verity_io_hash_req(v, io), fio->output,
1 << v->data_dev_block_bits,
verity_io_real_digest(v, io));
if (unlikely(r < 0))
return r;
if (memcmp(verity_io_real_digest(v, io), verity_io_want_digest(v, io),
v->digest_size)) {
DMERR_LIMIT("%s: FEC %llu: failed to correct (%d erasures)",
v->data_dev->name, (unsigned long long)rsb, neras);
return -EILSEQ;
}
return 0;
}
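/*
* Illustrative sizing for the loop above, assuming 4 KiB blocks and 4 KiB
* pages: each buffer contributes 1 << DM_VERITY_FEC_BUF_RS_BITS = 16
* corrected bytes per pass, so the single preallocated buffer alone needs
* 4096 / 16 = 256 passes, while a full set of DM_VERITY_FEC_BUF_MAX = 256
* buffers corrects the whole block in one pass.
*/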
static int fec_bv_copy(struct dm_verity *v, struct dm_verity_io *io, u8 *data,
size_t len)
{
struct dm_verity_fec_io *fio = fec_io(io);
memcpy(data, &fio->output[fio->output_pos], len);
fio->output_pos += len;
return 0;
}
/*
* Correct errors in a block. Copies corrected block to dest if non-NULL,
* otherwise to a bio_vec starting from iter.
*/
int verity_fec_decode(struct dm_verity *v, struct dm_verity_io *io,
enum verity_block_type type, sector_t block, u8 *dest,
struct bvec_iter *iter)
{
int r;
struct dm_verity_fec_io *fio = fec_io(io);
u64 offset, res, rsb;
if (!verity_fec_is_enabled(v))
return -EOPNOTSUPP;
if (fio->level >= DM_VERITY_FEC_MAX_RECURSION) {
DMWARN_LIMIT("%s: FEC: recursion too deep", v->data_dev->name);
return -EIO;
}
fio->level++;
if (type == DM_VERITY_BLOCK_TYPE_METADATA)
block = block - v->hash_start + v->data_blocks;
/*
* For RS(M, N), the continuous FEC data is divided into blocks of N
* bytes. Since block size may not be divisible by N, the last block
* is zero padded when decoding.
*
* Each byte of the block is covered by a different RS(M, N) code,
* and each code is interleaved over N blocks to make it less likely
* that bursty corruption will leave us in unrecoverable state.
*/
offset = block << v->data_dev_block_bits;
res = div64_u64(offset, v->fec->rounds << v->data_dev_block_bits);
/*
* The base RS block we can feed to the interleaver to find out all
* blocks required for decoding.
*/
rsb = offset - res * (v->fec->rounds << v->data_dev_block_bits);
/*
* Locating erasures is slow, so attempt to recover the block without
* them first. Do a second attempt with erasures if the corruption is
* bad enough.
*/
r = fec_decode_rsb(v, io, fio, rsb, offset, false);
if (r < 0) {
r = fec_decode_rsb(v, io, fio, rsb, offset, true);
if (r < 0)
goto done;
}
if (dest)
memcpy(dest, fio->output, 1 << v->data_dev_block_bits);
else if (iter) {
fio->output_pos = 0;
r = verity_for_bv_block(v, io, iter, fec_bv_copy);
}
done:
fio->level--;
return r;
}
/*
* Clean up per-bio data.
*/
void verity_fec_finish_io(struct dm_verity_io *io)
{
unsigned int n;
struct dm_verity_fec *f = io->v->fec;
struct dm_verity_fec_io *fio = fec_io(io);
if (!verity_fec_is_enabled(io->v))
return;
mempool_free(fio->rs, &f->rs_pool);
fec_for_each_prealloc_buffer(n)
mempool_free(fio->bufs[n], &f->prealloc_pool);
fec_for_each_extra_buffer(fio, n)
mempool_free(fio->bufs[n], &f->extra_pool);
mempool_free(fio->output, &f->output_pool);
}
/*
* Initialize per-bio data.
*/
void verity_fec_init_io(struct dm_verity_io *io)
{
struct dm_verity_fec_io *fio = fec_io(io);
if (!verity_fec_is_enabled(io->v))
return;
fio->rs = NULL;
memset(fio->bufs, 0, sizeof(fio->bufs));
fio->nbufs = 0;
fio->output = NULL;
fio->level = 0;
}
/*
* Append feature arguments and values to the status table.
*/
unsigned int verity_fec_status_table(struct dm_verity *v, unsigned int sz,
char *result, unsigned int maxlen)
{
if (!verity_fec_is_enabled(v))
return sz;
DMEMIT(" " DM_VERITY_OPT_FEC_DEV " %s "
DM_VERITY_OPT_FEC_BLOCKS " %llu "
DM_VERITY_OPT_FEC_START " %llu "
DM_VERITY_OPT_FEC_ROOTS " %d",
v->fec->dev->name,
(unsigned long long)v->fec->blocks,
(unsigned long long)v->fec->start,
v->fec->roots);
return sz;
}
void verity_fec_dtr(struct dm_verity *v)
{
struct dm_verity_fec *f = v->fec;
if (!verity_fec_is_enabled(v))
goto out;
mempool_exit(&f->rs_pool);
mempool_exit(&f->prealloc_pool);
mempool_exit(&f->extra_pool);
mempool_exit(&f->output_pool);
kmem_cache_destroy(f->cache);
if (f->data_bufio)
dm_bufio_client_destroy(f->data_bufio);
if (f->bufio)
dm_bufio_client_destroy(f->bufio);
if (f->dev)
dm_put_device(v->ti, f->dev);
out:
kfree(f);
v->fec = NULL;
}
static void *fec_rs_alloc(gfp_t gfp_mask, void *pool_data)
{
struct dm_verity *v = pool_data;
return init_rs_gfp(8, 0x11d, 0, 1, v->fec->roots, gfp_mask);
}
static void fec_rs_free(void *element, void *pool_data)
{
struct rs_control *rs = element;
if (rs)
free_rs(rs);
}
bool verity_is_fec_opt_arg(const char *arg_name)
{
return (!strcasecmp(arg_name, DM_VERITY_OPT_FEC_DEV) ||
!strcasecmp(arg_name, DM_VERITY_OPT_FEC_BLOCKS) ||
!strcasecmp(arg_name, DM_VERITY_OPT_FEC_START) ||
!strcasecmp(arg_name, DM_VERITY_OPT_FEC_ROOTS));
}
int verity_fec_parse_opt_args(struct dm_arg_set *as, struct dm_verity *v,
unsigned int *argc, const char *arg_name)
{
int r;
struct dm_target *ti = v->ti;
const char *arg_value;
unsigned long long num_ll;
unsigned char num_c;
char dummy;
if (!*argc) {
ti->error = "FEC feature arguments require a value";
return -EINVAL;
}
arg_value = dm_shift_arg(as);
(*argc)--;
if (!strcasecmp(arg_name, DM_VERITY_OPT_FEC_DEV)) {
r = dm_get_device(ti, arg_value, BLK_OPEN_READ, &v->fec->dev);
if (r) {
ti->error = "FEC device lookup failed";
return r;
}
} else if (!strcasecmp(arg_name, DM_VERITY_OPT_FEC_BLOCKS)) {
if (sscanf(arg_value, "%llu%c", &num_ll, &dummy) != 1 ||
((sector_t)(num_ll << (v->data_dev_block_bits - SECTOR_SHIFT))
>> (v->data_dev_block_bits - SECTOR_SHIFT) != num_ll)) {
ti->error = "Invalid " DM_VERITY_OPT_FEC_BLOCKS;
return -EINVAL;
}
v->fec->blocks = num_ll;
} else if (!strcasecmp(arg_name, DM_VERITY_OPT_FEC_START)) {
if (sscanf(arg_value, "%llu%c", &num_ll, &dummy) != 1 ||
((sector_t)(num_ll << (v->data_dev_block_bits - SECTOR_SHIFT)) >>
(v->data_dev_block_bits - SECTOR_SHIFT) != num_ll)) {
ti->error = "Invalid " DM_VERITY_OPT_FEC_START;
return -EINVAL;
}
v->fec->start = num_ll;
} else if (!strcasecmp(arg_name, DM_VERITY_OPT_FEC_ROOTS)) {
if (sscanf(arg_value, "%hhu%c", &num_c, &dummy) != 1 || !num_c ||
num_c < (DM_VERITY_FEC_RSM - DM_VERITY_FEC_MAX_RSN) ||
num_c > (DM_VERITY_FEC_RSM - DM_VERITY_FEC_MIN_RSN)) {
ti->error = "Invalid " DM_VERITY_OPT_FEC_ROOTS;
return -EINVAL;
}
v->fec->roots = num_c;
} else {
ti->error = "Unrecognized verity FEC feature request";
return -EINVAL;
}
return 0;
}
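/*
* Example of how the arguments parsed above appear in a verity table line
* (sizes, device names and digests below are placeholders):
*
*   dmsetup create vroot --readonly --table \
*     "0 204800 verity 1 /dev/sda1 /dev/sda2 4096 4096 25600 1 sha256 \
*      <root_digest> <salt> \
*      8 use_fec_from_device /dev/sda3 fec_blocks 26215 fec_start 0 fec_roots 2"
*
* The "8" before the feature arguments is the optional-argument count:
* four FEC argument names plus their four values.
*/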
/*
* Allocate dm_verity_fec for v->fec. Must be called before verity_fec_ctr.
*/
int verity_fec_ctr_alloc(struct dm_verity *v)
{
struct dm_verity_fec *f;
f = kzalloc(sizeof(struct dm_verity_fec), GFP_KERNEL);
if (!f) {
v->ti->error = "Cannot allocate FEC structure";
return -ENOMEM;
}
v->fec = f;
return 0;
}
/*
* Validate arguments and preallocate memory. Must be called after arguments
* have been parsed using verity_fec_parse_opt_args.
*/
int verity_fec_ctr(struct dm_verity *v)
{
struct dm_verity_fec *f = v->fec;
struct dm_target *ti = v->ti;
u64 hash_blocks, fec_blocks;
int ret;
if (!verity_fec_is_enabled(v)) {
verity_fec_dtr(v);
return 0;
}
/*
* FEC is computed over data blocks, possible metadata, and
* hash blocks. In other words, FEC covers a total of fec_blocks
* blocks consisting of the following:
*
* data blocks | hash blocks | metadata (optional)
*
* We allow metadata after hash blocks to support a use case
* where all data is stored on the same device and FEC covers
* the entire area.
*
* If metadata is included, we require it to be available on the
* hash device after the hash blocks.
*/
hash_blocks = v->hash_blocks - v->hash_start;
/*
* Require matching block sizes for data and hash devices for
* simplicity.
*/
if (v->data_dev_block_bits != v->hash_dev_block_bits) {
ti->error = "Block sizes must match to use FEC";
return -EINVAL;
}
if (!f->roots) {
ti->error = "Missing " DM_VERITY_OPT_FEC_ROOTS;
return -EINVAL;
}
f->rsn = DM_VERITY_FEC_RSM - f->roots;
if (!f->blocks) {
ti->error = "Missing " DM_VERITY_OPT_FEC_BLOCKS;
return -EINVAL;
}
f->rounds = f->blocks;
if (sector_div(f->rounds, f->rsn))
f->rounds++;
/*
* Due to optional metadata, f->blocks can be larger than
* data_blocks and hash_blocks combined.
*/
if (f->blocks < v->data_blocks + hash_blocks || !f->rounds) {
ti->error = "Invalid " DM_VERITY_OPT_FEC_BLOCKS;
return -EINVAL;
}
/*
* Metadata is accessed through the hash device, so we require
* it to be large enough.
*/
f->hash_blocks = f->blocks - v->data_blocks;
if (dm_bufio_get_device_size(v->bufio) < f->hash_blocks) {
ti->error = "Hash device is too small for "
DM_VERITY_OPT_FEC_BLOCKS;
return -E2BIG;
}
if ((f->roots << SECTOR_SHIFT) & ((1 << v->data_dev_block_bits) - 1))
f->io_size = 1 << v->data_dev_block_bits;
else
f->io_size = v->fec->roots << SECTOR_SHIFT;
f->bufio = dm_bufio_client_create(f->dev->bdev,
f->io_size,
1, 0, NULL, NULL, 0);
if (IS_ERR(f->bufio)) {
ti->error = "Cannot initialize FEC bufio client";
return PTR_ERR(f->bufio);
}
dm_bufio_set_sector_offset(f->bufio, f->start << (v->data_dev_block_bits - SECTOR_SHIFT));
fec_blocks = div64_u64(f->rounds * f->roots, v->fec->io_size);
if (dm_bufio_get_device_size(f->bufio) < fec_blocks) {
ti->error = "FEC device is too small";
return -E2BIG;
}
f->data_bufio = dm_bufio_client_create(v->data_dev->bdev,
1 << v->data_dev_block_bits,
1, 0, NULL, NULL, 0);
if (IS_ERR(f->data_bufio)) {
ti->error = "Cannot initialize FEC data bufio client";
return PTR_ERR(f->data_bufio);
}
if (dm_bufio_get_device_size(f->data_bufio) < v->data_blocks) {
ti->error = "Data device is too small";
return -E2BIG;
}
/* Preallocate an rs_control structure for each worker thread */
ret = mempool_init(&f->rs_pool, num_online_cpus(), fec_rs_alloc,
fec_rs_free, (void *) v);
if (ret) {
ti->error = "Cannot allocate RS pool";
return ret;
}
f->cache = kmem_cache_create("dm_verity_fec_buffers",
f->rsn << DM_VERITY_FEC_BUF_RS_BITS,
0, 0, NULL);
if (!f->cache) {
ti->error = "Cannot create FEC buffer cache";
return -ENOMEM;
}
/* Preallocate DM_VERITY_FEC_BUF_PREALLOC buffers for each thread */
ret = mempool_init_slab_pool(&f->prealloc_pool, num_online_cpus() *
DM_VERITY_FEC_BUF_PREALLOC,
f->cache);
if (ret) {
ti->error = "Cannot allocate FEC buffer prealloc pool";
return ret;
}
ret = mempool_init_slab_pool(&f->extra_pool, 0, f->cache);
if (ret) {
ti->error = "Cannot allocate FEC buffer extra pool";
return ret;
}
/* Preallocate an output buffer for each thread */
ret = mempool_init_kmalloc_pool(&f->output_pool, num_online_cpus(),
1 << v->data_dev_block_bits);
if (ret) {
ti->error = "Cannot allocate FEC output pool";
return ret;
}
/* Reserve space for our per-bio data */
ti->per_io_data_size += sizeof(struct dm_verity_fec_io);
return 0;
}
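/*
* Sizing example for the checks above (illustrative numbers): with the
* usual RS(255, N) code, fec_roots = 2 gives rsn = 255 - 2 = 253, so
* fec_blocks = 26215 covered blocks yields
* rounds = DIV_ROUND_UP(26215, 253) = 104 interleaving rounds, each
* spanning up to 253 data/hash blocks.
*/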
| linux-master | drivers/md/dm-verity-fec.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2016 Red Hat, Inc. All rights reserved.
*
* This file is released under the GPL.
*/
#include "dm-core.h"
#include "dm-rq.h"
#include <linux/blk-mq.h>
#define DM_MSG_PREFIX "core-rq"
/*
* One of these is allocated per request.
*/
struct dm_rq_target_io {
struct mapped_device *md;
struct dm_target *ti;
struct request *orig, *clone;
struct kthread_work work;
blk_status_t error;
union map_info info;
struct dm_stats_aux stats_aux;
unsigned long duration_jiffies;
unsigned int n_sectors;
unsigned int completed;
};
#define DM_MQ_NR_HW_QUEUES 1
#define DM_MQ_QUEUE_DEPTH 2048
static unsigned int dm_mq_nr_hw_queues = DM_MQ_NR_HW_QUEUES;
static unsigned int dm_mq_queue_depth = DM_MQ_QUEUE_DEPTH;
/*
* Request-based DM's mempools' reserved IOs set by the user.
*/
#define RESERVED_REQUEST_BASED_IOS 256
static unsigned int reserved_rq_based_ios = RESERVED_REQUEST_BASED_IOS;
unsigned int dm_get_reserved_rq_based_ios(void)
{
return __dm_get_module_param(&reserved_rq_based_ios,
RESERVED_REQUEST_BASED_IOS, DM_RESERVED_MAX_IOS);
}
static unsigned int dm_get_blk_mq_nr_hw_queues(void)
{
return __dm_get_module_param(&dm_mq_nr_hw_queues, 1, 32);
}
static unsigned int dm_get_blk_mq_queue_depth(void)
{
return __dm_get_module_param(&dm_mq_queue_depth,
DM_MQ_QUEUE_DEPTH, BLK_MQ_MAX_DEPTH);
}
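/*
* These knobs are exposed as module parameters (see the module_param()
* declarations at the end of this file); assuming dm-rq is built into
* dm_mod, they can be tuned at runtime, e.g.:
*
*   echo 4 > /sys/module/dm_mod/parameters/dm_mq_nr_hw_queues
*
* __dm_get_module_param() sanitizes the stored value on every read,
* substituting the default when it is zero and clamping it to the maximum.
*/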
int dm_request_based(struct mapped_device *md)
{
return queue_is_mq(md->queue);
}
void dm_start_queue(struct request_queue *q)
{
blk_mq_unquiesce_queue(q);
blk_mq_kick_requeue_list(q);
}
void dm_stop_queue(struct request_queue *q)
{
blk_mq_quiesce_queue(q);
}
/*
* Partial completion handling for request-based dm
*/
static void end_clone_bio(struct bio *clone)
{
struct dm_rq_clone_bio_info *info =
container_of(clone, struct dm_rq_clone_bio_info, clone);
struct dm_rq_target_io *tio = info->tio;
unsigned int nr_bytes = info->orig->bi_iter.bi_size;
blk_status_t error = clone->bi_status;
bool is_last = !clone->bi_next;
bio_put(clone);
if (tio->error)
/*
* An error has already been detected on the request.
* Once error occurred, just let clone->end_io() handle
* the remainder.
*/
return;
else if (error) {
/*
* Don't report the error to the upper layer yet.
* The error handling decision is made by the target driver
* when the request is completed.
*/
tio->error = error;
goto exit;
}
/*
* I/O for the bio successfully completed.
* Report the data completion to the upper layer.
*/
tio->completed += nr_bytes;
/*
* Update the original request.
* Do not use blk_mq_end_request() here, because it may complete
* the original request before the clone, and break the ordering.
*/
if (is_last)
exit:
blk_update_request(tio->orig, BLK_STS_OK, tio->completed);
}
static struct dm_rq_target_io *tio_from_request(struct request *rq)
{
return blk_mq_rq_to_pdu(rq);
}
static void rq_end_stats(struct mapped_device *md, struct request *orig)
{
if (unlikely(dm_stats_used(&md->stats))) {
struct dm_rq_target_io *tio = tio_from_request(orig);
tio->duration_jiffies = jiffies - tio->duration_jiffies;
dm_stats_account_io(&md->stats, rq_data_dir(orig),
blk_rq_pos(orig), tio->n_sectors, true,
tio->duration_jiffies, &tio->stats_aux);
}
}
/*
* Don't touch any member of the md after calling this function because
* the md may be freed in dm_put() at the end of this function.
* Alternatively, take an extra reference with dm_get() before calling
* this function and drop it with dm_put() afterwards.
*/
static void rq_completed(struct mapped_device *md)
{
/*
* dm_put() must be at the end of this function. See the comment above
*/
dm_put(md);
}
/*
* Complete the clone and the original request.
* Must be called without clone's queue lock held,
* see end_clone_request() for more details.
*/
static void dm_end_request(struct request *clone, blk_status_t error)
{
struct dm_rq_target_io *tio = clone->end_io_data;
struct mapped_device *md = tio->md;
struct request *rq = tio->orig;
blk_rq_unprep_clone(clone);
tio->ti->type->release_clone_rq(clone, NULL);
rq_end_stats(md, rq);
blk_mq_end_request(rq, error);
rq_completed(md);
}
static void __dm_mq_kick_requeue_list(struct request_queue *q, unsigned long msecs)
{
blk_mq_delay_kick_requeue_list(q, msecs);
}
void dm_mq_kick_requeue_list(struct mapped_device *md)
{
__dm_mq_kick_requeue_list(md->queue, 0);
}
EXPORT_SYMBOL(dm_mq_kick_requeue_list);
static void dm_mq_delay_requeue_request(struct request *rq, unsigned long msecs)
{
blk_mq_requeue_request(rq, false);
__dm_mq_kick_requeue_list(rq->q, msecs);
}
static void dm_requeue_original_request(struct dm_rq_target_io *tio, bool delay_requeue)
{
struct mapped_device *md = tio->md;
struct request *rq = tio->orig;
unsigned long delay_ms = delay_requeue ? 100 : 0;
rq_end_stats(md, rq);
if (tio->clone) {
blk_rq_unprep_clone(tio->clone);
tio->ti->type->release_clone_rq(tio->clone, NULL);
}
dm_mq_delay_requeue_request(rq, delay_ms);
rq_completed(md);
}
static void dm_done(struct request *clone, blk_status_t error, bool mapped)
{
int r = DM_ENDIO_DONE;
struct dm_rq_target_io *tio = clone->end_io_data;
dm_request_endio_fn rq_end_io = NULL;
if (tio->ti) {
rq_end_io = tio->ti->type->rq_end_io;
if (mapped && rq_end_io)
r = rq_end_io(tio->ti, clone, error, &tio->info);
}
if (unlikely(error == BLK_STS_TARGET)) {
if (req_op(clone) == REQ_OP_DISCARD &&
!clone->q->limits.max_discard_sectors)
disable_discard(tio->md);
else if (req_op(clone) == REQ_OP_WRITE_ZEROES &&
!clone->q->limits.max_write_zeroes_sectors)
disable_write_zeroes(tio->md);
}
switch (r) {
case DM_ENDIO_DONE:
/* The target wants to complete the I/O */
dm_end_request(clone, error);
break;
case DM_ENDIO_INCOMPLETE:
/* The target will handle the I/O */
return;
case DM_ENDIO_REQUEUE:
/* The target wants to requeue the I/O */
dm_requeue_original_request(tio, false);
break;
case DM_ENDIO_DELAY_REQUEUE:
/* The target wants to requeue the I/O after a delay */
dm_requeue_original_request(tio, true);
break;
default:
DMCRIT("unimplemented target endio return value: %d", r);
BUG();
}
}
/*
* Request completion handler for request-based dm
*/
static void dm_softirq_done(struct request *rq)
{
bool mapped = true;
struct dm_rq_target_io *tio = tio_from_request(rq);
struct request *clone = tio->clone;
if (!clone) {
struct mapped_device *md = tio->md;
rq_end_stats(md, rq);
blk_mq_end_request(rq, tio->error);
rq_completed(md);
return;
}
if (rq->rq_flags & RQF_FAILED)
mapped = false;
dm_done(clone, tio->error, mapped);
}
/*
* Complete the clone and the original request with the error status
* through softirq context.
*/
static void dm_complete_request(struct request *rq, blk_status_t error)
{
struct dm_rq_target_io *tio = tio_from_request(rq);
tio->error = error;
if (likely(!blk_should_fake_timeout(rq->q)))
blk_mq_complete_request(rq);
}
/*
* Complete the not-mapped clone and the original request with the error status
* through softirq context.
* Target's rq_end_io() function isn't called.
* This may be used when the target's clone_and_map_rq() function fails.
*/
static void dm_kill_unmapped_request(struct request *rq, blk_status_t error)
{
rq->rq_flags |= RQF_FAILED;
dm_complete_request(rq, error);
}
static enum rq_end_io_ret end_clone_request(struct request *clone,
blk_status_t error)
{
struct dm_rq_target_io *tio = clone->end_io_data;
dm_complete_request(tio->orig, error);
return RQ_END_IO_NONE;
}
static int dm_rq_bio_constructor(struct bio *bio, struct bio *bio_orig,
void *data)
{
struct dm_rq_target_io *tio = data;
struct dm_rq_clone_bio_info *info =
container_of(bio, struct dm_rq_clone_bio_info, clone);
info->orig = bio_orig;
info->tio = tio;
bio->bi_end_io = end_clone_bio;
return 0;
}
static int setup_clone(struct request *clone, struct request *rq,
struct dm_rq_target_io *tio, gfp_t gfp_mask)
{
int r;
r = blk_rq_prep_clone(clone, rq, &tio->md->mempools->bs, gfp_mask,
dm_rq_bio_constructor, tio);
if (r)
return r;
clone->end_io = end_clone_request;
clone->end_io_data = tio;
tio->clone = clone;
return 0;
}
static void init_tio(struct dm_rq_target_io *tio, struct request *rq,
struct mapped_device *md)
{
tio->md = md;
tio->ti = NULL;
tio->clone = NULL;
tio->orig = rq;
tio->error = 0;
tio->completed = 0;
/*
* Avoid initializing info for blk-mq; it passes
* target-specific data through info.ptr
* (see: dm_mq_init_request)
*/
if (!md->init_tio_pdu)
memset(&tio->info, 0, sizeof(tio->info));
}
/*
* Returns:
* DM_MAPIO_* : the request has been processed as indicated
* DM_MAPIO_REQUEUE : the original request needs to be immediately requeued
* < 0 : the request was completed due to failure
*/
static int map_request(struct dm_rq_target_io *tio)
{
int r;
struct dm_target *ti = tio->ti;
struct mapped_device *md = tio->md;
struct request *rq = tio->orig;
struct request *clone = NULL;
blk_status_t ret;
r = ti->type->clone_and_map_rq(ti, rq, &tio->info, &clone);
switch (r) {
case DM_MAPIO_SUBMITTED:
/* The target has taken the I/O to submit by itself later */
break;
case DM_MAPIO_REMAPPED:
if (setup_clone(clone, rq, tio, GFP_ATOMIC)) {
/* -ENOMEM */
ti->type->release_clone_rq(clone, &tio->info);
return DM_MAPIO_REQUEUE;
}
/* The target has remapped the I/O so dispatch it */
trace_block_rq_remap(clone, disk_devt(dm_disk(md)),
blk_rq_pos(rq));
ret = blk_insert_cloned_request(clone);
switch (ret) {
case BLK_STS_OK:
break;
case BLK_STS_RESOURCE:
case BLK_STS_DEV_RESOURCE:
blk_rq_unprep_clone(clone);
blk_mq_cleanup_rq(clone);
tio->ti->type->release_clone_rq(clone, &tio->info);
tio->clone = NULL;
return DM_MAPIO_REQUEUE;
default:
/* must complete clone in terms of original request */
dm_complete_request(rq, ret);
}
break;
case DM_MAPIO_REQUEUE:
/* The target wants to requeue the I/O */
break;
case DM_MAPIO_DELAY_REQUEUE:
/* The target wants to requeue the I/O after a delay */
dm_requeue_original_request(tio, true);
break;
case DM_MAPIO_KILL:
/* The target wants to complete the I/O */
dm_kill_unmapped_request(rq, BLK_STS_IOERR);
break;
default:
DMCRIT("unimplemented target map return value: %d", r);
BUG();
}
return r;
}
/* DEPRECATED: previously used for request-based merge heuristic in dm_request_fn() */
ssize_t dm_attr_rq_based_seq_io_merge_deadline_show(struct mapped_device *md, char *buf)
{
return sprintf(buf, "%u\n", 0);
}
ssize_t dm_attr_rq_based_seq_io_merge_deadline_store(struct mapped_device *md,
const char *buf, size_t count)
{
return count;
}
static void dm_start_request(struct mapped_device *md, struct request *orig)
{
blk_mq_start_request(orig);
if (unlikely(dm_stats_used(&md->stats))) {
struct dm_rq_target_io *tio = tio_from_request(orig);
tio->duration_jiffies = jiffies;
tio->n_sectors = blk_rq_sectors(orig);
dm_stats_account_io(&md->stats, rq_data_dir(orig),
blk_rq_pos(orig), tio->n_sectors, false, 0,
&tio->stats_aux);
}
/*
* Hold the md reference here for the in-flight I/O.
* We can't rely on the reference count by device opener,
* because the device may be closed during the request completion
* when all bios are completed.
* See the comment in rq_completed() too.
*/
dm_get(md);
}
static int dm_mq_init_request(struct blk_mq_tag_set *set, struct request *rq,
unsigned int hctx_idx, unsigned int numa_node)
{
struct mapped_device *md = set->driver_data;
struct dm_rq_target_io *tio = blk_mq_rq_to_pdu(rq);
/*
* Must initialize md member of tio, otherwise it won't
* be available in dm_mq_queue_rq.
*/
tio->md = md;
if (md->init_tio_pdu) {
/* target-specific per-io data is immediately after the tio */
tio->info.ptr = tio + 1;
}
return 0;
}
static blk_status_t dm_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
const struct blk_mq_queue_data *bd)
{
struct request *rq = bd->rq;
struct dm_rq_target_io *tio = blk_mq_rq_to_pdu(rq);
struct mapped_device *md = tio->md;
struct dm_target *ti = md->immutable_target;
/*
* blk-mq may unquiesce the queue due to outside events such as an
* elevator switch or an nr_requests update, so a request may arrive
* during suspend; simply ask blk-mq to requeue it.
*/
if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)))
return BLK_STS_RESOURCE;
if (unlikely(!ti)) {
int srcu_idx;
struct dm_table *map;
map = dm_get_live_table(md, &srcu_idx);
if (unlikely(!map)) {
dm_put_live_table(md, srcu_idx);
return BLK_STS_RESOURCE;
}
ti = dm_table_find_target(map, 0);
dm_put_live_table(md, srcu_idx);
}
if (ti->type->busy && ti->type->busy(ti))
return BLK_STS_RESOURCE;
dm_start_request(md, rq);
/* Init tio using md established in .init_request */
init_tio(tio, rq, md);
/*
* Establish tio->ti before calling map_request().
*/
tio->ti = ti;
/* Direct call is fine since .queue_rq allows allocations */
if (map_request(tio) == DM_MAPIO_REQUEUE) {
/* Undo dm_start_request() before requeuing */
rq_end_stats(md, rq);
rq_completed(md);
return BLK_STS_RESOURCE;
}
return BLK_STS_OK;
}
static const struct blk_mq_ops dm_mq_ops = {
.queue_rq = dm_mq_queue_rq,
.complete = dm_softirq_done,
.init_request = dm_mq_init_request,
};
int dm_mq_init_request_queue(struct mapped_device *md, struct dm_table *t)
{
struct dm_target *immutable_tgt;
int err;
md->tag_set = kzalloc_node(sizeof(struct blk_mq_tag_set), GFP_KERNEL, md->numa_node_id);
if (!md->tag_set)
return -ENOMEM;
md->tag_set->ops = &dm_mq_ops;
md->tag_set->queue_depth = dm_get_blk_mq_queue_depth();
md->tag_set->numa_node = md->numa_node_id;
md->tag_set->flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_STACKING;
md->tag_set->nr_hw_queues = dm_get_blk_mq_nr_hw_queues();
md->tag_set->driver_data = md;
md->tag_set->cmd_size = sizeof(struct dm_rq_target_io);
immutable_tgt = dm_table_get_immutable_target(t);
if (immutable_tgt && immutable_tgt->per_io_data_size) {
/* any target-specific per-io data is immediately after the tio */
md->tag_set->cmd_size += immutable_tgt->per_io_data_size;
md->init_tio_pdu = true;
}
err = blk_mq_alloc_tag_set(md->tag_set);
if (err)
goto out_kfree_tag_set;
err = blk_mq_init_allocated_queue(md->tag_set, md->queue);
if (err)
goto out_tag_set;
return 0;
out_tag_set:
blk_mq_free_tag_set(md->tag_set);
out_kfree_tag_set:
kfree(md->tag_set);
md->tag_set = NULL;
return err;
}
void dm_mq_cleanup_mapped_device(struct mapped_device *md)
{
if (md->tag_set) {
blk_mq_free_tag_set(md->tag_set);
kfree(md->tag_set);
md->tag_set = NULL;
}
}
module_param(reserved_rq_based_ios, uint, 0644);
MODULE_PARM_DESC(reserved_rq_based_ios, "Reserved IOs in request-based mempools");
/* Unused, but preserved for userspace compatibility */
static bool use_blk_mq = true;
module_param(use_blk_mq, bool, 0644);
MODULE_PARM_DESC(use_blk_mq, "Use block multiqueue for request-based DM devices");
module_param(dm_mq_nr_hw_queues, uint, 0644);
MODULE_PARM_DESC(dm_mq_nr_hw_queues, "Number of hardware queues for request-based dm-mq devices");
module_param(dm_mq_queue_depth, uint, 0644);
MODULE_PARM_DESC(dm_mq_queue_depth, "Queue depth for request-based dm-mq devices");
| linux-master | drivers/md/dm-rq.c |
// SPDX-License-Identifier: GPL-2.0-only
#include <linux/list.h>
#include <linux/kernel.h>
#include <linux/dm-verity-loadpin.h>
#include "dm.h"
#include "dm-core.h"
#include "dm-verity.h"
#define DM_MSG_PREFIX "verity-loadpin"
LIST_HEAD(dm_verity_loadpin_trusted_root_digests);
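/*
* Note: this file only consumes the list above; it is populated by the
* LoadPin LSM (security/loadpin), which parses hex-encoded root digests
* supplied by userspace and appends one
* dm_verity_loadpin_trusted_root_digest entry per digest.
*/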
static bool is_trusted_verity_target(struct dm_target *ti)
{
int verity_mode;
u8 *root_digest;
unsigned int digest_size;
struct dm_verity_loadpin_trusted_root_digest *trd;
bool trusted = false;
if (!dm_is_verity_target(ti))
return false;
verity_mode = dm_verity_get_mode(ti);
if ((verity_mode != DM_VERITY_MODE_EIO) &&
(verity_mode != DM_VERITY_MODE_RESTART) &&
(verity_mode != DM_VERITY_MODE_PANIC))
return false;
if (dm_verity_get_root_digest(ti, &root_digest, &digest_size))
return false;
list_for_each_entry(trd, &dm_verity_loadpin_trusted_root_digests, node) {
if ((trd->len == digest_size) &&
!memcmp(trd->data, root_digest, digest_size)) {
trusted = true;
break;
}
}
kfree(root_digest);
return trusted;
}
/*
* Determines whether the given block device is a dm-verity device
* that is trusted by LoadPin.
*/
bool dm_verity_loadpin_is_bdev_trusted(struct block_device *bdev)
{
struct mapped_device *md;
struct dm_table *table;
struct dm_target *ti;
int srcu_idx;
bool trusted = false;
if (bdev == NULL)
return false;
if (list_empty(&dm_verity_loadpin_trusted_root_digests))
return false;
md = dm_get_md(bdev->bd_dev);
if (!md)
return false;
table = dm_get_live_table(md, &srcu_idx);
if (table->num_targets != 1)
goto out;
ti = dm_table_get_target(table, 0);
if (is_trusted_verity_target(ti))
trusted = true;
out:
dm_put_live_table(md, srcu_idx);
dm_put(md);
return trusted;
}
| linux-master | drivers/md/dm-verity-loadpin.c |