python_code
stringlengths
0
1.8M
repo_name
stringclasses
7 values
file_path
stringlengths
5
99
// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/drivers/devfreq/governor_passive.c
 *
 * Copyright (C) 2016 Samsung Electronics
 * Author: Chanwoo Choi <[email protected]>
 * Author: MyungJoo Ham <[email protected]>
 */

#include <linux/module.h>
#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/cpumask.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/devfreq.h>
#include <linux/units.h>
#include "governor.h"

/*
 * Look up the per-policy bookkeeping entry for @policy in @p_data's
 * cpu_data_list, keyed by the first CPU of the policy's related_cpus mask.
 * Returns NULL when either argument is NULL or no entry matches.
 */
static struct devfreq_cpu_data *
get_parent_cpu_data(struct devfreq_passive_data *p_data,
		    struct cpufreq_policy *policy)
{
	struct devfreq_cpu_data *parent_cpu_data;

	if (!p_data || !policy)
		return NULL;

	list_for_each_entry(parent_cpu_data, &p_data->cpu_data_list, node)
		if (parent_cpu_data->first_cpu == cpumask_first(policy->related_cpus))
			return parent_cpu_data;

	return NULL;
}

/*
 * Tear down every entry on cpu_data_list: drop the OPP table reference
 * taken in cpufreq_passive_register_notifier() and free the entry.
 */
static void delete_parent_cpu_data(struct devfreq_passive_data *p_data)
{
	struct devfreq_cpu_data *parent_cpu_data, *tmp;

	list_for_each_entry_safe(parent_cpu_data, tmp, &p_data->cpu_data_list, node) {
		list_del(&parent_cpu_data->node);

		if (parent_cpu_data->opp_table)
			dev_pm_opp_put_opp_table(parent_cpu_data->opp_table);

		kfree(parent_cpu_data);
	}
}

/*
 * Translate the parent device's frequency *freq into this device's target
 * frequency via the OPP "required-opps" links. Returns 0 on any failure so
 * callers can fall back to interpolation.
 */
static unsigned long get_target_freq_by_required_opp(struct device *p_dev,
						struct opp_table *p_opp_table,
						struct opp_table *opp_table,
						unsigned long *freq)
{
	struct dev_pm_opp *opp = NULL, *p_opp = NULL;
	unsigned long target_freq;

	if (!p_dev || !p_opp_table || !opp_table || !freq)
		return 0;

	p_opp = devfreq_recommended_opp(p_dev, freq, 0);
	if (IS_ERR(p_opp))
		return 0;

	opp = dev_pm_opp_xlate_required_opp(p_opp_table, opp_table, p_opp);
	dev_pm_opp_put(p_opp);

	if (IS_ERR(opp))
		return 0;

	target_freq = dev_pm_opp_get_freq(opp);
	dev_pm_opp_put(opp);

	return target_freq;
}

/*
 * Compute *target_freq as the maximum frequency demanded across all online
 * CPUs, either via required-opps translation or, failing that, by linear
 * interpolation of the CPU's position within its cpuinfo min/max range.
 * Note: a CPU with no cpufreq policy leaves ret = -EINVAL even if other
 * CPUs produced a valid frequency.
 */
static int get_target_freq_with_cpufreq(struct devfreq *devfreq,
					unsigned long *target_freq)
{
	struct devfreq_passive_data *p_data =
				(struct devfreq_passive_data *)devfreq->data;
	struct devfreq_cpu_data *parent_cpu_data;
	struct cpufreq_policy *policy;
	unsigned long cpu, cpu_cur, cpu_min, cpu_max, cpu_percent;
	unsigned long dev_min, dev_max;
	unsigned long freq = 0;
	int ret = 0;

	for_each_online_cpu(cpu) {
		policy = cpufreq_cpu_get(cpu);
		if (!policy) {
			ret = -EINVAL;
			continue;
		}

		parent_cpu_data = get_parent_cpu_data(p_data, policy);
		if (!parent_cpu_data) {
			cpufreq_cpu_put(policy);
			continue;
		}

		/* Get target freq via required opps */
		cpu_cur = parent_cpu_data->cur_freq * HZ_PER_KHZ;
		freq = get_target_freq_by_required_opp(parent_cpu_data->dev,
					parent_cpu_data->opp_table,
					devfreq->opp_table, &cpu_cur);
		if (freq) {
			*target_freq = max(freq, *target_freq);
			cpufreq_cpu_put(policy);
			continue;
		}

		/* Use interpolation if required opps is not available */
		devfreq_get_freq_range(devfreq, &dev_min, &dev_max);

		cpu_min = parent_cpu_data->min_freq;
		cpu_max = parent_cpu_data->max_freq;
		cpu_cur = parent_cpu_data->cur_freq;

		cpu_percent = ((cpu_cur - cpu_min) * 100) / (cpu_max - cpu_min);
		freq = dev_min + mult_frac(dev_max - dev_min, cpu_percent, 100);

		*target_freq = max(freq, *target_freq);
		cpufreq_cpu_put(policy);
	}

	return ret;
}

/*
 * Derive *freq from the parent devfreq device: first try required-opps
 * translation, then fall back to mapping the parent's freq_table index
 * onto this device's freq_table (clamped to the last entry).
 */
static int get_target_freq_with_devfreq(struct devfreq *devfreq,
					unsigned long *freq)
{
	struct devfreq_passive_data *p_data =
				(struct devfreq_passive_data *)devfreq->data;
	struct devfreq *parent_devfreq = (struct devfreq *)p_data->parent;
	unsigned long child_freq = ULONG_MAX;
	int i, count;

	/* Get target freq via required opps */
	child_freq = get_target_freq_by_required_opp(parent_devfreq->dev.parent,
						parent_devfreq->opp_table,
						devfreq->opp_table, freq);
	if (child_freq)
		goto out;

	/* Use interpolation if required opps is not available */
	for (i = 0; i < parent_devfreq->max_state; i++)
		if (parent_devfreq->freq_table[i] == *freq)
			break;

	if (i == parent_devfreq->max_state)
		return -EINVAL;

	if (i < devfreq->max_state) {
		child_freq = devfreq->freq_table[i];
	} else {
		count = devfreq->max_state;
		child_freq = devfreq->freq_table[count - 1];
	}

out:
	*freq = child_freq;

	return 0;
}

/* Governor entry point: pick the next frequency for a passive device. */
static int devfreq_passive_get_target_freq(struct devfreq *devfreq,
					   unsigned long *freq)
{
	struct devfreq_passive_data *p_data =
				(struct devfreq_passive_data *)devfreq->data;
	int ret;

	if (!p_data)
		return -EINVAL;

	/*
	 * If the devfreq device with passive governor has the specific method
	 * to determine the next frequency, should use the get_target_freq()
	 * of struct devfreq_passive_data.
	 */
	if (p_data->get_target_freq)
		return p_data->get_target_freq(devfreq, freq);

	switch (p_data->parent_type) {
	case DEVFREQ_PARENT_DEV:
		ret = get_target_freq_with_devfreq(devfreq, freq);
		break;
	case CPUFREQ_PARENT_DEV:
		ret = get_target_freq_with_cpufreq(devfreq, freq);
		break;
	default:
		ret = -EINVAL;
		dev_err(&devfreq->dev, "Invalid parent type\n");
		break;
	}

	return ret;
}

/*
 * cpufreq transition notifier: on POSTCHANGE, record the new CPU frequency
 * and retarget the passive device. On failure the cached cur_freq is rolled
 * back so a later transition to the same frequency is not skipped.
 */
static int cpufreq_passive_notifier_call(struct notifier_block *nb,
					 unsigned long event, void *ptr)
{
	struct devfreq_passive_data *p_data =
			container_of(nb, struct devfreq_passive_data, nb);
	struct devfreq *devfreq = (struct devfreq *)p_data->this;
	struct devfreq_cpu_data *parent_cpu_data;
	struct cpufreq_freqs *freqs = ptr;
	unsigned int cur_freq;
	int ret;

	if (event != CPUFREQ_POSTCHANGE || !freqs)
		return 0;

	parent_cpu_data = get_parent_cpu_data(p_data, freqs->policy);
	if (!parent_cpu_data || parent_cpu_data->cur_freq == freqs->new)
		return 0;

	cur_freq = parent_cpu_data->cur_freq;
	parent_cpu_data->cur_freq = freqs->new;

	mutex_lock(&devfreq->lock);
	ret = devfreq_update_target(devfreq, freqs->new);
	mutex_unlock(&devfreq->lock);
	if (ret) {
		parent_cpu_data->cur_freq = cur_freq;
		dev_err(&devfreq->dev, "failed to update the frequency.\n");
		return ret;
	}

	return 0;
}

/* Undo cpufreq_passive_register_notifier(): notifier, then per-CPU data. */
static int cpufreq_passive_unregister_notifier(struct devfreq *devfreq)
{
	struct devfreq_passive_data *p_data =
			(struct devfreq_passive_data *)devfreq->data;
	int ret;

	if (p_data->nb.notifier_call) {
		ret = cpufreq_unregister_notifier(&p_data->nb,
					CPUFREQ_TRANSITION_NOTIFIER);
		if (ret < 0)
			return ret;
	}

	delete_parent_cpu_data(p_data);

	return 0;
}

/*
 * Register for cpufreq transitions and build one devfreq_cpu_data entry per
 * cpufreq policy (deduplicated by first_cpu). Each entry pins the CPU's OPP
 * table; entries are freed by cpufreq_passive_unregister_notifier(). A CPU
 * without a policy yet defers probing (-EPROBE_DEFER).
 */
static int cpufreq_passive_register_notifier(struct devfreq *devfreq)
{
	struct devfreq_passive_data *p_data =
			(struct devfreq_passive_data *)devfreq->data;
	struct device *dev = devfreq->dev.parent;
	struct opp_table *opp_table = NULL;
	struct devfreq_cpu_data *parent_cpu_data;
	struct cpufreq_policy *policy;
	struct device *cpu_dev;
	unsigned int cpu;
	int ret;

	p_data->cpu_data_list
		= (struct list_head)LIST_HEAD_INIT(p_data->cpu_data_list);

	p_data->nb.notifier_call = cpufreq_passive_notifier_call;
	ret = cpufreq_register_notifier(&p_data->nb, CPUFREQ_TRANSITION_NOTIFIER);
	if (ret) {
		dev_err(dev, "failed to register cpufreq notifier\n");
		p_data->nb.notifier_call = NULL;
		goto err;
	}

	for_each_possible_cpu(cpu) {
		policy = cpufreq_cpu_get(cpu);
		if (!policy) {
			ret = -EPROBE_DEFER;
			goto err;
		}

		parent_cpu_data = get_parent_cpu_data(p_data, policy);
		if (parent_cpu_data) {
			cpufreq_cpu_put(policy);
			continue;
		}

		parent_cpu_data = kzalloc(sizeof(*parent_cpu_data), GFP_KERNEL);
		if (!parent_cpu_data) {
			ret = -ENOMEM;
			goto err_put_policy;
		}

		cpu_dev = get_cpu_device(cpu);
		if (!cpu_dev) {
			dev_err(dev, "failed to get cpu device\n");
			ret = -ENODEV;
			goto err_free_cpu_data;
		}

		opp_table = dev_pm_opp_get_opp_table(cpu_dev);
		if (IS_ERR(opp_table)) {
			dev_err(dev, "failed to get opp_table of cpu%d\n", cpu);
			ret = PTR_ERR(opp_table);
			goto err_free_cpu_data;
		}

		parent_cpu_data->dev = cpu_dev;
		parent_cpu_data->opp_table = opp_table;
		parent_cpu_data->first_cpu = cpumask_first(policy->related_cpus);
		parent_cpu_data->cur_freq = policy->cur;
		parent_cpu_data->min_freq = policy->cpuinfo.min_freq;
		parent_cpu_data->max_freq = policy->cpuinfo.max_freq;

		list_add_tail(&parent_cpu_data->node, &p_data->cpu_data_list);
		cpufreq_cpu_put(policy);
	}

	/* Sync the devfreq device to the CPUs' current state once. */
	mutex_lock(&devfreq->lock);
	ret = devfreq_update_target(devfreq, 0L);
	mutex_unlock(&devfreq->lock);
	if (ret)
		dev_err(dev, "failed to update the frequency\n");

	return ret;

err_free_cpu_data:
	kfree(parent_cpu_data);
err_put_policy:
	cpufreq_cpu_put(policy);
err:

	return ret;
}

/*
 * devfreq transition notifier for DEVFREQ_PARENT_DEV: follow the parent up
 * on PRECHANGE (before the parent raises its clock) and down on POSTCHANGE
 * (after it lowers it). Uses mutex_lock_nested() because the parent already
 * holds its own devfreq lock on this path.
 */
static int devfreq_passive_notifier_call(struct notifier_block *nb,
					 unsigned long event, void *ptr)
{
	struct devfreq_passive_data *data
			= container_of(nb, struct devfreq_passive_data, nb);
	struct devfreq *devfreq = (struct devfreq *)data->this;
	struct devfreq *parent = (struct devfreq *)data->parent;
	struct devfreq_freqs *freqs = (struct devfreq_freqs *)ptr;
	unsigned long freq = freqs->new;
	int ret = 0;

	mutex_lock_nested(&devfreq->lock, SINGLE_DEPTH_NESTING);
	switch (event) {
	case DEVFREQ_PRECHANGE:
		if (parent->previous_freq > freq)
			ret = devfreq_update_target(devfreq, freq);

		break;
	case DEVFREQ_POSTCHANGE:
		if (parent->previous_freq < freq)
			ret = devfreq_update_target(devfreq, freq);
		break;
	}
	mutex_unlock(&devfreq->lock);

	if (ret < 0)
		dev_warn(&devfreq->dev,
			"failed to update devfreq using passive governor\n");

	return NOTIFY_DONE;
}

static int devfreq_passive_unregister_notifier(struct devfreq *devfreq)
{
	struct devfreq_passive_data *p_data
			= (struct devfreq_passive_data *)devfreq->data;
	struct devfreq *parent = (struct devfreq *)p_data->parent;
	struct notifier_block *nb = &p_data->nb;

	return devfreq_unregister_notifier(parent, nb, DEVFREQ_TRANSITION_NOTIFIER);
}

static int devfreq_passive_register_notifier(struct devfreq *devfreq)
{
	struct devfreq_passive_data *p_data
			= (struct devfreq_passive_data *)devfreq->data;
	struct devfreq *parent = (struct devfreq *)p_data->parent;
	struct notifier_block *nb = &p_data->nb;

	if (!parent)
		return -EPROBE_DEFER;

	nb->notifier_call = devfreq_passive_notifier_call;
	return devfreq_register_notifier(parent, nb, DEVFREQ_TRANSITION_NOTIFIER);
}

/* Governor event handler: hook/unhook the parent-type-specific notifier. */
static int devfreq_passive_event_handler(struct devfreq *devfreq,
				unsigned int event, void *data)
{
	struct devfreq_passive_data *p_data
			= (struct devfreq_passive_data *)devfreq->data;
	int ret = 0;

	if (!p_data)
		return -EINVAL;

	p_data->this = devfreq;

	switch (event) {
	case DEVFREQ_GOV_START:
		if (p_data->parent_type == DEVFREQ_PARENT_DEV)
			ret = devfreq_passive_register_notifier(devfreq);
		else if (p_data->parent_type == CPUFREQ_PARENT_DEV)
			ret = cpufreq_passive_register_notifier(devfreq);
		break;
	case DEVFREQ_GOV_STOP:
		if (p_data->parent_type == DEVFREQ_PARENT_DEV)
			WARN_ON(devfreq_passive_unregister_notifier(devfreq));
		else if (p_data->parent_type == CPUFREQ_PARENT_DEV)
			WARN_ON(cpufreq_passive_unregister_notifier(devfreq));
		break;
	default:
		break;
	}

	return ret;
}

static struct devfreq_governor devfreq_passive = {
	.name = DEVFREQ_GOV_PASSIVE,
	.flags = DEVFREQ_GOV_FLAG_IMMUTABLE,
	.get_target_freq = devfreq_passive_get_target_freq,
	.event_handler = devfreq_passive_event_handler,
};

static int __init devfreq_passive_init(void)
{
	return devfreq_add_governor(&devfreq_passive);
}
subsys_initcall(devfreq_passive_init);

static void __exit devfreq_passive_exit(void)
{
	int ret;

	ret = devfreq_remove_governor(&devfreq_passive);
	if (ret)
		pr_err("%s: failed remove governor %d\n", __func__, ret);
}
module_exit(devfreq_passive_exit);

MODULE_AUTHOR("Chanwoo Choi <[email protected]>");
MODULE_AUTHOR("MyungJoo Ham <[email protected]>");
MODULE_DESCRIPTION("DEVFREQ Passive governor");
MODULE_LICENSE("GPL v2");
linux-master
drivers/devfreq/governor_passive.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright 2019 NXP
 */

#include <linux/clk.h>
#include <linux/devfreq.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/pm_opp.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

struct imx_bus {
	struct devfreq_dev_profile profile;
	struct devfreq *devfreq;
	struct clk *clk;			/* bus clock to scale (not enabled here) */
	struct platform_device *icc_pdev;	/* optional interconnect provider child */
};

/*
 * devfreq target callback: clamp *freq to a valid OPP, then set the rate
 * through the OPP layer (which also handles any bound regulators).
 */
static int imx_bus_target(struct device *dev,
		unsigned long *freq, u32 flags)
{
	struct dev_pm_opp *new_opp;
	int ret;

	new_opp = devfreq_recommended_opp(dev, freq, flags);
	if (IS_ERR(new_opp)) {
		ret = PTR_ERR(new_opp);
		dev_err(dev, "failed to get recommended opp: %d\n", ret);
		return ret;
	}
	dev_pm_opp_put(new_opp);

	return dev_pm_opp_set_rate(dev, *freq);
}

/* devfreq get_cur_freq callback: report the live clock rate. */
static int imx_bus_get_cur_freq(struct device *dev, unsigned long *freq)
{
	struct imx_bus *priv = dev_get_drvdata(dev);

	*freq = clk_get_rate(priv->clk);

	return 0;
}

/* devfreq exit callback: drop the OPP table and the icc provider child. */
static void imx_bus_exit(struct device *dev)
{
	struct imx_bus *priv = dev_get_drvdata(dev);

	dev_pm_opp_of_remove_table(dev);
	platform_device_unregister(priv->icc_pdev);
}

/* imx_bus_init_icc() - register matching icc provider if required */
static int imx_bus_init_icc(struct device *dev)
{
	struct imx_bus *priv = dev_get_drvdata(dev);
	const char *icc_driver_name;

	/* Only nodes that declare #interconnect-cells want a provider. */
	if (!of_get_property(dev->of_node, "#interconnect-cells", NULL))
		return 0;
	if (!IS_ENABLED(CONFIG_INTERCONNECT_IMX)) {
		dev_warn(dev, "imx interconnect drivers disabled\n");
		return 0;
	}

	/* Driver name comes from the of_device_id .data below. */
	icc_driver_name = of_device_get_match_data(dev);
	if (!icc_driver_name) {
		dev_err(dev, "unknown interconnect driver\n");
		return 0;
	}

	priv->icc_pdev = platform_device_register_data(
			dev, icc_driver_name, -1, NULL, 0);
	if (IS_ERR(priv->icc_pdev)) {
		dev_err(dev, "failed to register icc provider %s: %ld\n",
				icc_driver_name, PTR_ERR(priv->icc_pdev));
		return PTR_ERR(priv->icc_pdev);
	}

	return 0;
}

static int imx_bus_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct imx_bus *priv;
	const char *gov = DEVFREQ_GOV_USERSPACE;
	int ret;

	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	/*
	 * Fetch the clock to adjust but don't explicitly enable.
	 *
	 * For imx bus clock clk_set_rate is safe no matter if the clock is on
	 * or off and some peripheral side-buses might be off unless enabled by
	 * drivers for devices on those specific buses.
	 *
	 * Rate adjustment on a disabled bus clock just takes effect later.
	 */
	priv->clk = devm_clk_get(dev, NULL);
	if (IS_ERR(priv->clk)) {
		ret = PTR_ERR(priv->clk);
		dev_err(dev, "failed to fetch clk: %d\n", ret);
		return ret;
	}
	platform_set_drvdata(pdev, priv);

	ret = dev_pm_opp_of_add_table(dev);
	if (ret < 0) {
		dev_err(dev, "failed to get OPP table\n");
		return ret;
	}

	priv->profile.target = imx_bus_target;
	priv->profile.exit = imx_bus_exit;
	priv->profile.get_cur_freq = imx_bus_get_cur_freq;
	priv->profile.initial_freq = clk_get_rate(priv->clk);

	priv->devfreq = devm_devfreq_add_device(dev, &priv->profile,
						gov, NULL);
	if (IS_ERR(priv->devfreq)) {
		ret = PTR_ERR(priv->devfreq);
		dev_err(dev, "failed to add devfreq device: %d\n", ret);
		goto err;
	}

	ret = imx_bus_init_icc(dev);
	if (ret)
		goto err;

	return 0;

err:
	dev_pm_opp_of_remove_table(dev);
	return ret;
}

static const struct of_device_id imx_bus_of_match[] = {
	{ .compatible = "fsl,imx8mq-noc", .data = "imx8mq-interconnect", },
	{ .compatible = "fsl,imx8mm-noc", .data = "imx8mm-interconnect", },
	{ .compatible = "fsl,imx8mn-noc", .data = "imx8mn-interconnect", },
	{ .compatible = "fsl,imx8mp-noc", .data = "imx8mp-interconnect", },
	{ .compatible = "fsl,imx8m-noc", },
	{ .compatible = "fsl,imx8m-nic", },
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, imx_bus_of_match);

static struct platform_driver imx_bus_platdrv = {
	.probe		= imx_bus_probe,
	.driver = {
		.name = "imx-bus-devfreq",
		.of_match_table = imx_bus_of_match,
	},
};
module_platform_driver(imx_bus_platdrv);

MODULE_DESCRIPTION("Generic i.MX bus frequency scaling driver");
MODULE_AUTHOR("Leonard Crestez <[email protected]>");
MODULE_LICENSE("GPL v2");
linux-master
drivers/devfreq/imx-bus.c
// SPDX-License-Identifier: GPL-2.0-only
//
// Copyright (C) 2020-2021 Samuel Holland <[email protected]>
//

#include <linux/clk.h>
#include <linux/devfreq.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/property.h>

#define MBUS_CR				0x0000
#define MBUS_CR_GET_DRAM_TYPE(x)	(((x) >> 16) & 0x7)
#define MBUS_CR_DRAM_TYPE_DDR2		2
#define MBUS_CR_DRAM_TYPE_DDR3		3
#define MBUS_CR_DRAM_TYPE_DDR4		4
#define MBUS_CR_DRAM_TYPE_LPDDR2	6
#define MBUS_CR_DRAM_TYPE_LPDDR3	7

#define MBUS_TMR			0x000c
#define MBUS_TMR_PERIOD(x)		((x) - 1)

#define MBUS_PMU_CFG			0x009c
#define MBUS_PMU_CFG_PERIOD(x)		(((x) - 1) << 16)
#define MBUS_PMU_CFG_UNIT		(0x3 << 1)
#define MBUS_PMU_CFG_UNIT_B		(0x0 << 1)
#define MBUS_PMU_CFG_UNIT_KB		(0x1 << 1)
#define MBUS_PMU_CFG_UNIT_MB		(0x2 << 1)
#define MBUS_PMU_CFG_ENABLE		(0x1 << 0)

#define MBUS_PMU_BWCR(n)		(0x00a0 + (0x04 * (n)))

#define MBUS_TOTAL_BWCR			MBUS_PMU_BWCR(5)
#define MBUS_TOTAL_BWCR_H616		MBUS_PMU_BWCR(13)

#define MBUS_MDFSCR			0x0100
#define MBUS_MDFSCR_BUFFER_TIMING	(0x1 << 15)
#define MBUS_MDFSCR_PAD_HOLD		(0x1 << 13)
#define MBUS_MDFSCR_BYPASS		(0x1 << 4)
#define MBUS_MDFSCR_MODE		(0x1 << 1)
#define MBUS_MDFSCR_MODE_DFS		(0x0 << 1)
#define MBUS_MDFSCR_MODE_CFS		(0x1 << 1)
#define MBUS_MDFSCR_START		(0x1 << 0)

#define MBUS_MDFSMRMR			0x0108

#define DRAM_PWRCTL			0x0004
#define DRAM_PWRCTL_SELFREF_EN		(0x1 << 0)

#define DRAM_RFSHTMG			0x0090
#define DRAM_RFSHTMG_TREFI(x)		((x) << 16)
#define DRAM_RFSHTMG_TRFC(x)		((x) << 0)

#define DRAM_VTFCR			0x00b8
#define DRAM_VTFCR_VTF_ENABLE		(0x3 << 8)

#define DRAM_ODTMAP			0x0120

#define DRAM_DX_MAX			4

#define DRAM_DXnGCR0(n)			(0x0344 + 0x80 * (n))
#define DRAM_DXnGCR0_DXODT		(0x3 << 4)
#define DRAM_DXnGCR0_DXODT_DYNAMIC	(0x0 << 4)
#define DRAM_DXnGCR0_DXODT_ENABLED	(0x1 << 4)
#define DRAM_DXnGCR0_DXODT_DISABLED	(0x2 << 4)
#define DRAM_DXnGCR0_DXEN		(0x1 << 0)

/* Per-SoC parameters: allowed DRAM divider range and ODT cutoff. */
struct sun8i_a33_mbus_variant {
	u32					min_dram_divider;
	u32					max_dram_divider;
	u32					odt_freq_mhz;
};

struct sun8i_a33_mbus {
	const struct sun8i_a33_mbus_variant	*variant;
	void __iomem				*reg_dram;
	void __iomem				*reg_mbus;
	struct clk				*clk_bus;
	struct clk				*clk_dram;
	struct clk				*clk_mbus;
	struct devfreq				*devfreq_dram;
	struct devfreq_simple_ondemand_data	gov_data;
	struct devfreq_dev_profile		profile;
	u32					data_width;	/* DRAM bus width in bytes */
	u32					nominal_bw;	/* max KiB per PMU period at cur freq */
	u32					odtmap;		/* saved ODTMAP for re-enabling ODT */
	u32					tREFI_ns;
	u32					tRFC_ns;
	unsigned long				freq_table[];
};

/*
 * The unit for this value is (MBUS clock cycles / MBUS_TMR_PERIOD). When
 * MBUS_TMR_PERIOD is programmed to match the MBUS clock frequency in MHz, as
 * it is during DRAM init and during probe, the resulting unit is microseconds.
 */
static int pmu_period = 50000;
module_param(pmu_period, int, 0644);
MODULE_PARM_DESC(pmu_period, "Bandwidth measurement period (microseconds)");

static u32 sun8i_a33_mbus_get_peak_bw(struct sun8i_a33_mbus *priv)
{
	/* Returns the peak transfer (in KiB) during any single PMU period. */
	return readl_relaxed(priv->reg_mbus + MBUS_TOTAL_BWCR);
}

static void sun8i_a33_mbus_restart_pmu_counters(struct sun8i_a33_mbus *priv)
{
	u32 pmu_cfg = MBUS_PMU_CFG_PERIOD(pmu_period) | MBUS_PMU_CFG_UNIT_KB;

	/* All PMU counters are cleared on a disable->enable transition. */
	writel_relaxed(pmu_cfg,
		       priv->reg_mbus + MBUS_PMU_CFG);
	writel_relaxed(pmu_cfg | MBUS_PMU_CFG_ENABLE,
		       priv->reg_mbus + MBUS_PMU_CFG);
}

static void sun8i_a33_mbus_update_nominal_bw(struct sun8i_a33_mbus *priv,
					     u32 ddr_freq_mhz)
{
	/*
	 * Nominal bandwidth (KiB per PMU period):
	 *
	 *   DDR transfers   microseconds     KiB
	 *   ------------- * ------------ * --------
	 *    microsecond     PMU period    transfer
	 */
	priv->nominal_bw = ddr_freq_mhz * pmu_period * priv->data_width / 1024;
}

/*
 * Reprogram the DRAM clock and refresh timing, then run the hardware MDFS
 * (Memory Dynamic Frequency Scaling) sequence to apply both atomically.
 * The register write ordering here follows the hardware procedure and must
 * not be rearranged.
 */
static int sun8i_a33_mbus_set_dram_freq(struct sun8i_a33_mbus *priv,
					unsigned long freq)
{
	u32  ddr_freq_mhz = freq / USEC_PER_SEC; /* DDR */
	u32 dram_freq_mhz = ddr_freq_mhz / 2;    /* SDR */
	u32 mctl_freq_mhz = dram_freq_mhz / 2;   /* HDR */
	u32 dxodt, mdfscr, pwrctl, vtfcr;
	u32 i, tREFI_32ck, tRFC_ck;
	int ret;

	/* The rate change is not effective until the MDFS process runs. */
	ret = clk_set_rate(priv->clk_dram, freq);
	if (ret)
		return ret;

	/* Disable automatic self-refesh and VTF before starting MDFS. */
	pwrctl = readl_relaxed(priv->reg_dram + DRAM_PWRCTL) &
		 ~DRAM_PWRCTL_SELFREF_EN;
	writel_relaxed(pwrctl, priv->reg_dram + DRAM_PWRCTL);
	vtfcr = readl_relaxed(priv->reg_dram + DRAM_VTFCR);
	writel_relaxed(vtfcr & ~DRAM_VTFCR_VTF_ENABLE,
		       priv->reg_dram + DRAM_VTFCR);

	/* Set up MDFS and enable double buffering for timing registers. */
	mdfscr = MBUS_MDFSCR_MODE_DFS |
		 MBUS_MDFSCR_BYPASS |
		 MBUS_MDFSCR_PAD_HOLD |
		 MBUS_MDFSCR_BUFFER_TIMING;
	writel(mdfscr, priv->reg_mbus + MBUS_MDFSCR);

	/* Update the buffered copy of RFSHTMG. */
	tREFI_32ck = priv->tREFI_ns * mctl_freq_mhz / 1000 / 32;
	tRFC_ck = DIV_ROUND_UP(priv->tRFC_ns * mctl_freq_mhz, 1000);
	writel(DRAM_RFSHTMG_TREFI(tREFI_32ck) | DRAM_RFSHTMG_TRFC(tRFC_ck),
	       priv->reg_dram + DRAM_RFSHTMG);

	/* Enable ODT if needed, or disable it to save power. */
	if (priv->odtmap && dram_freq_mhz > priv->variant->odt_freq_mhz) {
		dxodt = DRAM_DXnGCR0_DXODT_DYNAMIC;
		writel(priv->odtmap, priv->reg_dram + DRAM_ODTMAP);
	} else {
		dxodt = DRAM_DXnGCR0_DXODT_DISABLED;
		writel(0, priv->reg_dram + DRAM_ODTMAP);
	}
	for (i = 0; i < DRAM_DX_MAX; ++i) {
		void __iomem *reg = priv->reg_dram + DRAM_DXnGCR0(i);

		writel((readl(reg) & ~DRAM_DXnGCR0_DXODT) | dxodt, reg);
	}

	dev_dbg(priv->devfreq_dram->dev.parent,
		"Setting DRAM to %u MHz, tREFI=%u, tRFC=%u, ODT=%s\n",
		dram_freq_mhz, tREFI_32ck, tRFC_ck,
		dxodt == DRAM_DXnGCR0_DXODT_DYNAMIC ? "dynamic" : "disabled");

	/* Trigger hardware MDFS. */
	writel(mdfscr | MBUS_MDFSCR_START, priv->reg_mbus + MBUS_MDFSCR);
	ret = readl_poll_timeout_atomic(priv->reg_mbus + MBUS_MDFSCR, mdfscr,
					!(mdfscr & MBUS_MDFSCR_START), 10, 1000);
	if (ret)
		return ret;

	/* Disable double buffering. */
	writel(0, priv->reg_mbus + MBUS_MDFSCR);

	/* Restore VTF configuration. */
	writel_relaxed(vtfcr, priv->reg_dram + DRAM_VTFCR);

	/* Enable automatic self-refresh at the lowest frequency only. */
	if (freq == priv->freq_table[0])
		pwrctl |= DRAM_PWRCTL_SELFREF_EN;
	writel_relaxed(pwrctl, priv->reg_dram + DRAM_PWRCTL);

	sun8i_a33_mbus_restart_pmu_counters(priv);
	sun8i_a33_mbus_update_nominal_bw(priv, ddr_freq_mhz);

	return 0;
}

/* devfreq target callback: validate against OPPs, then run MDFS if changed. */
static int sun8i_a33_mbus_set_dram_target(struct device *dev,
					  unsigned long *freq, u32 flags)
{
	struct sun8i_a33_mbus *priv = dev_get_drvdata(dev);
	struct devfreq *devfreq = priv->devfreq_dram;
	struct dev_pm_opp *opp;
	int ret;

	opp = devfreq_recommended_opp(dev, freq, flags);
	if (IS_ERR(opp))
		return PTR_ERR(opp);

	dev_pm_opp_put(opp);

	if (*freq == devfreq->previous_freq)
		return 0;

	ret = sun8i_a33_mbus_set_dram_freq(priv, *freq);
	if (ret) {
		dev_warn(dev, "failed to set DRAM frequency: %d\n", ret);
		/* Report the frequency actually still in effect. */
		*freq = devfreq->previous_freq;
	}

	return ret;
}

/*
 * devfreq status callback: busy/total are expressed as measured peak vs.
 * nominal bandwidth per PMU period, and the counters are restarted so the
 * next poll sees a fresh window.
 */
static int sun8i_a33_mbus_get_dram_status(struct device *dev,
					  struct devfreq_dev_status *stat)
{
	struct sun8i_a33_mbus *priv = dev_get_drvdata(dev);

	stat->busy_time		= sun8i_a33_mbus_get_peak_bw(priv);
	stat->total_time	= priv->nominal_bw;
	stat->current_frequency	= priv->devfreq_dram->previous_freq;

	sun8i_a33_mbus_restart_pmu_counters(priv);

	dev_dbg(dev, "Using %lu/%lu (%lu%%) at %lu MHz\n",
		stat->busy_time, stat->total_time,
		DIV_ROUND_CLOSEST(stat->busy_time * 100, stat->total_time),
		stat->current_frequency / USEC_PER_SEC);

	return 0;
}

/*
 * One-time hardware discovery/setup: derive refresh timings from the DRAM
 * type, save ODTMAP, detect the data bus width, program the PMU time base,
 * and unmask MDFS masters.
 */
static int sun8i_a33_mbus_hw_init(struct device *dev,
				  struct sun8i_a33_mbus *priv,
				  unsigned long ddr_freq)
{
	u32 i, mbus_cr, mbus_freq_mhz;

	/* Choose tREFI and tRFC to match the configured DRAM type. */
	mbus_cr = readl_relaxed(priv->reg_mbus + MBUS_CR);
	switch (MBUS_CR_GET_DRAM_TYPE(mbus_cr)) {
	case MBUS_CR_DRAM_TYPE_DDR2:
	case MBUS_CR_DRAM_TYPE_DDR3:
	case MBUS_CR_DRAM_TYPE_DDR4:
		priv->tREFI_ns = 7800;
		priv->tRFC_ns = 350;
		break;
	case MBUS_CR_DRAM_TYPE_LPDDR2:
	case MBUS_CR_DRAM_TYPE_LPDDR3:
		priv->tREFI_ns = 3900;
		priv->tRFC_ns = 210;
		break;
	default:
		return -EINVAL;
	}

	/* Save ODTMAP so it can be restored when raising the frequency. */
	priv->odtmap = readl_relaxed(priv->reg_dram + DRAM_ODTMAP);

	/* Compute the DRAM data bus width by counting enabled DATx8 blocks. */
	for (i = 0; i < DRAM_DX_MAX; ++i) {
		void __iomem *reg = priv->reg_dram + DRAM_DXnGCR0(i);

		if (!(readl_relaxed(reg) & DRAM_DXnGCR0_DXEN))
			break;
	}
	priv->data_width = i;

	dev_dbg(dev, "Detected %u-bit %sDDRx with%s ODT\n",
		priv->data_width * 8,
		MBUS_CR_GET_DRAM_TYPE(mbus_cr) > 4 ? "LP" : "",
		priv->odtmap ? "" : "out");

	/* Program MBUS_TMR such that the PMU period unit is microseconds. */
	mbus_freq_mhz = clk_get_rate(priv->clk_mbus) / USEC_PER_SEC;
	writel_relaxed(MBUS_TMR_PERIOD(mbus_freq_mhz),
		       priv->reg_mbus + MBUS_TMR);

	/* "Master Ready Mask Register" bits must be set or MDFS will block. */
	writel_relaxed(0xffffffff, priv->reg_mbus + MBUS_MDFSMRMR);

	sun8i_a33_mbus_restart_pmu_counters(priv);
	sun8i_a33_mbus_update_nominal_bw(priv, ddr_freq / USEC_PER_SEC);

	return 0;
}

static int __maybe_unused sun8i_a33_mbus_suspend(struct device *dev)
{
	struct sun8i_a33_mbus *priv = dev_get_drvdata(dev);

	clk_disable_unprepare(priv->clk_bus);

	return 0;
}

static int __maybe_unused sun8i_a33_mbus_resume(struct device *dev)
{
	struct sun8i_a33_mbus *priv = dev_get_drvdata(dev);

	return clk_prepare_enable(priv->clk_bus);
}

static int sun8i_a33_mbus_probe(struct platform_device *pdev)
{
	const struct sun8i_a33_mbus_variant *variant;
	struct device *dev = &pdev->dev;
	struct sun8i_a33_mbus *priv;
	unsigned long base_freq;
	unsigned int max_state;
	const char *err;
	int i, ret;

	variant = device_get_match_data(dev);
	if (!variant)
		return -EINVAL;

	/* One OPP per integer divider between min and max (inclusive). */
	max_state = variant->max_dram_divider - variant->min_dram_divider + 1;

	priv = devm_kzalloc(dev, struct_size(priv, freq_table, max_state), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	platform_set_drvdata(pdev, priv);

	priv->variant = variant;

	priv->reg_dram = devm_platform_ioremap_resource_byname(pdev, "dram");
	if (IS_ERR(priv->reg_dram))
		return PTR_ERR(priv->reg_dram);

	priv->reg_mbus = devm_platform_ioremap_resource_byname(pdev, "mbus");
	if (IS_ERR(priv->reg_mbus))
		return PTR_ERR(priv->reg_mbus);

	priv->clk_bus = devm_clk_get(dev, "bus");
	if (IS_ERR(priv->clk_bus))
		return dev_err_probe(dev, PTR_ERR(priv->clk_bus),
				     "failed to get bus clock\n");

	priv->clk_dram = devm_clk_get(dev, "dram");
	if (IS_ERR(priv->clk_dram))
		return dev_err_probe(dev, PTR_ERR(priv->clk_dram),
				     "failed to get dram clock\n");

	priv->clk_mbus = devm_clk_get(dev, "mbus");
	if (IS_ERR(priv->clk_mbus))
		return dev_err_probe(dev, PTR_ERR(priv->clk_mbus),
				     "failed to get mbus clock\n");

	ret = clk_prepare_enable(priv->clk_bus);
	if (ret)
		return dev_err_probe(dev, ret,
				     "failed to enable bus clock\n");

	/* Lock the DRAM clock rate to keep priv->nominal_bw in sync. */
	ret = clk_rate_exclusive_get(priv->clk_dram);
	if (ret) {
		err = "failed to lock dram clock rate\n";
		goto err_disable_bus;
	}

	/* Lock the MBUS clock rate to keep MBUS_TMR_PERIOD in sync. */
	ret = clk_rate_exclusive_get(priv->clk_mbus);
	if (ret) {
		err = "failed to lock mbus clock rate\n";
		goto err_unlock_dram;
	}

	priv->gov_data.upthreshold	= 10;
	priv->gov_data.downdifferential	=  5;

	priv->profile.initial_freq	= clk_get_rate(priv->clk_dram);
	priv->profile.polling_ms	= 1000;
	priv->profile.target		= sun8i_a33_mbus_set_dram_target;
	priv->profile.get_dev_status	= sun8i_a33_mbus_get_dram_status;
	priv->profile.freq_table	= priv->freq_table;
	priv->profile.max_state		= max_state;

	ret = devm_pm_opp_set_clkname(dev, "dram");
	if (ret) {
		err = "failed to add OPP table\n";
		goto err_unlock_mbus;
	}

	base_freq = clk_get_rate(clk_get_parent(priv->clk_dram));
	for (i = 0; i < max_state; ++i) {
		unsigned int div = variant->max_dram_divider - i;

		priv->freq_table[i] = base_freq / div;

		ret = dev_pm_opp_add(dev, priv->freq_table[i], 0);
		if (ret) {
			err = "failed to add OPPs\n";
			goto err_remove_opps;
		}
	}

	ret = sun8i_a33_mbus_hw_init(dev, priv, priv->profile.initial_freq);
	if (ret) {
		err = "failed to init hardware\n";
		goto err_remove_opps;
	}

	priv->devfreq_dram = devfreq_add_device(dev, &priv->profile,
						DEVFREQ_GOV_SIMPLE_ONDEMAND,
						&priv->gov_data);
	if (IS_ERR(priv->devfreq_dram)) {
		ret = PTR_ERR(priv->devfreq_dram);
		err = "failed to add devfreq device\n";
		goto err_remove_opps;
	}

	/*
	 * This must be set manually after registering the devfreq device,
	 * because there is no way to select a dynamic OPP as the suspend OPP.
	 */
	priv->devfreq_dram->suspend_freq = priv->freq_table[0];

	return 0;

err_remove_opps:
	dev_pm_opp_remove_all_dynamic(dev);
err_unlock_mbus:
	clk_rate_exclusive_put(priv->clk_mbus);
err_unlock_dram:
	clk_rate_exclusive_put(priv->clk_dram);
err_disable_bus:
	clk_disable_unprepare(priv->clk_bus);

	return dev_err_probe(dev, ret, err);
}

static int sun8i_a33_mbus_remove(struct platform_device *pdev)
{
	struct sun8i_a33_mbus *priv = platform_get_drvdata(pdev);
	unsigned long initial_freq = priv->profile.initial_freq;
	struct device *dev = &pdev->dev;
	int ret;

	devfreq_remove_device(priv->devfreq_dram);

	/* Put the DRAM back at the rate firmware/bootloader configured. */
	ret = sun8i_a33_mbus_set_dram_freq(priv, initial_freq);
	if (ret)
		dev_warn(dev, "failed to restore DRAM frequency: %d\n", ret);

	dev_pm_opp_remove_all_dynamic(dev);
	clk_rate_exclusive_put(priv->clk_mbus);
	clk_rate_exclusive_put(priv->clk_dram);
	clk_disable_unprepare(priv->clk_bus);

	return 0;
}

static const struct sun8i_a33_mbus_variant sun50i_a64_mbus = {
	.min_dram_divider	= 1,
	.max_dram_divider	= 4,
	.odt_freq_mhz		= 400,
};

static const struct of_device_id sun8i_a33_mbus_of_match[] = {
	{ .compatible = "allwinner,sun50i-a64-mbus", .data = &sun50i_a64_mbus },
	{ .compatible = "allwinner,sun50i-h5-mbus", .data = &sun50i_a64_mbus },
	{ },
};
MODULE_DEVICE_TABLE(of, sun8i_a33_mbus_of_match);

static SIMPLE_DEV_PM_OPS(sun8i_a33_mbus_pm_ops,
			 sun8i_a33_mbus_suspend, sun8i_a33_mbus_resume);

static struct platform_driver sun8i_a33_mbus_driver = {
	.probe	= sun8i_a33_mbus_probe,
	.remove	= sun8i_a33_mbus_remove,
	.driver	= {
		.name		= "sun8i-a33-mbus",
		.of_match_table	= sun8i_a33_mbus_of_match,
		.pm		= pm_ptr(&sun8i_a33_mbus_pm_ops),
	},
};
module_platform_driver(sun8i_a33_mbus_driver);

MODULE_AUTHOR("Samuel Holland <[email protected]>");
MODULE_DESCRIPTION("Allwinner sun8i/sun50i MBUS DEVFREQ Driver");
MODULE_LICENSE("GPL v2");
linux-master
drivers/devfreq/sun8i-a33-mbus.c
// SPDX-License-Identifier: GPL-2.0-only /* * linux/drivers/devfreq/governor_userspace.c * * Copyright (C) 2011 Samsung Electronics * MyungJoo Ham <[email protected]> */ #include <linux/slab.h> #include <linux/device.h> #include <linux/devfreq.h> #include <linux/pm.h> #include <linux/mutex.h> #include <linux/module.h> #include "governor.h" struct userspace_data { unsigned long user_frequency; bool valid; }; static int devfreq_userspace_func(struct devfreq *df, unsigned long *freq) { struct userspace_data *data = df->governor_data; if (data->valid) *freq = data->user_frequency; else *freq = df->previous_freq; /* No user freq specified yet */ return 0; } static ssize_t set_freq_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct devfreq *devfreq = to_devfreq(dev); struct userspace_data *data; unsigned long wanted; int err = 0; mutex_lock(&devfreq->lock); data = devfreq->governor_data; sscanf(buf, "%lu", &wanted); data->user_frequency = wanted; data->valid = true; err = update_devfreq(devfreq); if (err == 0) err = count; mutex_unlock(&devfreq->lock); return err; } static ssize_t set_freq_show(struct device *dev, struct device_attribute *attr, char *buf) { struct devfreq *devfreq = to_devfreq(dev); struct userspace_data *data; int err = 0; mutex_lock(&devfreq->lock); data = devfreq->governor_data; if (data->valid) err = sprintf(buf, "%lu\n", data->user_frequency); else err = sprintf(buf, "undefined\n"); mutex_unlock(&devfreq->lock); return err; } static DEVICE_ATTR_RW(set_freq); static struct attribute *dev_entries[] = { &dev_attr_set_freq.attr, NULL, }; static const struct attribute_group dev_attr_group = { .name = DEVFREQ_GOV_USERSPACE, .attrs = dev_entries, }; static int userspace_init(struct devfreq *devfreq) { int err = 0; struct userspace_data *data = kzalloc(sizeof(struct userspace_data), GFP_KERNEL); if (!data) { err = -ENOMEM; goto out; } data->valid = false; devfreq->governor_data = data; err = 
sysfs_create_group(&devfreq->dev.kobj, &dev_attr_group); out: return err; } static void userspace_exit(struct devfreq *devfreq) { /* * Remove the sysfs entry, unless this is being called after * device_del(), which should have done this already via kobject_del(). */ if (devfreq->dev.kobj.sd) sysfs_remove_group(&devfreq->dev.kobj, &dev_attr_group); kfree(devfreq->governor_data); devfreq->governor_data = NULL; } static int devfreq_userspace_handler(struct devfreq *devfreq, unsigned int event, void *data) { int ret = 0; switch (event) { case DEVFREQ_GOV_START: ret = userspace_init(devfreq); break; case DEVFREQ_GOV_STOP: userspace_exit(devfreq); break; default: break; } return ret; } static struct devfreq_governor devfreq_userspace = { .name = DEVFREQ_GOV_USERSPACE, .get_target_freq = devfreq_userspace_func, .event_handler = devfreq_userspace_handler, }; static int __init devfreq_userspace_init(void) { return devfreq_add_governor(&devfreq_userspace); } subsys_initcall(devfreq_userspace_init); static void __exit devfreq_userspace_exit(void) { int ret; ret = devfreq_remove_governor(&devfreq_userspace); if (ret) pr_err("%s: failed remove governor %d\n", __func__, ret); return; } module_exit(devfreq_userspace_exit); MODULE_LICENSE("GPL");
linux-master
drivers/devfreq/governor_userspace.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * A devfreq driver for NVIDIA Tegra SoCs
 *
 * Copyright (c) 2014 NVIDIA CORPORATION. All rights reserved.
 * Copyright (C) 2014 Google, Inc
 */

#include <linux/clk.h>
#include <linux/cpufreq.h>
#include <linux/devfreq.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_opp.h>
#include <linux/reset.h>
#include <linux/workqueue.h>

#include <soc/tegra/fuse.h>

#include "governor.h"

/* Global ACTMON registers (relative to tegra->regs) */
#define ACTMON_GLB_STATUS					0x0
#define ACTMON_GLB_PERIOD_CTRL					0x4

/* Per-device ACTMON registers (relative to dev->regs) */
#define ACTMON_DEV_CTRL						0x0
#define ACTMON_DEV_CTRL_K_VAL_SHIFT				10
#define ACTMON_DEV_CTRL_ENB_PERIODIC				BIT(18)
#define ACTMON_DEV_CTRL_AVG_BELOW_WMARK_EN			BIT(20)
#define ACTMON_DEV_CTRL_AVG_ABOVE_WMARK_EN			BIT(21)
#define ACTMON_DEV_CTRL_CONSECUTIVE_BELOW_WMARK_NUM_SHIFT	23
#define ACTMON_DEV_CTRL_CONSECUTIVE_ABOVE_WMARK_NUM_SHIFT	26
#define ACTMON_DEV_CTRL_CONSECUTIVE_BELOW_WMARK_EN		BIT(29)
#define ACTMON_DEV_CTRL_CONSECUTIVE_ABOVE_WMARK_EN		BIT(30)
#define ACTMON_DEV_CTRL_ENB					BIT(31)

#define ACTMON_DEV_CTRL_STOP					0x00000000

#define ACTMON_DEV_UPPER_WMARK					0x4
#define ACTMON_DEV_LOWER_WMARK					0x8
#define ACTMON_DEV_INIT_AVG					0xc
#define ACTMON_DEV_AVG_UPPER_WMARK				0x10
#define ACTMON_DEV_AVG_LOWER_WMARK				0x14
#define ACTMON_DEV_COUNT_WEIGHT					0x18
#define ACTMON_DEV_AVG_COUNT					0x20
#define ACTMON_DEV_INTR_STATUS					0x24

#define ACTMON_INTR_STATUS_CLEAR				0xffffffff

#define ACTMON_DEV_INTR_CONSECUTIVE_UPPER			BIT(31)
#define ACTMON_DEV_INTR_CONSECUTIVE_LOWER			BIT(30)

#define ACTMON_ABOVE_WMARK_WINDOW				1
#define ACTMON_BELOW_WMARK_WINDOW				3
#define ACTMON_BOOST_FREQ_STEP					16000

/*
 * ACTMON_AVERAGE_WINDOW_LOG2: default value for @DEV_CTRL_K_VAL, which
 * translates to 2 ^ (K_VAL + 1). ex: 2 ^ (6 + 1) = 128
 */
#define ACTMON_AVERAGE_WINDOW_LOG2			6
#define ACTMON_SAMPLING_PERIOD				12 /* ms */
#define ACTMON_DEFAULT_AVG_BAND				6  /* 1/10 of % */

#define KHZ						1000

#define KHZ_MAX						(ULONG_MAX / KHZ)

/* Assume that the bus is saturated if the utilization is 25% */
#define BUS_SATURATION_RATIO				25

/**
 * struct tegra_devfreq_device_config - configuration specific to an ACTMON
 * device
 *
 * Coefficients and thresholds are percentages unless otherwise noted
 */
struct tegra_devfreq_device_config {
	u32		offset;		/* register block offset within ACTMON */
	u32		irq_mask;	/* this device's bit in GLB_STATUS */

	/* Factors applied to boost_freq every consecutive watermark breach */
	unsigned int	boost_up_coeff;
	unsigned int	boost_down_coeff;

	/* Define the watermark bounds when applied to the current avg */
	unsigned int	boost_up_threshold;
	unsigned int	boost_down_threshold;

	/*
	 * Threshold of activity (cycles translated to kHz) below which the
	 * CPU frequency isn't to be taken into account. This is to avoid
	 * increasing the EMC frequency when the CPU is very busy but not
	 * accessing the bus often.
	 */
	u32		avg_dependency_threshold;
};

/* Indices into tegra_devfreq.devices[] / the per-SoC config arrays */
enum tegra_actmon_device {
	MCALL = 0,
	MCCPU,
};

static const struct tegra_devfreq_device_config tegra124_device_configs[] = {
	{
		/* MCALL: All memory accesses (including from the CPUs) */
		.offset = 0x1c0,
		.irq_mask = 1 << 26,
		.boost_up_coeff = 200,
		.boost_down_coeff = 50,
		.boost_up_threshold = 60,
		.boost_down_threshold = 40,
	},
	{
		/* MCCPU: memory accesses from the CPUs */
		.offset = 0x200,
		.irq_mask = 1 << 25,
		.boost_up_coeff = 800,
		.boost_down_coeff = 40,
		.boost_up_threshold = 27,
		.boost_down_threshold = 10,
		.avg_dependency_threshold = 16000, /* 16MHz in kHz units */
	},
};

static const struct tegra_devfreq_device_config tegra30_device_configs[] = {
	{
		/* MCALL: All memory accesses (including from the CPUs) */
		.offset = 0x1c0,
		.irq_mask = 1 << 26,
		.boost_up_coeff = 200,
		.boost_down_coeff = 50,
		.boost_up_threshold = 20,
		.boost_down_threshold = 10,
	},
	{
		/* MCCPU: memory accesses from the CPUs */
		.offset = 0x200,
		.irq_mask = 1 << 25,
		.boost_up_coeff = 800,
		.boost_down_coeff = 40,
		.boost_up_threshold = 27,
		.boost_down_threshold = 10,
		.avg_dependency_threshold = 16000, /* 16MHz in kHz units */
	},
};

/**
 * struct tegra_devfreq_device - state specific to an ACTMON device
 *
 * Frequencies are in kHz.
 */
struct tegra_devfreq_device {
	const struct tegra_devfreq_device_config *config;
	void __iomem *regs;	/* this device's register window */

	/* Average event count sampled in the last interrupt */
	u32 avg_count;

	/*
	 * Extra frequency to increase the target by due to consecutive
	 * watermark breaches.
	 */
	unsigned long boost_freq;

	/* Optimal frequency calculated from the stats for this device */
	unsigned long target_freq;
};

struct tegra_devfreq_soc_data {
	const struct tegra_devfreq_device_config *configs;
	/* Weight value for count measurements */
	unsigned int count_weight;
};

/* Driver-wide state; one instance per probed ACTMON block. */
struct tegra_devfreq {
	struct devfreq		*devfreq;
	struct reset_control	*reset;
	struct clk		*clock;
	void __iomem		*regs;

	struct clk		*emc_clock;
	unsigned long		max_freq;	/* kHz */
	unsigned long		cur_freq;	/* kHz */
	struct notifier_block	clk_rate_change_nb;

	struct delayed_work	cpufreq_update_work;
	struct notifier_block	cpu_rate_change_nb;

	struct tegra_devfreq_device devices[2];

	unsigned int		irq;

	bool			started;	/* governor currently running */

	const struct tegra_devfreq_soc_data *soc;
};

/* CPU-freq -> static EMC-freq floor mapping, in descending CPU order. */
struct tegra_actmon_emc_ratio {
	unsigned long cpu_freq;
	unsigned long emc_freq;
};

static const struct tegra_actmon_emc_ratio actmon_emc_ratios[] = {
	{ 1400000,    KHZ_MAX },
	{ 1200000,    750000 },
	{ 1100000,    600000 },
	{ 1000000,    500000 },
	{  800000,    375000 },
	{  500000,    200000 },
	{  250000,    100000 },
};

/* Read a global ACTMON register. */
static u32 actmon_readl(struct tegra_devfreq *tegra, u32 offset)
{
	return readl_relaxed(tegra->regs + offset);
}

/* Write a global ACTMON register. */
static void actmon_writel(struct tegra_devfreq *tegra, u32 val, u32 offset)
{
	writel_relaxed(val, tegra->regs + offset);
}

/* Read a per-device ACTMON register. */
static u32 device_readl(struct tegra_devfreq_device *dev, u32 offset)
{
	return readl_relaxed(dev->regs + offset);
}

/* Write a per-device ACTMON register. */
static void device_writel(struct tegra_devfreq_device *dev, u32 val,
			  u32 offset)
{
	writel_relaxed(val, dev->regs + offset);
}

/* Return val * pct / 100, saturated to U32_MAX in 64-bit arithmetic. */
static unsigned long do_percent(unsigned long long val, unsigned int pct)
{
	val = val * pct;
	do_div(val, 100);

	/*
	 * High freq + high boosting percent + large polling interval are
	 * resulting in integer overflow when watermarks are calculated.
	 */
	return min_t(u64, val, U32_MAX);
}

/*
 * Re-centre the averaged-count watermark band around the last sampled
 * avg_count, clamped so that the +/- band arithmetic cannot wrap.
 */
static void tegra_devfreq_update_avg_wmark(struct tegra_devfreq *tegra,
					   struct tegra_devfreq_device *dev)
{
	u32 avg_band_freq = tegra->max_freq * ACTMON_DEFAULT_AVG_BAND / KHZ;
	u32 band = avg_band_freq * tegra->devfreq->profile->polling_ms;
	u32 avg;

	avg = min(dev->avg_count, U32_MAX - band);
	device_writel(dev, avg + band, ACTMON_DEV_AVG_UPPER_WMARK);

	avg = max(dev->avg_count, band);
	device_writel(dev, avg - band, ACTMON_DEV_AVG_LOWER_WMARK);
}

/*
 * Recompute the raw upper/lower watermarks from the current EMC rate and
 * the device's boost thresholds.
 */
static void tegra_devfreq_update_wmark(struct tegra_devfreq *tegra,
				       struct tegra_devfreq_device *dev)
{
	u32 val = tegra->cur_freq * tegra->devfreq->profile->polling_ms;

	device_writel(dev, do_percent(val, dev->config->boost_up_threshold),
		      ACTMON_DEV_UPPER_WMARK);

	device_writel(dev, do_percent(val, dev->config->boost_down_threshold),
		      ACTMON_DEV_LOWER_WMARK);
}

/*
 * Per-device half of the threaded IRQ: sample the averaged activity count,
 * re-arm the watermarks, and grow/shrink boost_freq on consecutive
 * upper/lower watermark breaches (disabling the breached direction once
 * the boost saturates at max_freq or decays to zero).
 */
static void actmon_isr_device(struct tegra_devfreq *tegra,
			      struct tegra_devfreq_device *dev)
{
	u32 intr_status, dev_ctrl;

	dev->avg_count = device_readl(dev, ACTMON_DEV_AVG_COUNT);
	tegra_devfreq_update_avg_wmark(tegra, dev);

	intr_status = device_readl(dev, ACTMON_DEV_INTR_STATUS);
	dev_ctrl = device_readl(dev, ACTMON_DEV_CTRL);

	if (intr_status & ACTMON_DEV_INTR_CONSECUTIVE_UPPER) {
		/*
		 * new_boost = min(old_boost * up_coef + step, max_freq)
		 */
		dev->boost_freq = do_percent(dev->boost_freq,
					     dev->config->boost_up_coeff);
		dev->boost_freq += ACTMON_BOOST_FREQ_STEP;

		dev_ctrl |= ACTMON_DEV_CTRL_CONSECUTIVE_BELOW_WMARK_EN;

		if (dev->boost_freq >= tegra->max_freq) {
			dev_ctrl &= ~ACTMON_DEV_CTRL_CONSECUTIVE_ABOVE_WMARK_EN;
			dev->boost_freq = tegra->max_freq;
		}
	} else if (intr_status & ACTMON_DEV_INTR_CONSECUTIVE_LOWER) {
		/*
		 * new_boost = old_boost * down_coef
		 * or 0 if (old_boost * down_coef < step / 2)
		 */
		dev->boost_freq = do_percent(dev->boost_freq,
					     dev->config->boost_down_coeff);

		dev_ctrl |= ACTMON_DEV_CTRL_CONSECUTIVE_ABOVE_WMARK_EN;

		if (dev->boost_freq < (ACTMON_BOOST_FREQ_STEP >> 1)) {
			dev_ctrl &= ~ACTMON_DEV_CTRL_CONSECUTIVE_BELOW_WMARK_EN;
			dev->boost_freq = 0;
		}
	}

	device_writel(dev, dev_ctrl, ACTMON_DEV_CTRL);

	device_writel(dev, ACTMON_INTR_STATUS_CLEAR, ACTMON_DEV_INTR_STATUS);
}

/*
 * Map a CPU frequency (kHz) to the static EMC floor from the ratio table,
 * capped at max_freq; returns 0 when cpu_freq is below every table entry.
 */
static unsigned long actmon_cpu_to_emc_rate(struct tegra_devfreq *tegra,
					    unsigned long cpu_freq)
{
	unsigned int i;
	const struct tegra_actmon_emc_ratio *ratio = actmon_emc_ratios;

	for (i = 0; i < ARRAY_SIZE(actmon_emc_ratios); i++, ratio++) {
		if (cpu_freq >= ratio->cpu_freq) {
			if (ratio->emc_freq >= tegra->max_freq)
				return tegra->max_freq;
			else
				return ratio->emc_freq;
		}
	}

	return 0;
}

/*
 * Translate the sampled activity count into a target frequency (kHz),
 * scaled so that boost_up_threshold percent utilization is sustainable.
 */
static unsigned long actmon_device_target_freq(struct tegra_devfreq *tegra,
					       struct tegra_devfreq_device *dev)
{
	unsigned int avg_sustain_coef;
	unsigned long target_freq;

	target_freq = dev->avg_count / tegra->devfreq->profile->polling_ms;
	avg_sustain_coef = 100 * 100 / dev->config->boost_up_threshold;
	target_freq = do_percent(target_freq, avg_sustain_coef);

	return target_freq;
}

/*
 * Refresh dev->target_freq: activity-based target plus boost, raised to
 * the CPU-derived static EMC floor when this device tracks CPU accesses
 * and the activity exceeds its avg_dependency_threshold.
 */
static void actmon_update_target(struct tegra_devfreq *tegra,
				 struct tegra_devfreq_device *dev)
{
	unsigned long cpu_freq = 0;
	unsigned long static_cpu_emc_freq = 0;

	dev->target_freq = actmon_device_target_freq(tegra, dev);

	if (dev->config->avg_dependency_threshold &&
	    dev->config->avg_dependency_threshold <= dev->target_freq) {
		cpu_freq = cpufreq_quick_get(0);
		static_cpu_emc_freq = actmon_cpu_to_emc_rate(tegra, cpu_freq);

		dev->target_freq += dev->boost_freq;
		dev->target_freq = max(dev->target_freq, static_cpu_emc_freq);
	} else {
		dev->target_freq += dev->boost_freq;
	}
}

/*
 * Threaded IRQ handler: service every device flagged in GLB_STATUS under
 * the devfreq lock and re-evaluate the frequency if anything was handled.
 */
static irqreturn_t actmon_thread_isr(int irq, void *data)
{
	struct tegra_devfreq *tegra = data;
	bool handled = false;
	unsigned int i;
	u32 val;

	mutex_lock(&tegra->devfreq->lock);

	val = actmon_readl(tegra, ACTMON_GLB_STATUS);

	for (i = 0; i < ARRAY_SIZE(tegra->devices); i++) {
		if (val & tegra->devices[i].config->irq_mask) {
			actmon_isr_device(tegra, tegra->devices + i);
			handled = true;
		}
	}

	if (handled)
		update_devfreq(tegra->devfreq);

	mutex_unlock(&tegra->devfreq->lock);

	return handled ? IRQ_HANDLED : IRQ_NONE;
}

/*
 * EMC clock rate-change notifier: track the new rate and re-arm the raw
 * watermarks of every device so they match the actual clock.
 */
static int tegra_actmon_clk_notify_cb(struct notifier_block *nb,
				      unsigned long action, void *ptr)
{
	struct clk_notifier_data *data = ptr;
	struct tegra_devfreq *tegra;
	struct tegra_devfreq_device *dev;
	unsigned int i;

	if (action != POST_RATE_CHANGE)
		return NOTIFY_OK;

	tegra = container_of(nb, struct tegra_devfreq, clk_rate_change_nb);

	tegra->cur_freq = data->new_rate / KHZ;

	for (i = 0; i < ARRAY_SIZE(tegra->devices); i++) {
		dev = &tegra->devices[i];

		tegra_devfreq_update_wmark(tegra, dev);
	}

	return NOTIFY_OK;
}

/* Deferred devfreq re-evaluation scheduled from the cpufreq notifier. */
static void tegra_actmon_delayed_update(struct work_struct *work)
{
	struct tegra_devfreq *tegra = container_of(work, struct tegra_devfreq,
						   cpufreq_update_work.work);

	mutex_lock(&tegra->devfreq->lock);
	update_devfreq(tegra->devfreq);
	mutex_unlock(&tegra->devfreq->lock);
}

/*
 * Return the static EMC floor a given CPU frequency would impose via the
 * MCCPU device, or 0 when the CPU's frequency is not a factor (activity
 * below the dependency threshold, or the activity target already covers it).
 */
static unsigned long
tegra_actmon_cpufreq_contribution(struct tegra_devfreq *tegra,
				  unsigned int cpu_freq)
{
	struct tegra_devfreq_device *actmon_dev = &tegra->devices[MCCPU];
	unsigned long static_cpu_emc_freq, dev_freq;

	dev_freq = actmon_device_target_freq(tegra, actmon_dev);

	/* check whether CPU's freq is taken into account at all */
	if (dev_freq < actmon_dev->config->avg_dependency_threshold)
		return 0;

	static_cpu_emc_freq = actmon_cpu_to_emc_rate(tegra, cpu_freq);

	if (dev_freq + actmon_dev->boost_freq >= static_cpu_emc_freq)
		return 0;

	return static_cpu_emc_freq;
}

/*
 * cpufreq transition notifier: if the CPU's new frequency changes its EMC
 * contribution, schedule a deferred devfreq update (we must not block the
 * cpufreq core here).
 */
static int tegra_actmon_cpu_notify_cb(struct notifier_block *nb,
				      unsigned long action, void *ptr)
{
	struct cpufreq_freqs *freqs = ptr;
	struct tegra_devfreq *tegra;
	unsigned long old, new, delay;

	if (action != CPUFREQ_POSTCHANGE)
		return NOTIFY_OK;

	tegra = container_of(nb, struct tegra_devfreq, cpu_rate_change_nb);

	/*
	 * Quickly check whether CPU frequency should be taken into account
	 * at all, without blocking CPUFreq's core.
	 */
	if (mutex_trylock(&tegra->devfreq->lock)) {
		old = tegra_actmon_cpufreq_contribution(tegra, freqs->old);
		new = tegra_actmon_cpufreq_contribution(tegra, freqs->new);
		mutex_unlock(&tegra->devfreq->lock);

		/*
		 * If CPU's frequency shouldn't be taken into account at
		 * the moment, then there is no need to update the devfreq's
		 * state because ISR will re-check CPU's frequency on the
		 * next interrupt.
		 */
		if (old == new)
			return NOTIFY_OK;
	}

	/*
	 * CPUFreq driver should support CPUFREQ_ASYNC_NOTIFICATION in order
	 * to allow asynchronous notifications. This means we can't block
	 * here for too long, otherwise CPUFreq's core will complain with a
	 * warning splat.
	 */
	delay = msecs_to_jiffies(ACTMON_SAMPLING_PERIOD);
	schedule_delayed_work(&tegra->cpufreq_update_work, delay);

	return NOTIFY_OK;
}

/*
 * Program one ACTMON device from scratch: reset the boost state, seed the
 * average with the current rate, arm all watermarks and enable monitoring.
 */
static void tegra_actmon_configure_device(struct tegra_devfreq *tegra,
					  struct tegra_devfreq_device *dev)
{
	u32 val = 0;

	/* reset boosting on governor's restart */
	dev->boost_freq = 0;

	dev->target_freq = tegra->cur_freq;

	dev->avg_count = tegra->cur_freq * tegra->devfreq->profile->polling_ms;
	device_writel(dev, dev->avg_count, ACTMON_DEV_INIT_AVG);

	tegra_devfreq_update_avg_wmark(tegra, dev);
	tegra_devfreq_update_wmark(tegra, dev);

	device_writel(dev, tegra->soc->count_weight, ACTMON_DEV_COUNT_WEIGHT);
	device_writel(dev, ACTMON_INTR_STATUS_CLEAR, ACTMON_DEV_INTR_STATUS);

	val |= ACTMON_DEV_CTRL_ENB_PERIODIC;
	val |= (ACTMON_AVERAGE_WINDOW_LOG2 - 1)
		<< ACTMON_DEV_CTRL_K_VAL_SHIFT;
	val |= (ACTMON_BELOW_WMARK_WINDOW - 1)
		<< ACTMON_DEV_CTRL_CONSECUTIVE_BELOW_WMARK_NUM_SHIFT;
	val |= (ACTMON_ABOVE_WMARK_WINDOW - 1)
		<< ACTMON_DEV_CTRL_CONSECUTIVE_ABOVE_WMARK_NUM_SHIFT;
	val |= ACTMON_DEV_CTRL_AVG_ABOVE_WMARK_EN;
	val |= ACTMON_DEV_CTRL_AVG_BELOW_WMARK_EN;
	val |= ACTMON_DEV_CTRL_CONSECUTIVE_ABOVE_WMARK_EN;
	val |= ACTMON_DEV_CTRL_ENB;

	device_writel(dev, val, ACTMON_DEV_CTRL);
}

/* Halt every ACTMON device and clear any pending interrupt status. */
static void tegra_actmon_stop_devices(struct tegra_devfreq *tegra)
{
	struct tegra_devfreq_device *dev = tegra->devices;
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(tegra->devices); i++, dev++) {
		device_writel(dev, ACTMON_DEV_CTRL_STOP, ACTMON_DEV_CTRL);
		device_writel(dev, ACTMON_INTR_STATUS_CLEAR,
			      ACTMON_DEV_INTR_STATUS);
	}
}

/*
 * (Re)start monitoring: program the sampling period, register the clk and
 * cpufreq notifiers, configure every device and unmask the IRQ. No-op
 * unless polling is enabled and the governor has been started.
 */
static int tegra_actmon_resume(struct tegra_devfreq *tegra)
{
	unsigned int i;
	int err;

	if (!tegra->devfreq->profile->polling_ms || !tegra->started)
		return 0;

	actmon_writel(tegra, tegra->devfreq->profile->polling_ms - 1,
		      ACTMON_GLB_PERIOD_CTRL);

	/*
	 * CLK notifications are needed in order to reconfigure the upper
	 * consecutive watermark in accordance to the actual clock rate
	 * to avoid unnecessary upper interrupts.
	 */
	err = clk_notifier_register(tegra->emc_clock,
				    &tegra->clk_rate_change_nb);
	if (err) {
		dev_err(tegra->devfreq->dev.parent,
			"Failed to register rate change notifier\n");
		return err;
	}

	tegra->cur_freq = clk_get_rate(tegra->emc_clock) / KHZ;

	for (i = 0; i < ARRAY_SIZE(tegra->devices); i++)
		tegra_actmon_configure_device(tegra, &tegra->devices[i]);

	/*
	 * We are estimating CPU's memory bandwidth requirement based on
	 * amount of memory accesses and system's load, judging by CPU's
	 * frequency. We also don't want to receive events about CPU's
	 * frequency transaction when governor is stopped, hence notifier
	 * is registered dynamically.
	 */
	err = cpufreq_register_notifier(&tegra->cpu_rate_change_nb,
					CPUFREQ_TRANSITION_NOTIFIER);
	if (err) {
		dev_err(tegra->devfreq->dev.parent,
			"Failed to register rate change notifier: %d\n", err);
		goto err_stop;
	}

	enable_irq(tegra->irq);

	return 0;

err_stop:
	tegra_actmon_stop_devices(tegra);

	clk_notifier_unregister(tegra->emc_clock, &tegra->clk_rate_change_nb);

	return err;
}

/* Mark the governor as started and bring the hardware up. */
static int tegra_actmon_start(struct tegra_devfreq *tegra)
{
	int ret = 0;

	if (!tegra->started) {
		tegra->started = true;

		ret = tegra_actmon_resume(tegra);
		if (ret)
			tegra->started = false;
	}

	return ret;
}

/*
 * Quiesce monitoring without clearing the started flag: mask the IRQ,
 * drop the notifiers (and any pending deferred update) and stop the
 * devices. Mirror image of tegra_actmon_resume().
 */
static void tegra_actmon_pause(struct tegra_devfreq *tegra)
{
	if (!tegra->devfreq->profile->polling_ms || !tegra->started)
		return;

	disable_irq(tegra->irq);

	cpufreq_unregister_notifier(&tegra->cpu_rate_change_nb,
				    CPUFREQ_TRANSITION_NOTIFIER);

	cancel_delayed_work_sync(&tegra->cpufreq_update_work);

	tegra_actmon_stop_devices(tegra);

	clk_notifier_unregister(tegra->emc_clock, &tegra->clk_rate_change_nb);
}

/* Fully stop: pause the hardware and clear the started flag. */
static void tegra_actmon_stop(struct tegra_devfreq *tegra)
{
	tegra_actmon_pause(tegra);

	tegra->started = false;
}

/* devfreq .target callback: pick and set the nearest OPP for *freq (Hz). */
static int tegra_devfreq_target(struct device *dev, unsigned long *freq,
				u32 flags)
{
	struct dev_pm_opp *opp;
	int ret;

	opp = devfreq_recommended_opp(dev, freq, flags);
	if (IS_ERR(opp)) {
		dev_err(dev, "Failed to find opp for %lu Hz\n", *freq);
		return PTR_ERR(opp);
	}

	ret = dev_pm_opp_set_opp(dev, opp);
	dev_pm_opp_put(opp);

	return ret;
}

/*
 * devfreq .get_dev_status callback: report busy/total cycle counts derived
 * from the MCALL device, scaled by the assumed bus saturation ratio.
 */
static int tegra_devfreq_get_dev_status(struct device *dev,
					struct devfreq_dev_status *stat)
{
	struct tegra_devfreq *tegra = dev_get_drvdata(dev);
	struct tegra_devfreq_device *actmon_dev;
	unsigned long cur_freq;

	cur_freq = READ_ONCE(tegra->cur_freq);

	/* To be used by the tegra governor */
	stat->private_data = tegra;

	/* The below are to be used by the other governors */
	stat->current_frequency = cur_freq * KHZ;

	actmon_dev = &tegra->devices[MCALL];

	/* Number of cycles spent on memory access */
	stat->busy_time = device_readl(actmon_dev, ACTMON_DEV_AVG_COUNT);

	/* The bus can be considered to be saturated way before 100% */
	stat->busy_time *= 100 / BUS_SATURATION_RATIO;

	/* Number of cycles in a sampling period */
	stat->total_time = tegra->devfreq->profile->polling_ms * cur_freq;

	stat->busy_time = min(stat->busy_time, stat->total_time);

	return 0;
}

static struct devfreq_dev_profile tegra_devfreq_profile = {
	.polling_ms	= ACTMON_SAMPLING_PERIOD,
	.target		= tegra_devfreq_target,
	.get_dev_status	= tegra_devfreq_get_dev_status,
	.is_cooling_device = true,
};

/*
 * Governor .get_target_freq: take the max of every device's target and
 * convert from this driver's kHz units to the Hz units devfreq expects.
 */
static int tegra_governor_get_target(struct devfreq *devfreq,
				     unsigned long *freq)
{
	struct devfreq_dev_status *stat;
	struct tegra_devfreq *tegra;
	struct tegra_devfreq_device *dev;
	unsigned long target_freq = 0;
	unsigned int i;
	int err;

	err = devfreq_update_stats(devfreq);
	if (err)
		return err;

	stat = &devfreq->last_status;

	tegra = stat->private_data;

	for (i = 0; i < ARRAY_SIZE(tegra->devices); i++) {
		dev = &tegra->devices[i];

		actmon_update_target(tegra, dev);

		target_freq = max(target_freq, dev->target_freq);
	}

	/*
	 * tegra-devfreq driver operates with KHz units, while OPP table
	 * entries use Hz units. Hence we need to convert the units for the
	 * devfreq core.
	 */
	*freq = target_freq * KHZ;

	return 0;
}

/*
 * Governor event dispatcher: start/stop/suspend/resume the ACTMON hardware
 * alongside the devfreq monitor, and bound interval updates to the 256 ms
 * the hardware supports.
 */
static int tegra_governor_event_handler(struct devfreq *devfreq,
					unsigned int event, void *data)
{
	struct tegra_devfreq *tegra = dev_get_drvdata(devfreq->dev.parent);
	unsigned int *new_delay = data;
	int ret = 0;

	/*
	 * Couple devfreq-device with the governor early because it is
	 * needed at the moment of governor's start (used by ISR).
	 */
	tegra->devfreq = devfreq;

	switch (event) {
	case DEVFREQ_GOV_START:
		devfreq_monitor_start(devfreq);
		ret = tegra_actmon_start(tegra);
		break;

	case DEVFREQ_GOV_STOP:
		tegra_actmon_stop(tegra);
		devfreq_monitor_stop(devfreq);
		break;

	case DEVFREQ_GOV_UPDATE_INTERVAL:
		/*
		 * ACTMON hardware supports up to 256 milliseconds for the
		 * sampling period.
		 */
		if (*new_delay > 256) {
			ret = -EINVAL;
			break;
		}

		tegra_actmon_pause(tegra);
		devfreq_update_interval(devfreq, new_delay);
		ret = tegra_actmon_resume(tegra);
		break;

	case DEVFREQ_GOV_SUSPEND:
		tegra_actmon_stop(tegra);
		devfreq_monitor_suspend(devfreq);
		break;

	case DEVFREQ_GOV_RESUME:
		devfreq_monitor_resume(devfreq);
		ret = tegra_actmon_start(tegra);
		break;
	}

	return ret;
}

static struct devfreq_governor tegra_devfreq_governor = {
	.name = "tegra_actmon",
	.attrs = DEVFREQ_GOV_ATTR_POLLING_INTERVAL,
	.flags = DEVFREQ_GOV_FLAG_IMMUTABLE
	       | DEVFREQ_GOV_FLAG_IRQ_DRIVEN,
	.get_target_freq = tegra_governor_get_target,
	.event_handler = tegra_governor_event_handler,
};

/* devm teardown callback paired with devm_tegra_devfreq_init_hw(). */
static void devm_tegra_devfreq_deinit_hw(void *data)
{
	struct tegra_devfreq *tegra = data;

	reset_control_reset(tegra->reset);
	clk_disable_unprepare(tegra->clock);
}

/* Enable the ACTMON clock and reset the block, with devm-managed undo. */
static int devm_tegra_devfreq_init_hw(struct device *dev,
				      struct tegra_devfreq *tegra)
{
	int err;

	err = clk_prepare_enable(tegra->clock);
	if (err) {
		dev_err(dev, "Failed to prepare and enable ACTMON clock\n");
		return err;
	}

	err = devm_add_action_or_reset(dev, devm_tegra_devfreq_deinit_hw,
				       tegra);
	if (err)
		return err;

	err = reset_control_reset(tegra->reset);
	if (err) {
		dev_err(dev, "Failed to reset hardware: %d\n", err);
		return err;
	}

	return err;
}

static int tegra_devfreq_config_clks_nop(struct device *dev,
					 struct opp_table *opp_table,
					 struct dev_pm_opp *opp, void *data,
					 bool scaling_down)
{
	/* We want to skip clk configuration via dev_pm_opp_set_opp() */
	return 0;
}

/*
 * Probe: map registers, acquire clocks/reset/IRQ, set up the OPP table
 * (filtered by the SoC speedo id), initialize the hardware and register
 * both the custom governor and the devfreq device.
 */
static int tegra_devfreq_probe(struct platform_device *pdev)
{
	u32 hw_version = BIT(tegra_sku_info.soc_speedo_id);
	struct tegra_devfreq_device *dev;
	struct tegra_devfreq *tegra;
	struct devfreq *devfreq;
	unsigned int i;
	long rate;
	int err;
	const char *clk_names[] = { "actmon", NULL };
	struct dev_pm_opp_config config = {
		.supported_hw = &hw_version,
		.supported_hw_count = 1,
		.clk_names = clk_names,
		.config_clks = tegra_devfreq_config_clks_nop,
	};

	tegra = devm_kzalloc(&pdev->dev, sizeof(*tegra), GFP_KERNEL);
	if (!tegra)
		return -ENOMEM;

	tegra->soc = of_device_get_match_data(&pdev->dev);

	tegra->regs = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(tegra->regs))
		return PTR_ERR(tegra->regs);

	tegra->reset = devm_reset_control_get(&pdev->dev, "actmon");
	if (IS_ERR(tegra->reset)) {
		dev_err(&pdev->dev, "Failed to get reset\n");
		return PTR_ERR(tegra->reset);
	}

	tegra->clock = devm_clk_get(&pdev->dev, "actmon");
	if (IS_ERR(tegra->clock)) {
		dev_err(&pdev->dev, "Failed to get actmon clock\n");
		return PTR_ERR(tegra->clock);
	}

	tegra->emc_clock = devm_clk_get(&pdev->dev, "emc");
	if (IS_ERR(tegra->emc_clock))
		return dev_err_probe(&pdev->dev, PTR_ERR(tegra->emc_clock),
				     "Failed to get emc clock\n");

	err = platform_get_irq(pdev, 0);
	if (err < 0)
		return err;

	tegra->irq = err;

	/* keep the IRQ masked until tegra_actmon_resume() is ready for it */
	irq_set_status_flags(tegra->irq, IRQ_NOAUTOEN);

	err = devm_request_threaded_irq(&pdev->dev, tegra->irq, NULL,
					actmon_thread_isr, IRQF_ONESHOT,
					"tegra-devfreq", tegra);
	if (err) {
		dev_err(&pdev->dev, "Interrupt request failed: %d\n", err);
		return err;
	}

	err = devm_pm_opp_set_config(&pdev->dev, &config);
	if (err) {
		dev_err(&pdev->dev, "Failed to set OPP config: %d\n", err);
		return err;
	}

	err = devm_pm_opp_of_add_table_indexed(&pdev->dev, 0);
	if (err) {
		dev_err(&pdev->dev, "Failed to add OPP table: %d\n", err);
		return err;
	}

	err = devm_tegra_devfreq_init_hw(&pdev->dev, tegra);
	if (err)
		return err;

	rate = clk_round_rate(tegra->emc_clock, ULONG_MAX);
	if (rate <= 0) {
		dev_err(&pdev->dev, "Failed to round clock rate: %ld\n", rate);
		return rate ?: -EINVAL;
	}

	tegra->max_freq = rate / KHZ;

	for (i = 0; i < ARRAY_SIZE(tegra->devices); i++) {
		dev = tegra->devices + i;
		dev->config = tegra->soc->configs + i;
		dev->regs = tegra->regs + dev->config->offset;
	}

	platform_set_drvdata(pdev, tegra);

	tegra->clk_rate_change_nb.notifier_call = tegra_actmon_clk_notify_cb;
	tegra->cpu_rate_change_nb.notifier_call = tegra_actmon_cpu_notify_cb;

	INIT_DELAYED_WORK(&tegra->cpufreq_update_work,
			  tegra_actmon_delayed_update);

	err = devm_devfreq_add_governor(&pdev->dev, &tegra_devfreq_governor);
	if (err) {
		dev_err(&pdev->dev, "Failed to add governor: %d\n", err);
		return err;
	}

	tegra_devfreq_profile.initial_freq = clk_get_rate(tegra->emc_clock);

	devfreq = devm_devfreq_add_device(&pdev->dev, &tegra_devfreq_profile,
					  "tegra_actmon", NULL);
	if (IS_ERR(devfreq)) {
		dev_err(&pdev->dev, "Failed to add device: %pe\n", devfreq);
		return PTR_ERR(devfreq);
	}

	return 0;
}

static const struct tegra_devfreq_soc_data tegra124_soc = {
	.configs = tegra124_device_configs,

	/*
	 * Activity counter is incremented every 256 memory transactions,
	 * and each transaction takes 4 EMC clocks.
	 */
	.count_weight = 4 * 256,
};

static const struct tegra_devfreq_soc_data tegra30_soc = {
	.configs = tegra30_device_configs,
	.count_weight = 2 * 256,
};

static const struct of_device_id tegra_devfreq_of_match[] = {
	{ .compatible = "nvidia,tegra30-actmon", .data = &tegra30_soc, },
	{ .compatible = "nvidia,tegra124-actmon", .data = &tegra124_soc, },
	{ },
};
MODULE_DEVICE_TABLE(of, tegra_devfreq_of_match);

static struct platform_driver tegra_devfreq_driver = {
	.probe	= tegra_devfreq_probe,
	.driver = {
		.name = "tegra-devfreq",
		.of_match_table = tegra_devfreq_of_match,
	},
};
module_platform_driver(tegra_devfreq_driver);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Tegra devfreq driver");
MODULE_AUTHOR("Tomeu Vizoso <[email protected]>");
linux-master
drivers/devfreq/tegra30-devfreq.c
// SPDX-License-Identifier: GPL-2.0-only /* * linux/drivers/devfreq/governor_powersave.c * * Copyright (C) 2011 Samsung Electronics * MyungJoo Ham <[email protected]> */ #include <linux/devfreq.h> #include <linux/module.h> #include "governor.h" static int devfreq_powersave_func(struct devfreq *df, unsigned long *freq) { /* * target callback should be able to get ceiling value as * said in devfreq.h */ *freq = DEVFREQ_MIN_FREQ; return 0; } static int devfreq_powersave_handler(struct devfreq *devfreq, unsigned int event, void *data) { int ret = 0; if (event == DEVFREQ_GOV_START) { mutex_lock(&devfreq->lock); ret = update_devfreq(devfreq); mutex_unlock(&devfreq->lock); } return ret; } static struct devfreq_governor devfreq_powersave = { .name = DEVFREQ_GOV_POWERSAVE, .get_target_freq = devfreq_powersave_func, .event_handler = devfreq_powersave_handler, }; static int __init devfreq_powersave_init(void) { return devfreq_add_governor(&devfreq_powersave); } subsys_initcall(devfreq_powersave_init); static void __exit devfreq_powersave_exit(void) { int ret; ret = devfreq_remove_governor(&devfreq_powersave); if (ret) pr_err("%s: failed remove governor %d\n", __func__, ret); return; } module_exit(devfreq_powersave_exit); MODULE_LICENSE("GPL");
linux-master
drivers/devfreq/governor_powersave.c
// SPDX-License-Identifier: GPL-2.0-only /* * linux/drivers/devfreq/governor_simpleondemand.c * * Copyright (C) 2011 Samsung Electronics * MyungJoo Ham <[email protected]> */ #include <linux/errno.h> #include <linux/module.h> #include <linux/devfreq.h> #include <linux/math64.h> #include "governor.h" /* Default constants for DevFreq-Simple-Ondemand (DFSO) */ #define DFSO_UPTHRESHOLD (90) #define DFSO_DOWNDIFFERENCTIAL (5) static int devfreq_simple_ondemand_func(struct devfreq *df, unsigned long *freq) { int err; struct devfreq_dev_status *stat; unsigned long long a, b; unsigned int dfso_upthreshold = DFSO_UPTHRESHOLD; unsigned int dfso_downdifferential = DFSO_DOWNDIFFERENCTIAL; struct devfreq_simple_ondemand_data *data = df->data; err = devfreq_update_stats(df); if (err) return err; stat = &df->last_status; if (data) { if (data->upthreshold) dfso_upthreshold = data->upthreshold; if (data->downdifferential) dfso_downdifferential = data->downdifferential; } if (dfso_upthreshold > 100 || dfso_upthreshold < dfso_downdifferential) return -EINVAL; /* Assume MAX if it is going to be divided by zero */ if (stat->total_time == 0) { *freq = DEVFREQ_MAX_FREQ; return 0; } /* Prevent overflow */ if (stat->busy_time >= (1 << 24) || stat->total_time >= (1 << 24)) { stat->busy_time >>= 7; stat->total_time >>= 7; } /* Set MAX if it's busy enough */ if (stat->busy_time * 100 > stat->total_time * dfso_upthreshold) { *freq = DEVFREQ_MAX_FREQ; return 0; } /* Set MAX if we do not know the initial frequency */ if (stat->current_frequency == 0) { *freq = DEVFREQ_MAX_FREQ; return 0; } /* Keep the current frequency */ if (stat->busy_time * 100 > stat->total_time * (dfso_upthreshold - dfso_downdifferential)) { *freq = stat->current_frequency; return 0; } /* Set the desired frequency based on the load */ a = stat->busy_time; a *= stat->current_frequency; b = div_u64(a, stat->total_time); b *= 100; b = div_u64(b, (dfso_upthreshold - dfso_downdifferential / 2)); *freq = (unsigned long) b; return 
0; } static int devfreq_simple_ondemand_handler(struct devfreq *devfreq, unsigned int event, void *data) { switch (event) { case DEVFREQ_GOV_START: devfreq_monitor_start(devfreq); break; case DEVFREQ_GOV_STOP: devfreq_monitor_stop(devfreq); break; case DEVFREQ_GOV_UPDATE_INTERVAL: devfreq_update_interval(devfreq, (unsigned int *)data); break; case DEVFREQ_GOV_SUSPEND: devfreq_monitor_suspend(devfreq); break; case DEVFREQ_GOV_RESUME: devfreq_monitor_resume(devfreq); break; default: break; } return 0; } static struct devfreq_governor devfreq_simple_ondemand = { .name = DEVFREQ_GOV_SIMPLE_ONDEMAND, .attrs = DEVFREQ_GOV_ATTR_POLLING_INTERVAL | DEVFREQ_GOV_ATTR_TIMER, .get_target_freq = devfreq_simple_ondemand_func, .event_handler = devfreq_simple_ondemand_handler, }; static int __init devfreq_simple_ondemand_init(void) { return devfreq_add_governor(&devfreq_simple_ondemand); } subsys_initcall(devfreq_simple_ondemand_init); static void __exit devfreq_simple_ondemand_exit(void) { int ret; ret = devfreq_remove_governor(&devfreq_simple_ondemand); if (ret) pr_err("%s: failed remove governor %d\n", __func__, ret); return; } module_exit(devfreq_simple_ondemand_exit); MODULE_LICENSE("GPL");
linux-master
drivers/devfreq/governor_simpleondemand.c
// SPDX-License-Identifier: GPL-2.0-only /* * linux/drivers/devfreq/governor_performance.c * * Copyright (C) 2011 Samsung Electronics * MyungJoo Ham <[email protected]> */ #include <linux/devfreq.h> #include <linux/module.h> #include "governor.h" static int devfreq_performance_func(struct devfreq *df, unsigned long *freq) { /* * target callback should be able to get floor value as * said in devfreq.h */ *freq = DEVFREQ_MAX_FREQ; return 0; } static int devfreq_performance_handler(struct devfreq *devfreq, unsigned int event, void *data) { int ret = 0; if (event == DEVFREQ_GOV_START) { mutex_lock(&devfreq->lock); ret = update_devfreq(devfreq); mutex_unlock(&devfreq->lock); } return ret; } static struct devfreq_governor devfreq_performance = { .name = DEVFREQ_GOV_PERFORMANCE, .get_target_freq = devfreq_performance_func, .event_handler = devfreq_performance_handler, }; static int __init devfreq_performance_init(void) { return devfreq_add_governor(&devfreq_performance); } subsys_initcall(devfreq_performance_init); static void __exit devfreq_performance_exit(void) { int ret; ret = devfreq_remove_governor(&devfreq_performance); if (ret) pr_err("%s: failed remove governor %d\n", __func__, ret); return; } module_exit(devfreq_performance_exit); MODULE_LICENSE("GPL");
linux-master
drivers/devfreq/governor_performance.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Generic Exynos Bus frequency driver with DEVFREQ Framework
 *
 * Copyright (c) 2016 Samsung Electronics Co., Ltd.
 * Author : Chanwoo Choi <[email protected]>
 *
 * This driver support Exynos Bus frequency feature by using
 * DEVFREQ framework and is based on drivers/devfreq/exynos/exynos4_bus.c.
 */

#include <linux/clk.h>
#include <linux/devfreq.h>
#include <linux/devfreq-event.h>
#include <linux/device.h>
#include <linux/export.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/pm_opp.h>
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>

/* Fallback for the optional "exynos,saturation-ratio" DT property. */
#define DEFAULT_SATURATION_RATIO	40

/* Per-device state shared by both parent (ondemand) and passive buses. */
struct exynos_bus {
	struct device *dev;
	struct platform_device *icc_pdev;
	struct devfreq *devfreq;
	struct devfreq_event_dev **edev;	/* utilization counters */
	unsigned int edev_count;
	struct mutex lock;			/* serializes OPP rate changes */
	unsigned long curr_freq;
	int opp_token;				/* from dev_pm_opp_set_regulators() */
	struct clk *clk;
	unsigned int ratio;			/* saturation ratio, percent-like divisor */
};

/*
 * Control the devfreq-event device to get the current state of bus
 */
#define exynos_bus_ops_edev(ops)				\
static int exynos_bus_##ops(struct exynos_bus *bus)		\
{								\
	int i, ret;						\
								\
	for (i = 0; i < bus->edev_count; i++) {			\
		if (!bus->edev[i])				\
			continue;				\
		ret = devfreq_event_##ops(bus->edev[i]);	\
		if (ret < 0)					\
			return ret;				\
	}							\
								\
	return 0;						\
}
exynos_bus_ops_edev(enable_edev);
exynos_bus_ops_edev(disable_edev);
exynos_bus_ops_edev(set_event);

/*
 * Collect event counters from all attached devfreq-event devices and
 * report the sample with the highest load_count (the busiest monitor).
 */
static int exynos_bus_get_event(struct exynos_bus *bus,
				struct devfreq_event_data *edata)
{
	struct devfreq_event_data event_data;
	unsigned long load_count = 0, total_count = 0;
	int i, ret = 0;

	for (i = 0; i < bus->edev_count; i++) {
		if (!bus->edev[i])
			continue;

		ret = devfreq_event_get_event(bus->edev[i], &event_data);
		if (ret < 0)
			return ret;

		/* Keep the busiest sample seen so far. */
		if (i == 0 || event_data.load_count > load_count) {
			load_count = event_data.load_count;
			total_count = event_data.total_count;
		}
	}

	edata->load_count = load_count;
	edata->total_count = total_count;

	return ret;
}

/*
 * devfreq function for both simple-ondemand and passive governor
 */
static int exynos_bus_target(struct device *dev, unsigned long *freq, u32 flags)
{
	struct exynos_bus *bus = dev_get_drvdata(dev);
	struct dev_pm_opp *new_opp;
	int ret = 0;

	/* Get correct frequency for bus. */
	new_opp = devfreq_recommended_opp(dev, freq, flags);
	if (IS_ERR(new_opp)) {
		dev_err(dev, "failed to get recommended opp instance\n");
		return PTR_ERR(new_opp);
	}

	dev_pm_opp_put(new_opp);

	/* Change voltage and frequency according to new OPP level */
	mutex_lock(&bus->lock);
	ret = dev_pm_opp_set_rate(dev, *freq);
	if (!ret)
		bus->curr_freq = *freq;
	mutex_unlock(&bus->lock);

	return ret;
}

/*
 * devfreq get_dev_status callback: convert raw event counts into
 * busy_time/total_time, scaled by the saturation ratio. The event
 * counters are re-armed (set_event) on both success and failure paths.
 */
static int exynos_bus_get_dev_status(struct device *dev,
				     struct devfreq_dev_status *stat)
{
	struct exynos_bus *bus = dev_get_drvdata(dev);
	struct devfreq_event_data edata;
	int ret;

	stat->current_frequency = bus->curr_freq;

	ret = exynos_bus_get_event(bus, &edata);
	if (ret < 0) {
		dev_err(dev, "failed to get event from devfreq-event devices\n");
		stat->total_time = stat->busy_time = 0;
		goto err;
	}

	stat->busy_time = (edata.load_count * 100) / bus->ratio;
	stat->total_time = edata.total_count;
	dev_dbg(dev, "Usage of devfreq-event : %lu/%lu\n", stat->busy_time,
							stat->total_time);

err:
	ret = exynos_bus_set_event(bus);
	if (ret < 0) {
		dev_err(dev, "failed to set event to devfreq-event devices\n");
		return ret;
	}

	return ret;
}

/* devfreq exit callback for the parent bus: full teardown incl. regulators. */
static void exynos_bus_exit(struct device *dev)
{
	struct exynos_bus *bus = dev_get_drvdata(dev);
	int ret;

	ret = exynos_bus_disable_edev(bus);
	if (ret < 0)
		dev_warn(dev, "failed to disable the devfreq-event devices\n");

	platform_device_unregister(bus->icc_pdev);

	dev_pm_opp_of_remove_table(dev);
	clk_disable_unprepare(bus->clk);
	dev_pm_opp_put_regulators(bus->opp_token);
}

/* devfreq exit callback for passive buses: no edev/regulators to release. */
static void exynos_bus_passive_exit(struct device *dev)
{
	struct exynos_bus *bus = dev_get_drvdata(dev);

	platform_device_unregister(bus->icc_pdev);

	dev_pm_opp_of_remove_table(dev);
	clk_disable_unprepare(bus->clk);
}

/*
 * Parent-bus-only setup: claim the "vdd" regulator via the OPP layer and
 * bind the devfreq-event devices used for utilization measurement.
 */
static int exynos_bus_parent_parse_of(struct device_node *np,
					struct exynos_bus *bus)
{
	struct device *dev = bus->dev;
	const char *supplies[] = { "vdd", NULL };
	int i, ret, count, size;

	ret = dev_pm_opp_set_regulators(dev, supplies);
	if (ret < 0) {
		dev_err(dev, "failed to set regulators %d\n", ret);
		return ret;
	}

	bus->opp_token = ret;

	/*
	 * Get the devfreq-event devices to get the current utilization of
	 * buses. This raw data will be used in devfreq ondemand governor.
	 */
	count = devfreq_event_get_edev_count(dev, "devfreq-events");
	if (count < 0) {
		dev_err(dev, "failed to get the count of devfreq-event dev\n");
		ret = count;
		goto err_regulator;
	}
	bus->edev_count = count;

	size = sizeof(*bus->edev) * count;
	bus->edev = devm_kzalloc(dev, size, GFP_KERNEL);
	if (!bus->edev) {
		ret = -ENOMEM;
		goto err_regulator;
	}

	for (i = 0; i < count; i++) {
		bus->edev[i] = devfreq_event_get_edev_by_phandle(dev,
							"devfreq-events", i);
		if (IS_ERR(bus->edev[i])) {
			/*
			 * NOTE(review): every lookup failure is mapped to
			 * -EPROBE_DEFER, discarding the real error code —
			 * presumably intentional for probe ordering; confirm.
			 */
			ret = -EPROBE_DEFER;
			goto err_regulator;
		}
	}

	/*
	 * Optionally, Get the saturation ratio according to Exynos SoC
	 * When measuring the utilization of each AXI bus with devfreq-event
	 * devices, the measured real cycle might be much lower than the
	 * total cycle of bus during sampling rate. In result, the devfreq
	 * simple-ondemand governor might not decide to change the current
	 * frequency due to too utilization (= real cycle/total cycle).
	 * So, this property is used to adjust the utilization when calculating
	 * the busy_time in exynos_bus_get_dev_status().
	 */
	if (of_property_read_u32(np, "exynos,saturation-ratio", &bus->ratio))
		bus->ratio = DEFAULT_SATURATION_RATIO;

	return 0;

err_regulator:
	dev_pm_opp_put_regulators(bus->opp_token);

	return ret;
}

/*
 * Common setup for parent and passive buses: clock, OPP table, and the
 * initial frequency snapped to a recommended OPP.
 */
static int exynos_bus_parse_of(struct device_node *np,
			      struct exynos_bus *bus)
{
	struct device *dev = bus->dev;
	struct dev_pm_opp *opp;
	unsigned long rate;
	int ret;

	/* Get the clock to provide each bus with source clock */
	bus->clk = devm_clk_get(dev, "bus");
	if (IS_ERR(bus->clk)) {
		dev_err(dev, "failed to get bus clock\n");
		return PTR_ERR(bus->clk);
	}

	ret = clk_prepare_enable(bus->clk);
	if (ret < 0) {
		dev_err(dev, "failed to get enable clock\n");
		return ret;
	}

	/* Get the freq and voltage from OPP table to scale the bus freq */
	ret = dev_pm_opp_of_add_table(dev);
	if (ret < 0) {
		dev_err(dev, "failed to get OPP table\n");
		goto err_clk;
	}

	rate = clk_get_rate(bus->clk);

	opp = devfreq_recommended_opp(dev, &rate, 0);
	if (IS_ERR(opp)) {
		dev_err(dev, "failed to find dev_pm_opp\n");
		ret = PTR_ERR(opp);
		goto err_opp;
	}
	bus->curr_freq = dev_pm_opp_get_freq(opp);
	dev_pm_opp_put(opp);

	return 0;

err_opp:
	dev_pm_opp_of_remove_table(dev);
err_clk:
	clk_disable_unprepare(bus->clk);

	return ret;
}

/*
 * Register the parent bus with the simple-ondemand governor and arm its
 * devfreq-event devices.
 */
static int exynos_bus_profile_init(struct exynos_bus *bus,
				   struct devfreq_dev_profile *profile)
{
	struct device *dev = bus->dev;
	struct devfreq_simple_ondemand_data *ondemand_data;
	int ret;

	/* Initialize the struct profile and governor data for parent device */
	profile->polling_ms = 50;
	profile->target = exynos_bus_target;
	profile->get_dev_status = exynos_bus_get_dev_status;
	profile->exit = exynos_bus_exit;

	ondemand_data = devm_kzalloc(dev, sizeof(*ondemand_data), GFP_KERNEL);
	if (!ondemand_data)
		return -ENOMEM;

	ondemand_data->upthreshold = 40;
	ondemand_data->downdifferential = 5;

	/* Add devfreq device to monitor and handle the exynos bus */
	bus->devfreq = devm_devfreq_add_device(dev, profile,
						DEVFREQ_GOV_SIMPLE_ONDEMAND,
						ondemand_data);
	if (IS_ERR(bus->devfreq)) {
		dev_err(dev, "failed to add devfreq device\n");
		return PTR_ERR(bus->devfreq);
	}

	/* Register opp_notifier to catch the change of OPP */
	ret = devm_devfreq_register_opp_notifier(dev, bus->devfreq);
	if (ret < 0) {
		dev_err(dev, "failed to register opp notifier\n");
		return ret;
	}

	/*
	 * Enable devfreq-event to get raw data which is used to determine
	 * current bus load.
	 */
	ret = exynos_bus_enable_edev(bus);
	if (ret < 0) {
		dev_err(dev, "failed to enable devfreq-event devices\n");
		return ret;
	}

	ret = exynos_bus_set_event(bus);
	if (ret < 0) {
		dev_err(dev, "failed to set event to devfreq-event devices\n");
		goto err_edev;
	}

	return 0;

err_edev:
	if (exynos_bus_disable_edev(bus))
		dev_warn(dev, "failed to disable the devfreq-event devices\n");

	return ret;
}

/*
 * Register a passive bus that follows its parent devfreq device
 * (referenced by the "devfreq" DT phandle).
 */
static int exynos_bus_profile_init_passive(struct exynos_bus *bus,
					   struct devfreq_dev_profile *profile)
{
	struct device *dev = bus->dev;
	struct devfreq_passive_data *passive_data;
	struct devfreq *parent_devfreq;

	/* Initialize the struct profile and governor data for passive device */
	profile->target = exynos_bus_target;
	profile->exit = exynos_bus_passive_exit;

	/* Get the instance of parent devfreq device */
	parent_devfreq = devfreq_get_devfreq_by_phandle(dev, "devfreq", 0);
	if (IS_ERR(parent_devfreq))
		return -EPROBE_DEFER;

	passive_data = devm_kzalloc(dev, sizeof(*passive_data), GFP_KERNEL);
	if (!passive_data)
		return -ENOMEM;

	passive_data->parent = parent_devfreq;

	/* Add devfreq device for exynos bus with passive governor */
	bus->devfreq = devm_devfreq_add_device(dev, profile, DEVFREQ_GOV_PASSIVE,
						passive_data);
	if (IS_ERR(bus->devfreq)) {
		dev_err(dev,
			"failed to add devfreq dev with passive governor\n");
		return PTR_ERR(bus->devfreq);
	}

	return 0;
}

/*
 * Probe: a node with a "devfreq" phandle is a passive bus; otherwise it
 * is a parent bus with its own regulator and event devices.
 */
static int exynos_bus_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct device_node *np = dev->of_node, *node;
	struct devfreq_dev_profile *profile;
	struct exynos_bus *bus;
	int ret, max_state;
	unsigned long min_freq, max_freq;
	bool passive = false;

	if (!np) {
		dev_err(dev, "failed to find devicetree node\n");
		return -EINVAL;
	}

	bus = devm_kzalloc(&pdev->dev, sizeof(*bus), GFP_KERNEL);
	if (!bus)
		return -ENOMEM;
	mutex_init(&bus->lock);
	bus->dev = &pdev->dev;
	platform_set_drvdata(pdev, bus);

	profile = devm_kzalloc(dev, sizeof(*profile), GFP_KERNEL);
	if (!profile)
		return -ENOMEM;

	node = of_parse_phandle(dev->of_node, "devfreq", 0);
	if (node) {
		of_node_put(node);
		passive = true;
	} else {
		ret = exynos_bus_parent_parse_of(np, bus);
		if (ret < 0)
			return ret;
	}

	/* Parse the device-tree to get the resource information */
	ret = exynos_bus_parse_of(np, bus);
	if (ret < 0)
		goto err_reg;

	if (passive)
		ret = exynos_bus_profile_init_passive(bus, profile);
	else
		ret = exynos_bus_profile_init(bus, profile);

	if (ret < 0)
		goto err;

	/* Create child platform device for the interconnect provider */
	if (of_property_present(dev->of_node, "#interconnect-cells")) {
		bus->icc_pdev = platform_device_register_data(
						dev, "exynos-generic-icc",
						PLATFORM_DEVID_AUTO, NULL, 0);

		if (IS_ERR(bus->icc_pdev)) {
			ret = PTR_ERR(bus->icc_pdev);
			goto err;
		}
	}

	max_state = bus->devfreq->max_state;
	min_freq = (bus->devfreq->freq_table[0] / 1000);
	max_freq = (bus->devfreq->freq_table[max_state - 1] / 1000);
	pr_info("exynos-bus: new bus device registered: %s (%6ld KHz ~ %6ld KHz)\n",
			dev_name(dev), min_freq, max_freq);

	return 0;

err:
	dev_pm_opp_of_remove_table(dev);
	clk_disable_unprepare(bus->clk);
err_reg:
	/*
	 * NOTE(review): reached on the passive path too, where opp_token was
	 * never assigned by dev_pm_opp_set_regulators() (zero from kzalloc) —
	 * verify dev_pm_opp_put_regulators() tolerates that token.
	 */
	dev_pm_opp_put_regulators(bus->opp_token);

	return ret;
}

static void exynos_bus_shutdown(struct platform_device *pdev)
{
	struct exynos_bus *bus = dev_get_drvdata(&pdev->dev);

	devfreq_suspend_device(bus->devfreq);
}

#ifdef CONFIG_PM_SLEEP
/* System resume: re-enable the utilization counters. */
static int exynos_bus_resume(struct device *dev)
{
	struct exynos_bus *bus = dev_get_drvdata(dev);
	int ret;

	ret = exynos_bus_enable_edev(bus);
	if (ret < 0) {
		dev_err(dev, "failed to enable the devfreq-event devices\n");
		return ret;
	}

	return 0;
}

/* System suspend: stop the utilization counters. */
static int exynos_bus_suspend(struct device *dev)
{
	struct exynos_bus *bus = dev_get_drvdata(dev);
	int ret;

	ret = exynos_bus_disable_edev(bus);
	if (ret < 0) {
		dev_err(dev, "failed to disable the devfreq-event devices\n");
		return ret;
	}

	return 0;
}
#endif

static const struct dev_pm_ops exynos_bus_pm = {
	SET_SYSTEM_SLEEP_PM_OPS(exynos_bus_suspend, exynos_bus_resume)
};

static const struct of_device_id exynos_bus_of_match[] = {
	{ .compatible = "samsung,exynos-bus", },
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, exynos_bus_of_match);

static struct platform_driver exynos_bus_platdrv = {
	.probe		= exynos_bus_probe,
	.shutdown	= exynos_bus_shutdown,
	.driver = {
		.name	= "exynos-bus",
		.pm	= &exynos_bus_pm,
		.of_match_table = exynos_bus_of_match,
	},
};
module_platform_driver(exynos_bus_platdrv);

MODULE_SOFTDEP("pre: exynos_ppmu");
MODULE_DESCRIPTION("Generic Exynos Bus frequency driver");
MODULE_AUTHOR("Chanwoo Choi <[email protected]>");
MODULE_LICENSE("GPL v2");
linux-master
drivers/devfreq/exynos-bus.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2016, Fuzhou Rockchip Electronics Co., Ltd.
 * Author: Lin Huang <[email protected]>
 */

#include <linux/arm-smccc.h>
#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/devfreq.h>
#include <linux/devfreq-event.h>
#include <linux/interrupt.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_opp.h>
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
#include <linux/rwsem.h>
#include <linux/suspend.h>

#include <soc/rockchip/pm_domains.h>
#include <soc/rockchip/rk3399_grf.h>
#include <soc/rockchip/rockchip_sip.h>

/* Convert a nanosecond interval to controller clock cycles at MHz. */
#define NS_TO_CYCLE(NS, MHz)	(((NS) * (MHz)) / NSEC_PER_USEC)

/* Bit layout of the SMC arguments for the SET_ODT_PD firmware call. */
#define RK3399_SET_ODT_PD_0_SR_IDLE		GENMASK(7, 0)
#define RK3399_SET_ODT_PD_0_SR_MC_GATE_IDLE	GENMASK(15, 8)
#define RK3399_SET_ODT_PD_0_STANDBY_IDLE	GENMASK(31, 16)
#define RK3399_SET_ODT_PD_1_PD_IDLE		GENMASK(11, 0)
#define RK3399_SET_ODT_PD_1_SRPD_LITE_IDLE	GENMASK(27, 16)
#define RK3399_SET_ODT_PD_2_ODT_ENABLE		BIT(0)

struct rk3399_dmcfreq {
	struct device *dev;
	struct devfreq *devfreq;
	struct devfreq_dev_profile profile;
	struct devfreq_simple_ondemand_data ondemand_data;
	struct clk *dmc_clk;
	struct devfreq_event_dev *edev;
	struct mutex lock;		/* serializes frequency transitions */
	struct regulator *vdd_center;
	struct regmap *regmap_pmu;	/* NULL when DT has no rockchip,pmu */
	unsigned long rate, target_rate;
	unsigned long volt, target_volt;
	unsigned int odt_dis_freq;	/* selected per detected DDR type */

	/* Idle timings from DT, in nanoseconds. */
	unsigned int pd_idle_ns;
	unsigned int sr_idle_ns;
	unsigned int sr_mc_gate_idle_ns;
	unsigned int srpd_lite_idle_ns;
	unsigned int standby_idle_ns;
	unsigned int ddr3_odt_dis_freq;
	unsigned int lpddr3_odt_dis_freq;
	unsigned int lpddr4_odt_dis_freq;

	/* Frequencies above which the corresponding idle mode is disabled. */
	unsigned int pd_idle_dis_freq;
	unsigned int sr_idle_dis_freq;
	unsigned int sr_mc_gate_idle_dis_freq;
	unsigned int srpd_lite_idle_dis_freq;
	unsigned int standby_idle_dis_freq;
};

/*
 * devfreq target callback: program DDR idle/ODT parameters via SMC (when a
 * PMU regmap is present) and then change voltage and clock in the order
 * that keeps the rail within spec (voltage first when going up, clock
 * first when going down).
 */
static int rk3399_dmcfreq_target(struct device *dev, unsigned long *freq,
				 u32 flags)
{
	struct rk3399_dmcfreq *dmcfreq = dev_get_drvdata(dev);
	struct dev_pm_opp *opp;
	unsigned long old_clk_rate = dmcfreq->rate;
	unsigned long target_volt, target_rate;
	unsigned int ddrcon_mhz;
	struct arm_smccc_res res;
	int err;

	u32 odt_pd_arg0 = 0;
	u32 odt_pd_arg1 = 0;
	u32 odt_pd_arg2 = 0;

	opp = devfreq_recommended_opp(dev, freq, flags);
	if (IS_ERR(opp))
		return PTR_ERR(opp);

	target_rate = dev_pm_opp_get_freq(opp);
	target_volt = dev_pm_opp_get_voltage(opp);
	dev_pm_opp_put(opp);

	if (dmcfreq->rate == target_rate)
		return 0;

	mutex_lock(&dmcfreq->lock);

	/*
	 * Ensure power-domain transitions don't interfere with ARM Trusted
	 * Firmware power-domain idling.
	 */
	err = rockchip_pmu_block();
	if (err) {
		dev_err(dev, "Failed to block PMU: %d\n", err);
		goto out_unlock;
	}

	/*
	 * Some idle parameters may be based on the DDR controller clock, which
	 * is half of the DDR frequency.
	 * pd_idle and standby_idle are based on the controller clock cycle.
	 * sr_idle_cycle, sr_mc_gate_idle_cycle, and srpd_lite_idle_cycle
	 * are based on the 1024 controller clock cycle
	 */
	ddrcon_mhz = target_rate / USEC_PER_SEC / 2;

	u32p_replace_bits(&odt_pd_arg1,
			  NS_TO_CYCLE(dmcfreq->pd_idle_ns, ddrcon_mhz),
			  RK3399_SET_ODT_PD_1_PD_IDLE);
	u32p_replace_bits(&odt_pd_arg0,
			  NS_TO_CYCLE(dmcfreq->standby_idle_ns, ddrcon_mhz),
			  RK3399_SET_ODT_PD_0_STANDBY_IDLE);
	u32p_replace_bits(&odt_pd_arg0,
			  DIV_ROUND_UP(NS_TO_CYCLE(dmcfreq->sr_idle_ns,
						   ddrcon_mhz), 1024),
			  RK3399_SET_ODT_PD_0_SR_IDLE);
	u32p_replace_bits(&odt_pd_arg0,
			  DIV_ROUND_UP(NS_TO_CYCLE(dmcfreq->sr_mc_gate_idle_ns,
						   ddrcon_mhz), 1024),
			  RK3399_SET_ODT_PD_0_SR_MC_GATE_IDLE);
	u32p_replace_bits(&odt_pd_arg1,
			  DIV_ROUND_UP(NS_TO_CYCLE(dmcfreq->srpd_lite_idle_ns,
						   ddrcon_mhz), 1024),
			  RK3399_SET_ODT_PD_1_SRPD_LITE_IDLE);

	if (dmcfreq->regmap_pmu) {
		/* Clear each idle field above its DT-given disable frequency. */
		if (target_rate >= dmcfreq->sr_idle_dis_freq)
			odt_pd_arg0 &= ~RK3399_SET_ODT_PD_0_SR_IDLE;

		if (target_rate >= dmcfreq->sr_mc_gate_idle_dis_freq)
			odt_pd_arg0 &= ~RK3399_SET_ODT_PD_0_SR_MC_GATE_IDLE;

		if (target_rate >= dmcfreq->standby_idle_dis_freq)
			odt_pd_arg0 &= ~RK3399_SET_ODT_PD_0_STANDBY_IDLE;

		if (target_rate >= dmcfreq->pd_idle_dis_freq)
			odt_pd_arg1 &= ~RK3399_SET_ODT_PD_1_PD_IDLE;

		if (target_rate >= dmcfreq->srpd_lite_idle_dis_freq)
			odt_pd_arg1 &= ~RK3399_SET_ODT_PD_1_SRPD_LITE_IDLE;

		if (target_rate >= dmcfreq->odt_dis_freq)
			odt_pd_arg2 |= RK3399_SET_ODT_PD_2_ODT_ENABLE;

		/*
		 * This makes a SMC call to the TF-A to set the DDR PD
		 * (power-down) timings and to enable or disable the
		 * ODT (on-die termination) resistors.
		 */
		arm_smccc_smc(ROCKCHIP_SIP_DRAM_FREQ, odt_pd_arg0, odt_pd_arg1,
			      ROCKCHIP_SIP_CONFIG_DRAM_SET_ODT_PD, odt_pd_arg2,
			      0, 0, 0, &res);
	}

	/*
	 * If frequency scaling from low to high, adjust voltage first.
	 * If frequency scaling from high to low, adjust frequency first.
	 */
	if (old_clk_rate < target_rate) {
		err = regulator_set_voltage(dmcfreq->vdd_center, target_volt,
					    target_volt);
		if (err) {
			dev_err(dev, "Cannot set voltage %lu uV\n",
				target_volt);
			goto out;
		}
	}

	err = clk_set_rate(dmcfreq->dmc_clk, target_rate);
	if (err) {
		dev_err(dev, "Cannot set frequency %lu (%d)\n", target_rate,
			err);
		regulator_set_voltage(dmcfreq->vdd_center, dmcfreq->volt,
				      dmcfreq->volt);
		goto out;
	}

	/*
	 * Check the dpll rate,
	 * There only two result we will get,
	 * 1. Ddr frequency scaling fail, we still get the old rate.
	 * 2. Ddr frequency scaling sucessful, we get the rate we set.
	 */
	dmcfreq->rate = clk_get_rate(dmcfreq->dmc_clk);

	/* If get the incorrect rate, set voltage to old value. */
	if (dmcfreq->rate != target_rate) {
		dev_err(dev, "Got wrong frequency, Request %lu, Current %lu\n",
			target_rate, dmcfreq->rate);
		regulator_set_voltage(dmcfreq->vdd_center, dmcfreq->volt,
				      dmcfreq->volt);
		goto out;
	} else if (old_clk_rate > target_rate)
		err = regulator_set_voltage(dmcfreq->vdd_center, target_volt,
					    target_volt);
	if (err)
		dev_err(dev, "Cannot set voltage %lu uV\n", target_volt);

	dmcfreq->rate = target_rate;
	dmcfreq->volt = target_volt;

out:
	rockchip_pmu_unblock();
out_unlock:
	mutex_unlock(&dmcfreq->lock);
	return err;
}

/* devfreq get_dev_status: report raw load/total counts from the event dev. */
static int rk3399_dmcfreq_get_dev_status(struct device *dev,
					 struct devfreq_dev_status *stat)
{
	struct rk3399_dmcfreq *dmcfreq = dev_get_drvdata(dev);
	struct devfreq_event_data edata;
	int ret = 0;

	ret = devfreq_event_get_event(dmcfreq->edev, &edata);
	if (ret < 0)
		return ret;

	stat->current_frequency = dmcfreq->rate;
	stat->busy_time = edata.load_count;
	stat->total_time = edata.total_count;

	return ret;
}

/* devfreq get_cur_freq: last rate cached by the target callback. */
static int rk3399_dmcfreq_get_cur_freq(struct device *dev, unsigned long *freq)
{
	struct rk3399_dmcfreq *dmcfreq = dev_get_drvdata(dev);

	*freq = dmcfreq->rate;

	return 0;
}

/* System suspend: stop the event counters, then suspend the devfreq dev. */
static __maybe_unused int rk3399_dmcfreq_suspend(struct device *dev)
{
	struct rk3399_dmcfreq *dmcfreq = dev_get_drvdata(dev);
	int ret = 0;

	ret = devfreq_event_disable_edev(dmcfreq->edev);
	if (ret < 0) {
		dev_err(dev, "failed to disable the devfreq-event devices\n");
		return ret;
	}

	ret = devfreq_suspend_device(dmcfreq->devfreq);
	if (ret < 0) {
		dev_err(dev, "failed to suspend the devfreq devices\n");
		return ret;
	}

	return 0;
}

/* System resume: mirror of suspend, in the opposite order of operation. */
static __maybe_unused int rk3399_dmcfreq_resume(struct device *dev)
{
	struct rk3399_dmcfreq *dmcfreq = dev_get_drvdata(dev);
	int ret = 0;

	ret = devfreq_event_enable_edev(dmcfreq->edev);
	if (ret < 0) {
		dev_err(dev, "failed to enable the devfreq-event devices\n");
		return ret;
	}

	ret = devfreq_resume_device(dmcfreq->devfreq);
	if (ret < 0) {
		dev_err(dev, "failed to resume the devfreq devices\n");
		return ret;
	}
	return ret;
}

static SIMPLE_DEV_PM_OPS(rk3399_dmcfreq_pm, rk3399_dmcfreq_suspend,
			 rk3399_dmcfreq_resume);

/*
 * Read all optional DT timing/threshold properties. The OR-accumulated
 * return value is non-zero if any property was absent; the caller in
 * probe() ignores it because every property is optional.
 */
static int rk3399_dmcfreq_of_props(struct rk3399_dmcfreq *data,
				   struct device_node *np)
{
	int ret = 0;

	/*
	 * These are all optional, and serve as minimum bounds. Give them large
	 * (i.e., never "disabled") values if the DT doesn't specify one.
	 */
	data->pd_idle_dis_freq =
		data->sr_idle_dis_freq =
		data->sr_mc_gate_idle_dis_freq =
		data->srpd_lite_idle_dis_freq =
		data->standby_idle_dis_freq = UINT_MAX;

	ret |= of_property_read_u32(np, "rockchip,pd-idle-ns",
				    &data->pd_idle_ns);
	ret |= of_property_read_u32(np, "rockchip,sr-idle-ns",
				    &data->sr_idle_ns);
	ret |= of_property_read_u32(np, "rockchip,sr-mc-gate-idle-ns",
				    &data->sr_mc_gate_idle_ns);
	ret |= of_property_read_u32(np, "rockchip,srpd-lite-idle-ns",
				    &data->srpd_lite_idle_ns);
	ret |= of_property_read_u32(np, "rockchip,standby-idle-ns",
				    &data->standby_idle_ns);
	ret |= of_property_read_u32(np, "rockchip,ddr3_odt_dis_freq",
				    &data->ddr3_odt_dis_freq);
	ret |= of_property_read_u32(np, "rockchip,lpddr3_odt_dis_freq",
				    &data->lpddr3_odt_dis_freq);
	ret |= of_property_read_u32(np, "rockchip,lpddr4_odt_dis_freq",
				    &data->lpddr4_odt_dis_freq);

	ret |= of_property_read_u32(np, "rockchip,pd-idle-dis-freq-hz",
				    &data->pd_idle_dis_freq);
	ret |= of_property_read_u32(np, "rockchip,sr-idle-dis-freq-hz",
				    &data->sr_idle_dis_freq);
	ret |= of_property_read_u32(np, "rockchip,sr-mc-gate-idle-dis-freq-hz",
				    &data->sr_mc_gate_idle_dis_freq);
	ret |= of_property_read_u32(np, "rockchip,srpd-lite-idle-dis-freq-hz",
				    &data->srpd_lite_idle_dis_freq);
	ret |= of_property_read_u32(np, "rockchip,standby-idle-dis-freq-hz",
				    &data->standby_idle_dis_freq);

	return ret;
}

/*
 * Probe: acquire regulator/clock/event resources, detect the DDR type via
 * the PMU regmap (optional), initialize DRAM through the TF-A SMC, and
 * register a devfreq device using the simple-ondemand governor.
 */
static int rk3399_dmcfreq_probe(struct platform_device *pdev)
{
	struct arm_smccc_res res;
	struct device *dev = &pdev->dev;
	struct device_node *np = pdev->dev.of_node, *node;
	struct rk3399_dmcfreq *data;
	int ret;
	struct dev_pm_opp *opp;
	u32 ddr_type;
	u32 val;

	data = devm_kzalloc(dev, sizeof(struct rk3399_dmcfreq), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	mutex_init(&data->lock);

	data->vdd_center = devm_regulator_get(dev, "center");
	if (IS_ERR(data->vdd_center))
		return dev_err_probe(dev, PTR_ERR(data->vdd_center),
				     "Cannot get the regulator \"center\"\n");

	data->dmc_clk = devm_clk_get(dev, "dmc_clk");
	if (IS_ERR(data->dmc_clk))
		return dev_err_probe(dev, PTR_ERR(data->dmc_clk),
				     "Cannot get the clk dmc_clk\n");

	data->edev = devfreq_event_get_edev_by_phandle(dev, "devfreq-events", 0);
	if (IS_ERR(data->edev))
		return -EPROBE_DEFER;

	ret = devfreq_event_enable_edev(data->edev);
	if (ret < 0) {
		dev_err(dev, "failed to enable devfreq-event devices\n");
		return ret;
	}

	rk3399_dmcfreq_of_props(data, np);

	node = of_parse_phandle(np, "rockchip,pmu", 0);
	if (!node)
		goto no_pmu;

	data->regmap_pmu = syscon_node_to_regmap(node);
	of_node_put(node);
	if (IS_ERR(data->regmap_pmu)) {
		ret = PTR_ERR(data->regmap_pmu);
		goto err_edev;
	}

	/* Detect the DDR type to pick the matching ODT disable frequency. */
	regmap_read(data->regmap_pmu, RK3399_PMUGRF_OS_REG2, &val);
	ddr_type = (val >> RK3399_PMUGRF_DDRTYPE_SHIFT) &
		    RK3399_PMUGRF_DDRTYPE_MASK;

	switch (ddr_type) {
	case RK3399_PMUGRF_DDRTYPE_DDR3:
		data->odt_dis_freq = data->ddr3_odt_dis_freq;
		break;
	case RK3399_PMUGRF_DDRTYPE_LPDDR3:
		data->odt_dis_freq = data->lpddr3_odt_dis_freq;
		break;
	case RK3399_PMUGRF_DDRTYPE_LPDDR4:
		data->odt_dis_freq = data->lpddr4_odt_dis_freq;
		break;
	default:
		ret = -EINVAL;
		goto err_edev;
	}

no_pmu:
	arm_smccc_smc(ROCKCHIP_SIP_DRAM_FREQ, 0, 0,
		      ROCKCHIP_SIP_CONFIG_DRAM_INIT,
		      0, 0, 0, 0, &res);

	/*
	 * We add a devfreq driver to our parent since it has a device tree node
	 * with operating points.
	 */
	if (devm_pm_opp_of_add_table(dev)) {
		dev_err(dev, "Invalid operating-points in device tree.\n");
		ret = -EINVAL;
		goto err_edev;
	}

	data->ondemand_data.upthreshold = 25;
	data->ondemand_data.downdifferential = 15;

	data->rate = clk_get_rate(data->dmc_clk);

	opp = devfreq_recommended_opp(dev, &data->rate, 0);
	if (IS_ERR(opp)) {
		ret = PTR_ERR(opp);
		goto err_edev;
	}

	data->rate = dev_pm_opp_get_freq(opp);
	data->volt = dev_pm_opp_get_voltage(opp);
	dev_pm_opp_put(opp);

	data->profile = (struct devfreq_dev_profile) {
		.polling_ms	= 200,
		.target		= rk3399_dmcfreq_target,
		.get_dev_status	= rk3399_dmcfreq_get_dev_status,
		.get_cur_freq	= rk3399_dmcfreq_get_cur_freq,
		.initial_freq	= data->rate,
	};

	data->devfreq = devm_devfreq_add_device(dev, &data->profile,
					   DEVFREQ_GOV_SIMPLE_ONDEMAND,
					   &data->ondemand_data);
	if (IS_ERR(data->devfreq)) {
		ret = PTR_ERR(data->devfreq);
		goto err_edev;
	}

	devm_devfreq_register_opp_notifier(dev, data->devfreq);

	data->dev = dev;
	platform_set_drvdata(pdev, data);

	return 0;

err_edev:
	devfreq_event_disable_edev(data->edev);

	return ret;
}

static int rk3399_dmcfreq_remove(struct platform_device *pdev)
{
	struct rk3399_dmcfreq *dmcfreq = dev_get_drvdata(&pdev->dev);

	devfreq_event_disable_edev(dmcfreq->edev);

	return 0;
}

static const struct of_device_id rk3399dmc_devfreq_of_match[] = {
	{ .compatible = "rockchip,rk3399-dmc" },
	{ },
};
MODULE_DEVICE_TABLE(of, rk3399dmc_devfreq_of_match);

static struct platform_driver rk3399_dmcfreq_driver = {
	.probe	= rk3399_dmcfreq_probe,
	.remove = rk3399_dmcfreq_remove,
	.driver = {
		.name	= "rk3399-dmc-freq",
		.pm	= &rk3399_dmcfreq_pm,
		.of_match_table = rk3399dmc_devfreq_of_match,
	},
};
module_platform_driver(rk3399_dmcfreq_driver);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Lin Huang <[email protected]>");
MODULE_DESCRIPTION("RK3399 dmcfreq driver with devfreq framework");
linux-master
drivers/devfreq/rk3399_dmc.c
// SPDX-License-Identifier: GPL-2.0-only /* * devfreq: Generic Dynamic Voltage and Frequency Scaling (DVFS) Framework * for Non-CPU Devices. * * Copyright (C) 2011 Samsung Electronics * MyungJoo Ham <[email protected]> */ #include <linux/kernel.h> #include <linux/kmod.h> #include <linux/sched.h> #include <linux/debugfs.h> #include <linux/devfreq_cooling.h> #include <linux/errno.h> #include <linux/err.h> #include <linux/init.h> #include <linux/export.h> #include <linux/slab.h> #include <linux/stat.h> #include <linux/pm_opp.h> #include <linux/devfreq.h> #include <linux/workqueue.h> #include <linux/platform_device.h> #include <linux/list.h> #include <linux/printk.h> #include <linux/hrtimer.h> #include <linux/of.h> #include <linux/pm_qos.h> #include <linux/units.h> #include "governor.h" #define CREATE_TRACE_POINTS #include <trace/events/devfreq.h> #define IS_SUPPORTED_FLAG(f, name) ((f & DEVFREQ_GOV_FLAG_##name) ? true : false) #define IS_SUPPORTED_ATTR(f, name) ((f & DEVFREQ_GOV_ATTR_##name) ? true : false) static struct class *devfreq_class; static struct dentry *devfreq_debugfs; /* * devfreq core provides delayed work based load monitoring helper * functions. Governors can use these or can implement their own * monitoring mechanism. */ static struct workqueue_struct *devfreq_wq; /* The list of all device-devfreq governors */ static LIST_HEAD(devfreq_governor_list); /* The list of all device-devfreq */ static LIST_HEAD(devfreq_list); static DEFINE_MUTEX(devfreq_list_lock); static const char timer_name[][DEVFREQ_NAME_LEN] = { [DEVFREQ_TIMER_DEFERRABLE] = { "deferrable" }, [DEVFREQ_TIMER_DELAYED] = { "delayed" }, }; /** * find_device_devfreq() - find devfreq struct using device pointer * @dev: device pointer used to lookup device devfreq. * * Search the list of device devfreqs and return the matched device's * devfreq info. devfreq_list_lock should be held by the caller. 
*/ static struct devfreq *find_device_devfreq(struct device *dev) { struct devfreq *tmp_devfreq; lockdep_assert_held(&devfreq_list_lock); if (IS_ERR_OR_NULL(dev)) { pr_err("DEVFREQ: %s: Invalid parameters\n", __func__); return ERR_PTR(-EINVAL); } list_for_each_entry(tmp_devfreq, &devfreq_list, node) { if (tmp_devfreq->dev.parent == dev) return tmp_devfreq; } return ERR_PTR(-ENODEV); } static unsigned long find_available_min_freq(struct devfreq *devfreq) { struct dev_pm_opp *opp; unsigned long min_freq = 0; opp = dev_pm_opp_find_freq_ceil(devfreq->dev.parent, &min_freq); if (IS_ERR(opp)) min_freq = 0; else dev_pm_opp_put(opp); return min_freq; } static unsigned long find_available_max_freq(struct devfreq *devfreq) { struct dev_pm_opp *opp; unsigned long max_freq = ULONG_MAX; opp = dev_pm_opp_find_freq_floor(devfreq->dev.parent, &max_freq); if (IS_ERR(opp)) max_freq = 0; else dev_pm_opp_put(opp); return max_freq; } /** * devfreq_get_freq_range() - Get the current freq range * @devfreq: the devfreq instance * @min_freq: the min frequency * @max_freq: the max frequency * * This takes into consideration all constraints. */ void devfreq_get_freq_range(struct devfreq *devfreq, unsigned long *min_freq, unsigned long *max_freq) { unsigned long *freq_table = devfreq->freq_table; s32 qos_min_freq, qos_max_freq; lockdep_assert_held(&devfreq->lock); /* * Initialize minimum/maximum frequency from freq table. * The devfreq drivers can initialize this in either ascending or * descending order and devfreq core supports both. 
*/ if (freq_table[0] < freq_table[devfreq->max_state - 1]) { *min_freq = freq_table[0]; *max_freq = freq_table[devfreq->max_state - 1]; } else { *min_freq = freq_table[devfreq->max_state - 1]; *max_freq = freq_table[0]; } /* Apply constraints from PM QoS */ qos_min_freq = dev_pm_qos_read_value(devfreq->dev.parent, DEV_PM_QOS_MIN_FREQUENCY); qos_max_freq = dev_pm_qos_read_value(devfreq->dev.parent, DEV_PM_QOS_MAX_FREQUENCY); *min_freq = max(*min_freq, (unsigned long)HZ_PER_KHZ * qos_min_freq); if (qos_max_freq != PM_QOS_MAX_FREQUENCY_DEFAULT_VALUE) *max_freq = min(*max_freq, (unsigned long)HZ_PER_KHZ * qos_max_freq); /* Apply constraints from OPP interface */ *min_freq = max(*min_freq, devfreq->scaling_min_freq); *max_freq = min(*max_freq, devfreq->scaling_max_freq); if (*min_freq > *max_freq) *min_freq = *max_freq; } EXPORT_SYMBOL(devfreq_get_freq_range); /** * devfreq_get_freq_level() - Lookup freq_table for the frequency * @devfreq: the devfreq instance * @freq: the target frequency */ static int devfreq_get_freq_level(struct devfreq *devfreq, unsigned long freq) { int lev; for (lev = 0; lev < devfreq->max_state; lev++) if (freq == devfreq->freq_table[lev]) return lev; return -EINVAL; } static int set_freq_table(struct devfreq *devfreq) { struct dev_pm_opp *opp; unsigned long freq; int i, count; /* Initialize the freq_table from OPP table */ count = dev_pm_opp_get_opp_count(devfreq->dev.parent); if (count <= 0) return -EINVAL; devfreq->max_state = count; devfreq->freq_table = devm_kcalloc(devfreq->dev.parent, devfreq->max_state, sizeof(*devfreq->freq_table), GFP_KERNEL); if (!devfreq->freq_table) return -ENOMEM; for (i = 0, freq = 0; i < devfreq->max_state; i++, freq++) { opp = dev_pm_opp_find_freq_ceil(devfreq->dev.parent, &freq); if (IS_ERR(opp)) { devm_kfree(devfreq->dev.parent, devfreq->freq_table); return PTR_ERR(opp); } dev_pm_opp_put(opp); devfreq->freq_table[i] = freq; } return 0; } /** * devfreq_update_status() - Update statistics of devfreq behavior * 
@devfreq: the devfreq instance * @freq: the update target frequency */ int devfreq_update_status(struct devfreq *devfreq, unsigned long freq) { int lev, prev_lev, ret = 0; u64 cur_time; lockdep_assert_held(&devfreq->lock); cur_time = get_jiffies_64(); /* Immediately exit if previous_freq is not initialized yet. */ if (!devfreq->previous_freq) goto out; prev_lev = devfreq_get_freq_level(devfreq, devfreq->previous_freq); if (prev_lev < 0) { ret = prev_lev; goto out; } devfreq->stats.time_in_state[prev_lev] += cur_time - devfreq->stats.last_update; lev = devfreq_get_freq_level(devfreq, freq); if (lev < 0) { ret = lev; goto out; } if (lev != prev_lev) { devfreq->stats.trans_table[ (prev_lev * devfreq->max_state) + lev]++; devfreq->stats.total_trans++; } out: devfreq->stats.last_update = cur_time; return ret; } EXPORT_SYMBOL(devfreq_update_status); /** * find_devfreq_governor() - find devfreq governor from name * @name: name of the governor * * Search the list of devfreq governors and return the matched * governor's pointer. devfreq_list_lock should be held by the caller. */ static struct devfreq_governor *find_devfreq_governor(const char *name) { struct devfreq_governor *tmp_governor; lockdep_assert_held(&devfreq_list_lock); if (IS_ERR_OR_NULL(name)) { pr_err("DEVFREQ: %s: Invalid parameters\n", __func__); return ERR_PTR(-EINVAL); } list_for_each_entry(tmp_governor, &devfreq_governor_list, node) { if (!strncmp(tmp_governor->name, name, DEVFREQ_NAME_LEN)) return tmp_governor; } return ERR_PTR(-ENODEV); } /** * try_then_request_governor() - Try to find the governor and request the * module if is not found. * @name: name of the governor * * Search the list of devfreq governors and request the module and try again * if is not found. This can happen when both drivers (the governor driver * and the driver that call devfreq_add_device) are built as modules. * devfreq_list_lock should be held by the caller. Returns the matched * governor's pointer or an error pointer. 
*/ static struct devfreq_governor *try_then_request_governor(const char *name) { struct devfreq_governor *governor; int err = 0; lockdep_assert_held(&devfreq_list_lock); if (IS_ERR_OR_NULL(name)) { pr_err("DEVFREQ: %s: Invalid parameters\n", __func__); return ERR_PTR(-EINVAL); } governor = find_devfreq_governor(name); if (IS_ERR(governor)) { mutex_unlock(&devfreq_list_lock); if (!strncmp(name, DEVFREQ_GOV_SIMPLE_ONDEMAND, DEVFREQ_NAME_LEN)) err = request_module("governor_%s", "simpleondemand"); else err = request_module("governor_%s", name); /* Restore previous state before return */ mutex_lock(&devfreq_list_lock); if (err) return (err < 0) ? ERR_PTR(err) : ERR_PTR(-EINVAL); governor = find_devfreq_governor(name); } return governor; } static int devfreq_notify_transition(struct devfreq *devfreq, struct devfreq_freqs *freqs, unsigned int state) { if (!devfreq) return -EINVAL; switch (state) { case DEVFREQ_PRECHANGE: srcu_notifier_call_chain(&devfreq->transition_notifier_list, DEVFREQ_PRECHANGE, freqs); break; case DEVFREQ_POSTCHANGE: srcu_notifier_call_chain(&devfreq->transition_notifier_list, DEVFREQ_POSTCHANGE, freqs); break; default: return -EINVAL; } return 0; } static int devfreq_set_target(struct devfreq *devfreq, unsigned long new_freq, u32 flags) { struct devfreq_freqs freqs; unsigned long cur_freq; int err = 0; if (devfreq->profile->get_cur_freq) devfreq->profile->get_cur_freq(devfreq->dev.parent, &cur_freq); else cur_freq = devfreq->previous_freq; freqs.old = cur_freq; freqs.new = new_freq; devfreq_notify_transition(devfreq, &freqs, DEVFREQ_PRECHANGE); err = devfreq->profile->target(devfreq->dev.parent, &new_freq, flags); if (err) { freqs.new = cur_freq; devfreq_notify_transition(devfreq, &freqs, DEVFREQ_POSTCHANGE); return err; } /* * Print devfreq_frequency trace information between DEVFREQ_PRECHANGE * and DEVFREQ_POSTCHANGE because for showing the correct frequency * change order of between devfreq device and passive devfreq device. 
*/ if (trace_devfreq_frequency_enabled() && new_freq != cur_freq) trace_devfreq_frequency(devfreq, new_freq, cur_freq); freqs.new = new_freq; devfreq_notify_transition(devfreq, &freqs, DEVFREQ_POSTCHANGE); if (devfreq_update_status(devfreq, new_freq)) dev_warn(&devfreq->dev, "Couldn't update frequency transition information.\n"); devfreq->previous_freq = new_freq; if (devfreq->suspend_freq) devfreq->resume_freq = new_freq; return err; } /** * devfreq_update_target() - Reevaluate the device and configure frequency * on the final stage. * @devfreq: the devfreq instance. * @freq: the new frequency of parent device. This argument * is only used for devfreq device using passive governor. * * Note: Lock devfreq->lock before calling devfreq_update_target. This function * should be only used by both update_devfreq() and devfreq governors. */ int devfreq_update_target(struct devfreq *devfreq, unsigned long freq) { unsigned long min_freq, max_freq; int err = 0; u32 flags = 0; lockdep_assert_held(&devfreq->lock); if (!devfreq->governor) return -EINVAL; /* Reevaluate the proper frequency */ err = devfreq->governor->get_target_freq(devfreq, &freq); if (err) return err; devfreq_get_freq_range(devfreq, &min_freq, &max_freq); if (freq < min_freq) { freq = min_freq; flags &= ~DEVFREQ_FLAG_LEAST_UPPER_BOUND; /* Use GLB */ } if (freq > max_freq) { freq = max_freq; flags |= DEVFREQ_FLAG_LEAST_UPPER_BOUND; /* Use LUB */ } return devfreq_set_target(devfreq, freq, flags); } EXPORT_SYMBOL(devfreq_update_target); /* Load monitoring helper functions for governors use */ /** * update_devfreq() - Reevaluate the device and configure frequency. * @devfreq: the devfreq instance. * * Note: Lock devfreq->lock before calling update_devfreq * This function is exported for governors. */ int update_devfreq(struct devfreq *devfreq) { return devfreq_update_target(devfreq, 0L); } EXPORT_SYMBOL(update_devfreq); /** * devfreq_monitor() - Periodically poll devfreq objects. 
* @work: the work struct used to run devfreq_monitor periodically. * */ static void devfreq_monitor(struct work_struct *work) { int err; struct devfreq *devfreq = container_of(work, struct devfreq, work.work); mutex_lock(&devfreq->lock); err = update_devfreq(devfreq); if (err) dev_err(&devfreq->dev, "dvfs failed with (%d) error\n", err); queue_delayed_work(devfreq_wq, &devfreq->work, msecs_to_jiffies(devfreq->profile->polling_ms)); mutex_unlock(&devfreq->lock); trace_devfreq_monitor(devfreq); } /** * devfreq_monitor_start() - Start load monitoring of devfreq instance * @devfreq: the devfreq instance. * * Helper function for starting devfreq device load monitoring. By default, * deferrable timer is used for load monitoring. But the users can change this * behavior using the "timer" type in devfreq_dev_profile. This function will be * called by devfreq governor in response to the DEVFREQ_GOV_START event * generated while adding a device to the devfreq framework. */ void devfreq_monitor_start(struct devfreq *devfreq) { if (IS_SUPPORTED_FLAG(devfreq->governor->flags, IRQ_DRIVEN)) return; switch (devfreq->profile->timer) { case DEVFREQ_TIMER_DEFERRABLE: INIT_DEFERRABLE_WORK(&devfreq->work, devfreq_monitor); break; case DEVFREQ_TIMER_DELAYED: INIT_DELAYED_WORK(&devfreq->work, devfreq_monitor); break; default: return; } if (devfreq->profile->polling_ms) queue_delayed_work(devfreq_wq, &devfreq->work, msecs_to_jiffies(devfreq->profile->polling_ms)); } EXPORT_SYMBOL(devfreq_monitor_start); /** * devfreq_monitor_stop() - Stop load monitoring of a devfreq instance * @devfreq: the devfreq instance. * * Helper function to stop devfreq device load monitoring. Function * to be called from governor in response to DEVFREQ_GOV_STOP * event when device is removed from devfreq framework. 
*/ void devfreq_monitor_stop(struct devfreq *devfreq) { if (IS_SUPPORTED_FLAG(devfreq->governor->flags, IRQ_DRIVEN)) return; cancel_delayed_work_sync(&devfreq->work); } EXPORT_SYMBOL(devfreq_monitor_stop); /** * devfreq_monitor_suspend() - Suspend load monitoring of a devfreq instance * @devfreq: the devfreq instance. * * Helper function to suspend devfreq device load monitoring. Function * to be called from governor in response to DEVFREQ_GOV_SUSPEND * event or when polling interval is set to zero. * * Note: Though this function is same as devfreq_monitor_stop(), * intentionally kept separate to provide hooks for collecting * transition statistics. */ void devfreq_monitor_suspend(struct devfreq *devfreq) { mutex_lock(&devfreq->lock); if (devfreq->stop_polling) { mutex_unlock(&devfreq->lock); return; } devfreq_update_status(devfreq, devfreq->previous_freq); devfreq->stop_polling = true; mutex_unlock(&devfreq->lock); if (IS_SUPPORTED_FLAG(devfreq->governor->flags, IRQ_DRIVEN)) return; cancel_delayed_work_sync(&devfreq->work); } EXPORT_SYMBOL(devfreq_monitor_suspend); /** * devfreq_monitor_resume() - Resume load monitoring of a devfreq instance * @devfreq: the devfreq instance. * * Helper function to resume devfreq device load monitoring. Function * to be called from governor in response to DEVFREQ_GOV_RESUME * event or when polling interval is set to non-zero. 
*/ void devfreq_monitor_resume(struct devfreq *devfreq) { unsigned long freq; mutex_lock(&devfreq->lock); if (IS_SUPPORTED_FLAG(devfreq->governor->flags, IRQ_DRIVEN)) goto out_update; if (!devfreq->stop_polling) goto out; if (!delayed_work_pending(&devfreq->work) && devfreq->profile->polling_ms) queue_delayed_work(devfreq_wq, &devfreq->work, msecs_to_jiffies(devfreq->profile->polling_ms)); out_update: devfreq->stats.last_update = get_jiffies_64(); devfreq->stop_polling = false; if (devfreq->profile->get_cur_freq && !devfreq->profile->get_cur_freq(devfreq->dev.parent, &freq)) devfreq->previous_freq = freq; out: mutex_unlock(&devfreq->lock); } EXPORT_SYMBOL(devfreq_monitor_resume); /** * devfreq_update_interval() - Update device devfreq monitoring interval * @devfreq: the devfreq instance. * @delay: new polling interval to be set. * * Helper function to set new load monitoring polling interval. Function * to be called from governor in response to DEVFREQ_GOV_UPDATE_INTERVAL event. */ void devfreq_update_interval(struct devfreq *devfreq, unsigned int *delay) { unsigned int cur_delay = devfreq->profile->polling_ms; unsigned int new_delay = *delay; mutex_lock(&devfreq->lock); devfreq->profile->polling_ms = new_delay; if (IS_SUPPORTED_FLAG(devfreq->governor->flags, IRQ_DRIVEN)) goto out; if (devfreq->stop_polling) goto out; /* if new delay is zero, stop polling */ if (!new_delay) { mutex_unlock(&devfreq->lock); cancel_delayed_work_sync(&devfreq->work); return; } /* if current delay is zero, start polling with new delay */ if (!cur_delay) { queue_delayed_work(devfreq_wq, &devfreq->work, msecs_to_jiffies(devfreq->profile->polling_ms)); goto out; } /* if current delay is greater than new delay, restart polling */ if (cur_delay > new_delay) { mutex_unlock(&devfreq->lock); cancel_delayed_work_sync(&devfreq->work); mutex_lock(&devfreq->lock); if (!devfreq->stop_polling) queue_delayed_work(devfreq_wq, &devfreq->work, msecs_to_jiffies(devfreq->profile->polling_ms)); } out: 
mutex_unlock(&devfreq->lock); } EXPORT_SYMBOL(devfreq_update_interval); /** * devfreq_notifier_call() - Notify that the device frequency requirements * has been changed out of devfreq framework. * @nb: the notifier_block (supposed to be devfreq->nb) * @type: not used * @devp: not used * * Called by a notifier that uses devfreq->nb. */ static int devfreq_notifier_call(struct notifier_block *nb, unsigned long type, void *devp) { struct devfreq *devfreq = container_of(nb, struct devfreq, nb); int err = -EINVAL; mutex_lock(&devfreq->lock); devfreq->scaling_min_freq = find_available_min_freq(devfreq); if (!devfreq->scaling_min_freq) goto out; devfreq->scaling_max_freq = find_available_max_freq(devfreq); if (!devfreq->scaling_max_freq) { devfreq->scaling_max_freq = ULONG_MAX; goto out; } err = update_devfreq(devfreq); out: mutex_unlock(&devfreq->lock); if (err) dev_err(devfreq->dev.parent, "failed to update frequency from OPP notifier (%d)\n", err); return NOTIFY_OK; } /** * qos_notifier_call() - Common handler for QoS constraints. * @devfreq: the devfreq instance. */ static int qos_notifier_call(struct devfreq *devfreq) { int err; mutex_lock(&devfreq->lock); err = update_devfreq(devfreq); mutex_unlock(&devfreq->lock); if (err) dev_err(devfreq->dev.parent, "failed to update frequency from PM QoS (%d)\n", err); return NOTIFY_OK; } /** * qos_min_notifier_call() - Callback for QoS min_freq changes. * @nb: Should be devfreq->nb_min * @val: not used * @ptr: not used */ static int qos_min_notifier_call(struct notifier_block *nb, unsigned long val, void *ptr) { return qos_notifier_call(container_of(nb, struct devfreq, nb_min)); } /** * qos_max_notifier_call() - Callback for QoS max_freq changes. 
* @nb: Should be devfreq->nb_max * @val: not used * @ptr: not used */ static int qos_max_notifier_call(struct notifier_block *nb, unsigned long val, void *ptr) { return qos_notifier_call(container_of(nb, struct devfreq, nb_max)); } /** * devfreq_dev_release() - Callback for struct device to release the device. * @dev: the devfreq device * * Remove devfreq from the list and release its resources. */ static void devfreq_dev_release(struct device *dev) { struct devfreq *devfreq = to_devfreq(dev); int err; mutex_lock(&devfreq_list_lock); list_del(&devfreq->node); mutex_unlock(&devfreq_list_lock); err = dev_pm_qos_remove_notifier(devfreq->dev.parent, &devfreq->nb_max, DEV_PM_QOS_MAX_FREQUENCY); if (err && err != -ENOENT) dev_warn(dev->parent, "Failed to remove max_freq notifier: %d\n", err); err = dev_pm_qos_remove_notifier(devfreq->dev.parent, &devfreq->nb_min, DEV_PM_QOS_MIN_FREQUENCY); if (err && err != -ENOENT) dev_warn(dev->parent, "Failed to remove min_freq notifier: %d\n", err); if (dev_pm_qos_request_active(&devfreq->user_max_freq_req)) { err = dev_pm_qos_remove_request(&devfreq->user_max_freq_req); if (err < 0) dev_warn(dev->parent, "Failed to remove max_freq request: %d\n", err); } if (dev_pm_qos_request_active(&devfreq->user_min_freq_req)) { err = dev_pm_qos_remove_request(&devfreq->user_min_freq_req); if (err < 0) dev_warn(dev->parent, "Failed to remove min_freq request: %d\n", err); } if (devfreq->profile->exit) devfreq->profile->exit(devfreq->dev.parent); if (devfreq->opp_table) dev_pm_opp_put_opp_table(devfreq->opp_table); mutex_destroy(&devfreq->lock); srcu_cleanup_notifier_head(&devfreq->transition_notifier_list); kfree(devfreq); } static void create_sysfs_files(struct devfreq *devfreq, const struct devfreq_governor *gov); static void remove_sysfs_files(struct devfreq *devfreq, const struct devfreq_governor *gov); /** * devfreq_add_device() - Add devfreq feature to the device * @dev: the device to add devfreq feature. 
* @profile: device-specific profile to run devfreq. * @governor_name: name of the policy to choose frequency. * @data: devfreq driver pass to governors, governor should not change it. */ struct devfreq *devfreq_add_device(struct device *dev, struct devfreq_dev_profile *profile, const char *governor_name, void *data) { struct devfreq *devfreq; struct devfreq_governor *governor; unsigned long min_freq, max_freq; int err = 0; if (!dev || !profile || !governor_name) { dev_err(dev, "%s: Invalid parameters.\n", __func__); return ERR_PTR(-EINVAL); } mutex_lock(&devfreq_list_lock); devfreq = find_device_devfreq(dev); mutex_unlock(&devfreq_list_lock); if (!IS_ERR(devfreq)) { dev_err(dev, "%s: devfreq device already exists!\n", __func__); err = -EINVAL; goto err_out; } devfreq = kzalloc(sizeof(struct devfreq), GFP_KERNEL); if (!devfreq) { err = -ENOMEM; goto err_out; } mutex_init(&devfreq->lock); mutex_lock(&devfreq->lock); devfreq->dev.parent = dev; devfreq->dev.class = devfreq_class; devfreq->dev.release = devfreq_dev_release; INIT_LIST_HEAD(&devfreq->node); devfreq->profile = profile; devfreq->previous_freq = profile->initial_freq; devfreq->last_status.current_frequency = profile->initial_freq; devfreq->data = data; devfreq->nb.notifier_call = devfreq_notifier_call; if (devfreq->profile->timer < 0 || devfreq->profile->timer >= DEVFREQ_TIMER_NUM) { mutex_unlock(&devfreq->lock); err = -EINVAL; goto err_dev; } if (!devfreq->profile->max_state || !devfreq->profile->freq_table) { mutex_unlock(&devfreq->lock); err = set_freq_table(devfreq); if (err < 0) goto err_dev; mutex_lock(&devfreq->lock); } else { devfreq->freq_table = devfreq->profile->freq_table; devfreq->max_state = devfreq->profile->max_state; } devfreq->scaling_min_freq = find_available_min_freq(devfreq); if (!devfreq->scaling_min_freq) { mutex_unlock(&devfreq->lock); err = -EINVAL; goto err_dev; } devfreq->scaling_max_freq = find_available_max_freq(devfreq); if (!devfreq->scaling_max_freq) { 
mutex_unlock(&devfreq->lock); err = -EINVAL; goto err_dev; } devfreq_get_freq_range(devfreq, &min_freq, &max_freq); devfreq->suspend_freq = dev_pm_opp_get_suspend_opp_freq(dev); devfreq->opp_table = dev_pm_opp_get_opp_table(dev); if (IS_ERR(devfreq->opp_table)) devfreq->opp_table = NULL; atomic_set(&devfreq->suspend_count, 0); dev_set_name(&devfreq->dev, "%s", dev_name(dev)); err = device_register(&devfreq->dev); if (err) { mutex_unlock(&devfreq->lock); put_device(&devfreq->dev); goto err_out; } devfreq->stats.trans_table = devm_kzalloc(&devfreq->dev, array3_size(sizeof(unsigned int), devfreq->max_state, devfreq->max_state), GFP_KERNEL); if (!devfreq->stats.trans_table) { mutex_unlock(&devfreq->lock); err = -ENOMEM; goto err_devfreq; } devfreq->stats.time_in_state = devm_kcalloc(&devfreq->dev, devfreq->max_state, sizeof(*devfreq->stats.time_in_state), GFP_KERNEL); if (!devfreq->stats.time_in_state) { mutex_unlock(&devfreq->lock); err = -ENOMEM; goto err_devfreq; } devfreq->stats.total_trans = 0; devfreq->stats.last_update = get_jiffies_64(); srcu_init_notifier_head(&devfreq->transition_notifier_list); mutex_unlock(&devfreq->lock); err = dev_pm_qos_add_request(dev, &devfreq->user_min_freq_req, DEV_PM_QOS_MIN_FREQUENCY, 0); if (err < 0) goto err_devfreq; err = dev_pm_qos_add_request(dev, &devfreq->user_max_freq_req, DEV_PM_QOS_MAX_FREQUENCY, PM_QOS_MAX_FREQUENCY_DEFAULT_VALUE); if (err < 0) goto err_devfreq; devfreq->nb_min.notifier_call = qos_min_notifier_call; err = dev_pm_qos_add_notifier(dev, &devfreq->nb_min, DEV_PM_QOS_MIN_FREQUENCY); if (err) goto err_devfreq; devfreq->nb_max.notifier_call = qos_max_notifier_call; err = dev_pm_qos_add_notifier(dev, &devfreq->nb_max, DEV_PM_QOS_MAX_FREQUENCY); if (err) goto err_devfreq; mutex_lock(&devfreq_list_lock); governor = try_then_request_governor(governor_name); if (IS_ERR(governor)) { dev_err(dev, "%s: Unable to find governor for the device\n", __func__); err = PTR_ERR(governor); goto err_init; } devfreq->governor = 
governor; err = devfreq->governor->event_handler(devfreq, DEVFREQ_GOV_START, NULL); if (err) { dev_err_probe(dev, err, "%s: Unable to start governor for the device\n", __func__); goto err_init; } create_sysfs_files(devfreq, devfreq->governor); list_add(&devfreq->node, &devfreq_list); mutex_unlock(&devfreq_list_lock); if (devfreq->profile->is_cooling_device) { devfreq->cdev = devfreq_cooling_em_register(devfreq, NULL); if (IS_ERR(devfreq->cdev)) devfreq->cdev = NULL; } return devfreq; err_init: mutex_unlock(&devfreq_list_lock); err_devfreq: devfreq_remove_device(devfreq); devfreq = NULL; err_dev: kfree(devfreq); err_out: return ERR_PTR(err); } EXPORT_SYMBOL(devfreq_add_device); /** * devfreq_remove_device() - Remove devfreq feature from a device. * @devfreq: the devfreq instance to be removed * * The opposite of devfreq_add_device(). */ int devfreq_remove_device(struct devfreq *devfreq) { if (!devfreq) return -EINVAL; devfreq_cooling_unregister(devfreq->cdev); if (devfreq->governor) { devfreq->governor->event_handler(devfreq, DEVFREQ_GOV_STOP, NULL); remove_sysfs_files(devfreq, devfreq->governor); } device_unregister(&devfreq->dev); return 0; } EXPORT_SYMBOL(devfreq_remove_device); static int devm_devfreq_dev_match(struct device *dev, void *res, void *data) { struct devfreq **r = res; if (WARN_ON(!r || !*r)) return 0; return *r == data; } static void devm_devfreq_dev_release(struct device *dev, void *res) { devfreq_remove_device(*(struct devfreq **)res); } /** * devm_devfreq_add_device() - Resource-managed devfreq_add_device() * @dev: the device to add devfreq feature. * @profile: device-specific profile to run devfreq. * @governor_name: name of the policy to choose frequency. * @data: devfreq driver pass to governors, governor should not change it. * * This function manages automatically the memory of devfreq device using device * resource management and simplify the free operation for memory of devfreq * device. 
*/ struct devfreq *devm_devfreq_add_device(struct device *dev, struct devfreq_dev_profile *profile, const char *governor_name, void *data) { struct devfreq **ptr, *devfreq; ptr = devres_alloc(devm_devfreq_dev_release, sizeof(*ptr), GFP_KERNEL); if (!ptr) return ERR_PTR(-ENOMEM); devfreq = devfreq_add_device(dev, profile, governor_name, data); if (IS_ERR(devfreq)) { devres_free(ptr); return devfreq; } *ptr = devfreq; devres_add(dev, ptr); return devfreq; } EXPORT_SYMBOL(devm_devfreq_add_device); #ifdef CONFIG_OF /* * devfreq_get_devfreq_by_node - Get the devfreq device from devicetree * @node - pointer to device_node * * return the instance of devfreq device */ struct devfreq *devfreq_get_devfreq_by_node(struct device_node *node) { struct devfreq *devfreq; if (!node) return ERR_PTR(-EINVAL); mutex_lock(&devfreq_list_lock); list_for_each_entry(devfreq, &devfreq_list, node) { if (devfreq->dev.parent && device_match_of_node(devfreq->dev.parent, node)) { mutex_unlock(&devfreq_list_lock); return devfreq; } } mutex_unlock(&devfreq_list_lock); return ERR_PTR(-ENODEV); } /* * devfreq_get_devfreq_by_phandle - Get the devfreq device from devicetree * @dev - instance to the given device * @phandle_name - name of property holding a phandle value * @index - index into list of devfreq * * return the instance of devfreq device */ struct devfreq *devfreq_get_devfreq_by_phandle(struct device *dev, const char *phandle_name, int index) { struct device_node *node; struct devfreq *devfreq; if (!dev || !phandle_name) return ERR_PTR(-EINVAL); if (!dev->of_node) return ERR_PTR(-EINVAL); node = of_parse_phandle(dev->of_node, phandle_name, index); if (!node) return ERR_PTR(-ENODEV); devfreq = devfreq_get_devfreq_by_node(node); of_node_put(node); return devfreq; } #else struct devfreq *devfreq_get_devfreq_by_node(struct device_node *node) { return ERR_PTR(-ENODEV); } struct devfreq *devfreq_get_devfreq_by_phandle(struct device *dev, const char *phandle_name, int index) { return 
ERR_PTR(-ENODEV); } #endif /* CONFIG_OF */ EXPORT_SYMBOL_GPL(devfreq_get_devfreq_by_node); EXPORT_SYMBOL_GPL(devfreq_get_devfreq_by_phandle); /** * devm_devfreq_remove_device() - Resource-managed devfreq_remove_device() * @dev: the device from which to remove devfreq feature. * @devfreq: the devfreq instance to be removed */ void devm_devfreq_remove_device(struct device *dev, struct devfreq *devfreq) { WARN_ON(devres_release(dev, devm_devfreq_dev_release, devm_devfreq_dev_match, devfreq)); } EXPORT_SYMBOL(devm_devfreq_remove_device); /** * devfreq_suspend_device() - Suspend devfreq of a device. * @devfreq: the devfreq instance to be suspended * * This function is intended to be called by the pm callbacks * (e.g., runtime_suspend, suspend) of the device driver that * holds the devfreq. */ int devfreq_suspend_device(struct devfreq *devfreq) { int ret; if (!devfreq) return -EINVAL; if (atomic_inc_return(&devfreq->suspend_count) > 1) return 0; if (devfreq->governor) { ret = devfreq->governor->event_handler(devfreq, DEVFREQ_GOV_SUSPEND, NULL); if (ret) return ret; } if (devfreq->suspend_freq) { mutex_lock(&devfreq->lock); ret = devfreq_set_target(devfreq, devfreq->suspend_freq, 0); mutex_unlock(&devfreq->lock); if (ret) return ret; } return 0; } EXPORT_SYMBOL(devfreq_suspend_device); /** * devfreq_resume_device() - Resume devfreq of a device. * @devfreq: the devfreq instance to be resumed * * This function is intended to be called by the pm callbacks * (e.g., runtime_resume, resume) of the device driver that * holds the devfreq. 
*/ int devfreq_resume_device(struct devfreq *devfreq) { int ret; if (!devfreq) return -EINVAL; if (atomic_dec_return(&devfreq->suspend_count) >= 1) return 0; if (devfreq->resume_freq) { mutex_lock(&devfreq->lock); ret = devfreq_set_target(devfreq, devfreq->resume_freq, 0); mutex_unlock(&devfreq->lock); if (ret) return ret; } if (devfreq->governor) { ret = devfreq->governor->event_handler(devfreq, DEVFREQ_GOV_RESUME, NULL); if (ret) return ret; } return 0; } EXPORT_SYMBOL(devfreq_resume_device); /** * devfreq_suspend() - Suspend devfreq governors and devices * * Called during system wide Suspend/Hibernate cycles for suspending governors * and devices preserving the state for resume. On some platforms the devfreq * device must have precise state (frequency) after resume in order to provide * fully operating setup. */ void devfreq_suspend(void) { struct devfreq *devfreq; int ret; mutex_lock(&devfreq_list_lock); list_for_each_entry(devfreq, &devfreq_list, node) { ret = devfreq_suspend_device(devfreq); if (ret) dev_err(&devfreq->dev, "failed to suspend devfreq device\n"); } mutex_unlock(&devfreq_list_lock); } /** * devfreq_resume() - Resume devfreq governors and devices * * Called during system wide Suspend/Hibernate cycle for resuming governors and * devices that are suspended with devfreq_suspend(). 
*/ void devfreq_resume(void) { struct devfreq *devfreq; int ret; mutex_lock(&devfreq_list_lock); list_for_each_entry(devfreq, &devfreq_list, node) { ret = devfreq_resume_device(devfreq); if (ret) dev_warn(&devfreq->dev, "failed to resume devfreq device\n"); } mutex_unlock(&devfreq_list_lock); } /** * devfreq_add_governor() - Add devfreq governor * @governor: the devfreq governor to be added */ int devfreq_add_governor(struct devfreq_governor *governor) { struct devfreq_governor *g; struct devfreq *devfreq; int err = 0; if (!governor) { pr_err("%s: Invalid parameters.\n", __func__); return -EINVAL; } mutex_lock(&devfreq_list_lock); g = find_devfreq_governor(governor->name); if (!IS_ERR(g)) { pr_err("%s: governor %s already registered\n", __func__, g->name); err = -EINVAL; goto err_out; } list_add(&governor->node, &devfreq_governor_list); list_for_each_entry(devfreq, &devfreq_list, node) { int ret = 0; struct device *dev = devfreq->dev.parent; if (!strncmp(devfreq->governor->name, governor->name, DEVFREQ_NAME_LEN)) { /* The following should never occur */ if (devfreq->governor) { dev_warn(dev, "%s: Governor %s already present\n", __func__, devfreq->governor->name); ret = devfreq->governor->event_handler(devfreq, DEVFREQ_GOV_STOP, NULL); if (ret) { dev_warn(dev, "%s: Governor %s stop = %d\n", __func__, devfreq->governor->name, ret); } /* Fall through */ } devfreq->governor = governor; ret = devfreq->governor->event_handler(devfreq, DEVFREQ_GOV_START, NULL); if (ret) { dev_warn(dev, "%s: Governor %s start=%d\n", __func__, devfreq->governor->name, ret); } } } err_out: mutex_unlock(&devfreq_list_lock); return err; } EXPORT_SYMBOL(devfreq_add_governor); static void devm_devfreq_remove_governor(void *governor) { WARN_ON(devfreq_remove_governor(governor)); } /** * devm_devfreq_add_governor() - Add devfreq governor * @dev: device which adds devfreq governor * @governor: the devfreq governor to be added * * This is a resource-managed variant of devfreq_add_governor(). 
*/ int devm_devfreq_add_governor(struct device *dev, struct devfreq_governor *governor) { int err; err = devfreq_add_governor(governor); if (err) return err; return devm_add_action_or_reset(dev, devm_devfreq_remove_governor, governor); } EXPORT_SYMBOL(devm_devfreq_add_governor); /** * devfreq_remove_governor() - Remove devfreq feature from a device. * @governor: the devfreq governor to be removed */ int devfreq_remove_governor(struct devfreq_governor *governor) { struct devfreq_governor *g; struct devfreq *devfreq; int err = 0; if (!governor) { pr_err("%s: Invalid parameters.\n", __func__); return -EINVAL; } mutex_lock(&devfreq_list_lock); g = find_devfreq_governor(governor->name); if (IS_ERR(g)) { pr_err("%s: governor %s not registered\n", __func__, governor->name); err = PTR_ERR(g); goto err_out; } list_for_each_entry(devfreq, &devfreq_list, node) { int ret; struct device *dev = devfreq->dev.parent; if (!strncmp(devfreq->governor->name, governor->name, DEVFREQ_NAME_LEN)) { /* we should have a devfreq governor! 
*/ if (!devfreq->governor) { dev_warn(dev, "%s: Governor %s NOT present\n", __func__, governor->name); continue; /* Fall through */ } ret = devfreq->governor->event_handler(devfreq, DEVFREQ_GOV_STOP, NULL); if (ret) { dev_warn(dev, "%s: Governor %s stop=%d\n", __func__, devfreq->governor->name, ret); } devfreq->governor = NULL; } } list_del(&governor->node); err_out: mutex_unlock(&devfreq_list_lock); return err; } EXPORT_SYMBOL(devfreq_remove_governor); static ssize_t name_show(struct device *dev, struct device_attribute *attr, char *buf) { struct devfreq *df = to_devfreq(dev); return sprintf(buf, "%s\n", dev_name(df->dev.parent)); } static DEVICE_ATTR_RO(name); static ssize_t governor_show(struct device *dev, struct device_attribute *attr, char *buf) { struct devfreq *df = to_devfreq(dev); if (!df->governor) return -EINVAL; return sprintf(buf, "%s\n", df->governor->name); } static ssize_t governor_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct devfreq *df = to_devfreq(dev); int ret; char str_governor[DEVFREQ_NAME_LEN + 1]; const struct devfreq_governor *governor, *prev_governor; if (!df->governor) return -EINVAL; ret = sscanf(buf, "%" __stringify(DEVFREQ_NAME_LEN) "s", str_governor); if (ret != 1) return -EINVAL; mutex_lock(&devfreq_list_lock); governor = try_then_request_governor(str_governor); if (IS_ERR(governor)) { ret = PTR_ERR(governor); goto out; } if (df->governor == governor) { ret = 0; goto out; } else if (IS_SUPPORTED_FLAG(df->governor->flags, IMMUTABLE) || IS_SUPPORTED_FLAG(governor->flags, IMMUTABLE)) { ret = -EINVAL; goto out; } /* * Stop the current governor and remove the specific sysfs files * which depend on current governor. 
*/ ret = df->governor->event_handler(df, DEVFREQ_GOV_STOP, NULL); if (ret) { dev_warn(dev, "%s: Governor %s not stopped(%d)\n", __func__, df->governor->name, ret); goto out; } remove_sysfs_files(df, df->governor); /* * Start the new governor and create the specific sysfs files * which depend on the new governor. */ prev_governor = df->governor; df->governor = governor; ret = df->governor->event_handler(df, DEVFREQ_GOV_START, NULL); if (ret) { dev_warn(dev, "%s: Governor %s not started(%d)\n", __func__, df->governor->name, ret); /* Restore previous governor */ df->governor = prev_governor; ret = df->governor->event_handler(df, DEVFREQ_GOV_START, NULL); if (ret) { dev_err(dev, "%s: reverting to Governor %s failed (%d)\n", __func__, prev_governor->name, ret); df->governor = NULL; goto out; } } /* * Create the sysfs files for the new governor. But if failed to start * the new governor, restore the sysfs files of previous governor. */ create_sysfs_files(df, df->governor); out: mutex_unlock(&devfreq_list_lock); if (!ret) ret = count; return ret; } static DEVICE_ATTR_RW(governor); static ssize_t available_governors_show(struct device *d, struct device_attribute *attr, char *buf) { struct devfreq *df = to_devfreq(d); ssize_t count = 0; if (!df->governor) return -EINVAL; mutex_lock(&devfreq_list_lock); /* * The devfreq with immutable governor (e.g., passive) shows * only own governor. */ if (IS_SUPPORTED_FLAG(df->governor->flags, IMMUTABLE)) { count = scnprintf(&buf[count], DEVFREQ_NAME_LEN, "%s ", df->governor->name); /* * The devfreq device shows the registered governor except for * immutable governors such as passive governor . 
*/ } else { struct devfreq_governor *governor; list_for_each_entry(governor, &devfreq_governor_list, node) { if (IS_SUPPORTED_FLAG(governor->flags, IMMUTABLE)) continue; count += scnprintf(&buf[count], (PAGE_SIZE - count - 2), "%s ", governor->name); } } mutex_unlock(&devfreq_list_lock); /* Truncate the trailing space */ if (count) count--; count += sprintf(&buf[count], "\n"); return count; } static DEVICE_ATTR_RO(available_governors); static ssize_t cur_freq_show(struct device *dev, struct device_attribute *attr, char *buf) { unsigned long freq; struct devfreq *df = to_devfreq(dev); if (!df->profile) return -EINVAL; if (df->profile->get_cur_freq && !df->profile->get_cur_freq(df->dev.parent, &freq)) return sprintf(buf, "%lu\n", freq); return sprintf(buf, "%lu\n", df->previous_freq); } static DEVICE_ATTR_RO(cur_freq); static ssize_t target_freq_show(struct device *dev, struct device_attribute *attr, char *buf) { struct devfreq *df = to_devfreq(dev); return sprintf(buf, "%lu\n", df->previous_freq); } static DEVICE_ATTR_RO(target_freq); static ssize_t min_freq_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct devfreq *df = to_devfreq(dev); unsigned long value; int ret; /* * Protect against theoretical sysfs writes between * device_add and dev_pm_qos_add_request */ if (!dev_pm_qos_request_active(&df->user_min_freq_req)) return -EAGAIN; ret = sscanf(buf, "%lu", &value); if (ret != 1) return -EINVAL; /* Round down to kHz for PM QoS */ ret = dev_pm_qos_update_request(&df->user_min_freq_req, value / HZ_PER_KHZ); if (ret < 0) return ret; return count; } static ssize_t min_freq_show(struct device *dev, struct device_attribute *attr, char *buf) { struct devfreq *df = to_devfreq(dev); unsigned long min_freq, max_freq; mutex_lock(&df->lock); devfreq_get_freq_range(df, &min_freq, &max_freq); mutex_unlock(&df->lock); return sprintf(buf, "%lu\n", min_freq); } static DEVICE_ATTR_RW(min_freq); static ssize_t max_freq_store(struct device 
*dev, struct device_attribute *attr, const char *buf, size_t count) { struct devfreq *df = to_devfreq(dev); unsigned long value; int ret; /* * Protect against theoretical sysfs writes between * device_add and dev_pm_qos_add_request */ if (!dev_pm_qos_request_active(&df->user_max_freq_req)) return -EINVAL; ret = sscanf(buf, "%lu", &value); if (ret != 1) return -EINVAL; /* * PM QoS frequencies are in kHz so we need to convert. Convert by * rounding upwards so that the acceptable interval never shrinks. * * For example if the user writes "666666666" to sysfs this value will * be converted to 666667 kHz and back to 666667000 Hz before an OPP * lookup, this ensures that an OPP of 666666666Hz is still accepted. * * A value of zero means "no limit". */ if (value) value = DIV_ROUND_UP(value, HZ_PER_KHZ); else value = PM_QOS_MAX_FREQUENCY_DEFAULT_VALUE; ret = dev_pm_qos_update_request(&df->user_max_freq_req, value); if (ret < 0) return ret; return count; } static ssize_t max_freq_show(struct device *dev, struct device_attribute *attr, char *buf) { struct devfreq *df = to_devfreq(dev); unsigned long min_freq, max_freq; mutex_lock(&df->lock); devfreq_get_freq_range(df, &min_freq, &max_freq); mutex_unlock(&df->lock); return sprintf(buf, "%lu\n", max_freq); } static DEVICE_ATTR_RW(max_freq); static ssize_t available_frequencies_show(struct device *d, struct device_attribute *attr, char *buf) { struct devfreq *df = to_devfreq(d); ssize_t count = 0; int i; if (!df->profile) return -EINVAL; mutex_lock(&df->lock); for (i = 0; i < df->max_state; i++) count += scnprintf(&buf[count], (PAGE_SIZE - count - 2), "%lu ", df->freq_table[i]); mutex_unlock(&df->lock); /* Truncate the trailing space */ if (count) count--; count += sprintf(&buf[count], "\n"); return count; } static DEVICE_ATTR_RO(available_frequencies); static ssize_t trans_stat_show(struct device *dev, struct device_attribute *attr, char *buf) { struct devfreq *df = to_devfreq(dev); ssize_t len; int i, j; unsigned int 
max_state;

	/* Without a profile there is no frequency table to report. */
	if (!df->profile)
		return -EINVAL;

	max_state = df->max_state;

	if (max_state == 0)
		return sprintf(buf, "Not Supported.\n");

	/*
	 * Fold the time spent at the current frequency into the stats
	 * before printing, unless polling is stopped (suspended).
	 */
	mutex_lock(&df->lock);
	if (!df->stop_polling &&
			devfreq_update_status(df, df->previous_freq)) {
		mutex_unlock(&df->lock);
		return 0;
	}
	mutex_unlock(&df->lock);

	/* Header row: one column per target frequency. */
	len = sprintf(buf, " From : To\n");
	len += sprintf(buf + len, " :");
	for (i = 0; i < max_state; i++)
		len += sprintf(buf + len, "%10lu", df->freq_table[i]);
	len += sprintf(buf + len, " time(ms)\n");

	/* One row per source frequency; '*' marks the current one. */
	for (i = 0; i < max_state; i++) {
		if (df->freq_table[i] == df->previous_freq)
			len += sprintf(buf + len, "*");
		else
			len += sprintf(buf + len, " ");
		len += sprintf(buf + len, "%10lu:", df->freq_table[i]);
		/* trans_table is a flattened max_state x max_state matrix. */
		for (j = 0; j < max_state; j++)
			len += sprintf(buf + len, "%10u",
				df->stats.trans_table[(i * max_state) + j]);
		len += sprintf(buf + len, "%10llu\n", (u64)
			jiffies64_to_msecs(df->stats.time_in_state[i]));
	}

	len += sprintf(buf + len, "Total transition : %u\n",
		       df->stats.total_trans);
	return len;
}

/*
 * Writing '0' (and only '0') resets the transition statistics:
 * time_in_state, trans_table and total_trans are zeroed and the
 * last_update timestamp restarts from now.
 */
static ssize_t trans_stat_store(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t count)
{
	struct devfreq *df = to_devfreq(dev);
	int err, value;

	if (!df->profile)
		return -EINVAL;

	/* No frequency table: nothing to reset, accept the write. */
	if (df->max_state == 0)
		return count;

	err = kstrtoint(buf, 10, &value);
	if (err || value != 0)
		return -EINVAL;

	mutex_lock(&df->lock);
	memset(df->stats.time_in_state, 0, (df->max_state *
					sizeof(*df->stats.time_in_state)));
	memset(df->stats.trans_table, 0, array3_size(sizeof(unsigned int),
					df->max_state,
					df->max_state));
	df->stats.total_trans = 0;
	df->stats.last_update = get_jiffies_64();
	mutex_unlock(&df->lock);

	return count;
}
static DEVICE_ATTR_RW(trans_stat);

/* Attributes common to every devfreq device, exposed via dev_groups. */
static struct attribute *devfreq_attrs[] = {
	&dev_attr_name.attr,
	&dev_attr_governor.attr,
	&dev_attr_available_governors.attr,
	&dev_attr_cur_freq.attr,
	&dev_attr_available_frequencies.attr,
	&dev_attr_target_freq.attr,
	&dev_attr_min_freq.attr,
	&dev_attr_max_freq.attr,
	&dev_attr_trans_stat.attr,
	NULL,
};
ATTRIBUTE_GROUPS(devfreq);

static
ssize_t polling_interval_show(struct device *dev, struct device_attribute *attr, char *buf) { struct devfreq *df = to_devfreq(dev); if (!df->profile) return -EINVAL; return sprintf(buf, "%d\n", df->profile->polling_ms); } static ssize_t polling_interval_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct devfreq *df = to_devfreq(dev); unsigned int value; int ret; if (!df->governor) return -EINVAL; ret = sscanf(buf, "%u", &value); if (ret != 1) return -EINVAL; df->governor->event_handler(df, DEVFREQ_GOV_UPDATE_INTERVAL, &value); ret = count; return ret; } static DEVICE_ATTR_RW(polling_interval); static ssize_t timer_show(struct device *dev, struct device_attribute *attr, char *buf) { struct devfreq *df = to_devfreq(dev); if (!df->profile) return -EINVAL; return sprintf(buf, "%s\n", timer_name[df->profile->timer]); } static ssize_t timer_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct devfreq *df = to_devfreq(dev); char str_timer[DEVFREQ_NAME_LEN + 1]; int timer = -1; int ret = 0, i; if (!df->governor || !df->profile) return -EINVAL; ret = sscanf(buf, "%16s", str_timer); if (ret != 1) return -EINVAL; for (i = 0; i < DEVFREQ_TIMER_NUM; i++) { if (!strncmp(timer_name[i], str_timer, DEVFREQ_NAME_LEN)) { timer = i; break; } } if (timer < 0) { ret = -EINVAL; goto out; } if (df->profile->timer == timer) { ret = 0; goto out; } mutex_lock(&df->lock); df->profile->timer = timer; mutex_unlock(&df->lock); ret = df->governor->event_handler(df, DEVFREQ_GOV_STOP, NULL); if (ret) { dev_warn(dev, "%s: Governor %s not stopped(%d)\n", __func__, df->governor->name, ret); goto out; } ret = df->governor->event_handler(df, DEVFREQ_GOV_START, NULL); if (ret) dev_warn(dev, "%s: Governor %s not started(%d)\n", __func__, df->governor->name, ret); out: return ret ? 
ret : count; } static DEVICE_ATTR_RW(timer); #define CREATE_SYSFS_FILE(df, name) \ { \ int ret; \ ret = sysfs_create_file(&df->dev.kobj, &dev_attr_##name.attr); \ if (ret < 0) { \ dev_warn(&df->dev, \ "Unable to create attr(%s)\n", "##name"); \ } \ } \ /* Create the specific sysfs files which depend on each governor. */ static void create_sysfs_files(struct devfreq *devfreq, const struct devfreq_governor *gov) { if (IS_SUPPORTED_ATTR(gov->attrs, POLLING_INTERVAL)) CREATE_SYSFS_FILE(devfreq, polling_interval); if (IS_SUPPORTED_ATTR(gov->attrs, TIMER)) CREATE_SYSFS_FILE(devfreq, timer); } /* Remove the specific sysfs files which depend on each governor. */ static void remove_sysfs_files(struct devfreq *devfreq, const struct devfreq_governor *gov) { if (IS_SUPPORTED_ATTR(gov->attrs, POLLING_INTERVAL)) sysfs_remove_file(&devfreq->dev.kobj, &dev_attr_polling_interval.attr); if (IS_SUPPORTED_ATTR(gov->attrs, TIMER)) sysfs_remove_file(&devfreq->dev.kobj, &dev_attr_timer.attr); } /** * devfreq_summary_show() - Show the summary of the devfreq devices * @s: seq_file instance to show the summary of devfreq devices * @data: not used * * Show the summary of the devfreq devices via 'devfreq_summary' debugfs file. * It helps that user can know the detailed information of the devfreq devices. * * Return 0 always because it shows the information without any data change. 
*/ static int devfreq_summary_show(struct seq_file *s, void *data) { struct devfreq *devfreq; struct devfreq *p_devfreq = NULL; unsigned long cur_freq, min_freq, max_freq; unsigned int polling_ms; unsigned int timer; seq_printf(s, "%-30s %-30s %-15s %-10s %10s %12s %12s %12s\n", "dev", "parent_dev", "governor", "timer", "polling_ms", "cur_freq_Hz", "min_freq_Hz", "max_freq_Hz"); seq_printf(s, "%30s %30s %15s %10s %10s %12s %12s %12s\n", "------------------------------", "------------------------------", "---------------", "----------", "----------", "------------", "------------", "------------"); mutex_lock(&devfreq_list_lock); list_for_each_entry_reverse(devfreq, &devfreq_list, node) { #if IS_ENABLED(CONFIG_DEVFREQ_GOV_PASSIVE) if (!strncmp(devfreq->governor->name, DEVFREQ_GOV_PASSIVE, DEVFREQ_NAME_LEN)) { struct devfreq_passive_data *data = devfreq->data; if (data) p_devfreq = data->parent; } else { p_devfreq = NULL; } #endif mutex_lock(&devfreq->lock); cur_freq = devfreq->previous_freq; devfreq_get_freq_range(devfreq, &min_freq, &max_freq); timer = devfreq->profile->timer; if (IS_SUPPORTED_ATTR(devfreq->governor->attrs, POLLING_INTERVAL)) polling_ms = devfreq->profile->polling_ms; else polling_ms = 0; mutex_unlock(&devfreq->lock); seq_printf(s, "%-30s %-30s %-15s %-10s %10d %12ld %12ld %12ld\n", dev_name(&devfreq->dev), p_devfreq ? dev_name(&p_devfreq->dev) : "null", devfreq->governor->name, polling_ms ? 
timer_name[timer] : "null", polling_ms, cur_freq, min_freq, max_freq); } mutex_unlock(&devfreq_list_lock); return 0; } DEFINE_SHOW_ATTRIBUTE(devfreq_summary); static int __init devfreq_init(void) { devfreq_class = class_create("devfreq"); if (IS_ERR(devfreq_class)) { pr_err("%s: couldn't create class\n", __FILE__); return PTR_ERR(devfreq_class); } devfreq_wq = create_freezable_workqueue("devfreq_wq"); if (!devfreq_wq) { class_destroy(devfreq_class); pr_err("%s: couldn't create workqueue\n", __FILE__); return -ENOMEM; } devfreq_class->dev_groups = devfreq_groups; devfreq_debugfs = debugfs_create_dir("devfreq", NULL); debugfs_create_file("devfreq_summary", 0444, devfreq_debugfs, NULL, &devfreq_summary_fops); return 0; } subsys_initcall(devfreq_init); /* * The following are helper functions for devfreq user device drivers with * OPP framework. */ /** * devfreq_recommended_opp() - Helper function to get proper OPP for the * freq value given to target callback. * @dev: The devfreq user device. (parent of devfreq) * @freq: The frequency given to target function * @flags: Flags handed from devfreq framework. * * The callers are required to call dev_pm_opp_put() for the returned OPP after * use. */ struct dev_pm_opp *devfreq_recommended_opp(struct device *dev, unsigned long *freq, u32 flags) { struct dev_pm_opp *opp; if (flags & DEVFREQ_FLAG_LEAST_UPPER_BOUND) { /* The freq is an upper bound. opp should be lower */ opp = dev_pm_opp_find_freq_floor(dev, freq); /* If not available, use the closest opp */ if (opp == ERR_PTR(-ERANGE)) opp = dev_pm_opp_find_freq_ceil(dev, freq); } else { /* The freq is an lower bound. 
opp should be higher */ opp = dev_pm_opp_find_freq_ceil(dev, freq); /* If not available, use the closest opp */ if (opp == ERR_PTR(-ERANGE)) opp = dev_pm_opp_find_freq_floor(dev, freq); } return opp; } EXPORT_SYMBOL(devfreq_recommended_opp); /** * devfreq_register_opp_notifier() - Helper function to get devfreq notified * for any changes in the OPP availability * changes * @dev: The devfreq user device. (parent of devfreq) * @devfreq: The devfreq object. */ int devfreq_register_opp_notifier(struct device *dev, struct devfreq *devfreq) { return dev_pm_opp_register_notifier(dev, &devfreq->nb); } EXPORT_SYMBOL(devfreq_register_opp_notifier); /** * devfreq_unregister_opp_notifier() - Helper function to stop getting devfreq * notified for any changes in the OPP * availability changes anymore. * @dev: The devfreq user device. (parent of devfreq) * @devfreq: The devfreq object. * * At exit() callback of devfreq_dev_profile, this must be included if * devfreq_recommended_opp is used. */ int devfreq_unregister_opp_notifier(struct device *dev, struct devfreq *devfreq) { return dev_pm_opp_unregister_notifier(dev, &devfreq->nb); } EXPORT_SYMBOL(devfreq_unregister_opp_notifier); static void devm_devfreq_opp_release(struct device *dev, void *res) { devfreq_unregister_opp_notifier(dev, *(struct devfreq **)res); } /** * devm_devfreq_register_opp_notifier() - Resource-managed * devfreq_register_opp_notifier() * @dev: The devfreq user device. (parent of devfreq) * @devfreq: The devfreq object. 
 */
int devm_devfreq_register_opp_notifier(struct device *dev,
				       struct devfreq *devfreq)
{
	struct devfreq **ptr;
	int ret;

	/*
	 * The devres entry owns the registration: it is undone by
	 * devm_devfreq_opp_release() when @dev is unbound.
	 */
	ptr = devres_alloc(devm_devfreq_opp_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return -ENOMEM;

	ret = devfreq_register_opp_notifier(dev, devfreq);
	if (ret) {
		/* Registration failed: drop the unused devres entry. */
		devres_free(ptr);
		return ret;
	}

	*ptr = devfreq;
	devres_add(dev, ptr);

	return 0;
}
EXPORT_SYMBOL(devm_devfreq_register_opp_notifier);

/**
 * devm_devfreq_unregister_opp_notifier() - Resource-managed
 *					    devfreq_unregister_opp_notifier()
 * @dev:	The devfreq user device. (parent of devfreq)
 * @devfreq:	The devfreq object.
 *
 * Releases the devres entry added by devm_devfreq_register_opp_notifier(),
 * which also unregisters the OPP notifier; WARNs if no entry matches.
 */
void devm_devfreq_unregister_opp_notifier(struct device *dev,
					 struct devfreq *devfreq)
{
	WARN_ON(devres_release(dev, devm_devfreq_opp_release,
			       devm_devfreq_dev_match, devfreq));
}
EXPORT_SYMBOL(devm_devfreq_unregister_opp_notifier);

/**
 * devfreq_register_notifier() - Register a driver with devfreq
 * @devfreq:	The devfreq object.
 * @nb:		The notifier block to register.
 * @list:	DEVFREQ_TRANSITION_NOTIFIER.
 *
 * Only the transition notifier list is supported; any other @list
 * value returns -EINVAL.
 */
int devfreq_register_notifier(struct devfreq *devfreq,
			      struct notifier_block *nb,
			      unsigned int list)
{
	int ret = 0;

	if (!devfreq)
		return -EINVAL;

	switch (list) {
	case DEVFREQ_TRANSITION_NOTIFIER:
		ret = srcu_notifier_chain_register(
				&devfreq->transition_notifier_list, nb);
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}
EXPORT_SYMBOL(devfreq_register_notifier);

/*
 * devfreq_unregister_notifier() - Unregister a driver with devfreq
 * @devfreq:	The devfreq object.
 * @nb:		The notifier block to be unregistered.
 * @list:	DEVFREQ_TRANSITION_NOTIFIER.
*/ int devfreq_unregister_notifier(struct devfreq *devfreq, struct notifier_block *nb, unsigned int list) { int ret = 0; if (!devfreq) return -EINVAL; switch (list) { case DEVFREQ_TRANSITION_NOTIFIER: ret = srcu_notifier_chain_unregister( &devfreq->transition_notifier_list, nb); break; default: ret = -EINVAL; } return ret; } EXPORT_SYMBOL(devfreq_unregister_notifier); struct devfreq_notifier_devres { struct devfreq *devfreq; struct notifier_block *nb; unsigned int list; }; static void devm_devfreq_notifier_release(struct device *dev, void *res) { struct devfreq_notifier_devres *this = res; devfreq_unregister_notifier(this->devfreq, this->nb, this->list); } /** * devm_devfreq_register_notifier() * - Resource-managed devfreq_register_notifier() * @dev: The devfreq user device. (parent of devfreq) * @devfreq: The devfreq object. * @nb: The notifier block to be unregistered. * @list: DEVFREQ_TRANSITION_NOTIFIER. */ int devm_devfreq_register_notifier(struct device *dev, struct devfreq *devfreq, struct notifier_block *nb, unsigned int list) { struct devfreq_notifier_devres *ptr; int ret; ptr = devres_alloc(devm_devfreq_notifier_release, sizeof(*ptr), GFP_KERNEL); if (!ptr) return -ENOMEM; ret = devfreq_register_notifier(devfreq, nb, list); if (ret) { devres_free(ptr); return ret; } ptr->devfreq = devfreq; ptr->nb = nb; ptr->list = list; devres_add(dev, ptr); return 0; } EXPORT_SYMBOL(devm_devfreq_register_notifier); /** * devm_devfreq_unregister_notifier() * - Resource-managed devfreq_unregister_notifier() * @dev: The devfreq user device. (parent of devfreq) * @devfreq: The devfreq object. * @nb: The notifier block to be unregistered. * @list: DEVFREQ_TRANSITION_NOTIFIER. */ void devm_devfreq_unregister_notifier(struct device *dev, struct devfreq *devfreq, struct notifier_block *nb, unsigned int list) { WARN_ON(devres_release(dev, devm_devfreq_notifier_release, devm_devfreq_dev_match, devfreq)); } EXPORT_SYMBOL(devm_devfreq_unregister_notifier);
/* dataset metadata (not source code): repo "linux-master", path "drivers/devfreq/devfreq.c" */
// SPDX-License-Identifier: GPL-2.0 /* * Copyright 2019 NXP */ #include <linux/mod_devicetable.h> #include <linux/module.h> #include <linux/device.h> #include <linux/platform_device.h> #include <linux/devfreq.h> #include <linux/pm_opp.h> #include <linux/clk.h> #include <linux/clk-provider.h> #include <linux/arm-smccc.h> #define IMX_SIP_DDR_DVFS 0xc2000004 /* Query available frequencies. */ #define IMX_SIP_DDR_DVFS_GET_FREQ_COUNT 0x10 #define IMX_SIP_DDR_DVFS_GET_FREQ_INFO 0x11 /* * This should be in a 1:1 mapping with devicetree OPPs but * firmware provides additional info. */ struct imx8m_ddrc_freq { unsigned long rate; unsigned long smcarg; int dram_core_parent_index; int dram_alt_parent_index; int dram_apb_parent_index; }; /* Hardware limitation */ #define IMX8M_DDRC_MAX_FREQ_COUNT 4 /* * i.MX8M DRAM Controller clocks have the following structure (abridged): * * +----------+ |\ +------+ * | dram_pll |-------|M| dram_core | | * +----------+ |U|---------->| D | * /--|X| | D | * dram_alt_root | |/ | R | * | | C | * +---------+ | | * |FIX DIV/4| | | * +---------+ | | * composite: | | | * +----------+ | | | * | dram_alt |----/ | | * +----------+ | | * | dram_apb |-------------------->| | * +----------+ +------+ * * The dram_pll is used for higher rates and dram_alt is used for lower rates. * * Frequency switching is implemented in TF-A (via SMC call) and can change the * configuration of the clocks, including mux parents. The dram_alt and * dram_apb clocks are "imx composite" and their parent can change too. * * We need to prepare/enable the new mux parents head of switching and update * their information afterwards. 
 */
struct imx8m_ddrc {
	struct devfreq_dev_profile profile;
	struct devfreq *devfreq;

	/* For frequency switching: */
	struct clk *dram_core;
	struct clk *dram_pll;
	struct clk *dram_alt;
	struct clk *dram_apb;

	/* Entries below freq_count are valid; filled from firmware. */
	int freq_count;
	struct imx8m_ddrc_freq freq_table[IMX8M_DDRC_MAX_FREQ_COUNT];
};

/*
 * Look up the firmware frequency-table entry matching a clk rate in Hz.
 * Returns NULL when the rate is not known to the firmware.
 */
static struct imx8m_ddrc_freq *imx8m_ddrc_find_freq(struct imx8m_ddrc *priv,
						    unsigned long rate)
{
	struct imx8m_ddrc_freq *freq;
	int i;

	/*
	 * Firmware reports values in MT/s, so we round-down from Hz
	 * Rounding is extra generous to ensure a match.
	 */
	rate = DIV_ROUND_CLOSEST(rate, 250000);
	for (i = 0; i < priv->freq_count; ++i) {
		freq = &priv->freq_table[i];
		/* Accept +/-1 MT/s to absorb rounding differences. */
		if (freq->rate == rate ||
				freq->rate + 1 == rate ||
				freq->rate - 1 == rate)
			return freq;
	}

	return NULL;
}

/*
 * Ask TF-A (via SMC) to perform the DDR frequency switch.
 * Runs with local IRQs off for the duration of the call.
 */
static void imx8m_ddrc_smc_set_freq(int target_freq)
{
	struct arm_smccc_res res;
	u32 online_cpus = 0;
	int cpu;

	local_irq_disable();

	/*
	 * Encode online CPUs for the firmware; looks like one byte per
	 * CPU in the mask — NOTE(review): confirm against the TF-A
	 * IMX_SIP_DDR_DVFS ABI.
	 */
	for_each_online_cpu(cpu)
		online_cpus |= (1 << (cpu * 8));

	/* change the ddr frequency */
	arm_smccc_smc(IMX_SIP_DDR_DVFS, target_freq, online_cpus,
			0, 0, 0, 0, 0, &res);

	local_irq_enable();
}

/* Return the clk of parent @index of @clk, or NULL if none. */
static struct clk *clk_get_parent_by_index(struct clk *clk, int index)
{
	struct clk_hw *hw;

	hw = clk_hw_get_parent_by_index(__clk_get_hw(clk), index);

	return hw ? hw->clk : NULL;
}

static int imx8m_ddrc_set_freq(struct device *dev, struct imx8m_ddrc_freq *freq)
{
	struct imx8m_ddrc *priv = dev_get_drvdata(dev);
	struct clk *new_dram_core_parent;
	struct clk *new_dram_alt_parent;
	struct clk *new_dram_apb_parent;
	int ret;

	/*
	 * Fetch new parents
	 *
	 * new_dram_alt_parent and new_dram_apb_parent are optional but
	 * new_dram_core_parent is not.
*/ new_dram_core_parent = clk_get_parent_by_index( priv->dram_core, freq->dram_core_parent_index - 1); if (!new_dram_core_parent) { dev_err(dev, "failed to fetch new dram_core parent\n"); return -EINVAL; } if (freq->dram_alt_parent_index) { new_dram_alt_parent = clk_get_parent_by_index( priv->dram_alt, freq->dram_alt_parent_index - 1); if (!new_dram_alt_parent) { dev_err(dev, "failed to fetch new dram_alt parent\n"); return -EINVAL; } } else new_dram_alt_parent = NULL; if (freq->dram_apb_parent_index) { new_dram_apb_parent = clk_get_parent_by_index( priv->dram_apb, freq->dram_apb_parent_index - 1); if (!new_dram_apb_parent) { dev_err(dev, "failed to fetch new dram_apb parent\n"); return -EINVAL; } } else new_dram_apb_parent = NULL; /* increase reference counts and ensure clks are ON before switch */ ret = clk_prepare_enable(new_dram_core_parent); if (ret) { dev_err(dev, "failed to enable new dram_core parent: %d\n", ret); goto out; } ret = clk_prepare_enable(new_dram_alt_parent); if (ret) { dev_err(dev, "failed to enable new dram_alt parent: %d\n", ret); goto out_disable_core_parent; } ret = clk_prepare_enable(new_dram_apb_parent); if (ret) { dev_err(dev, "failed to enable new dram_apb parent: %d\n", ret); goto out_disable_alt_parent; } imx8m_ddrc_smc_set_freq(freq->smcarg); /* update parents in clk tree after switch. */ ret = clk_set_parent(priv->dram_core, new_dram_core_parent); if (ret) dev_warn(dev, "failed to set dram_core parent: %d\n", ret); if (new_dram_alt_parent) { ret = clk_set_parent(priv->dram_alt, new_dram_alt_parent); if (ret) dev_warn(dev, "failed to set dram_alt parent: %d\n", ret); } if (new_dram_apb_parent) { ret = clk_set_parent(priv->dram_apb, new_dram_apb_parent); if (ret) dev_warn(dev, "failed to set dram_apb parent: %d\n", ret); } /* * Explicitly refresh dram PLL rate. * * Even if it's marked with CLK_GET_RATE_NOCACHE the rate will not be * automatically refreshed when clk_get_rate is called on children. 
*/ clk_get_rate(priv->dram_pll); /* * clk_set_parent transfer the reference count from old parent. * now we drop extra reference counts used during the switch */ clk_disable_unprepare(new_dram_apb_parent); out_disable_alt_parent: clk_disable_unprepare(new_dram_alt_parent); out_disable_core_parent: clk_disable_unprepare(new_dram_core_parent); out: return ret; } static int imx8m_ddrc_target(struct device *dev, unsigned long *freq, u32 flags) { struct imx8m_ddrc *priv = dev_get_drvdata(dev); struct imx8m_ddrc_freq *freq_info; struct dev_pm_opp *new_opp; unsigned long old_freq, new_freq; int ret; new_opp = devfreq_recommended_opp(dev, freq, flags); if (IS_ERR(new_opp)) { ret = PTR_ERR(new_opp); dev_err(dev, "failed to get recommended opp: %d\n", ret); return ret; } dev_pm_opp_put(new_opp); old_freq = clk_get_rate(priv->dram_core); if (*freq == old_freq) return 0; freq_info = imx8m_ddrc_find_freq(priv, *freq); if (!freq_info) return -EINVAL; /* * Read back the clk rate to verify switch was correct and so that * we can report it on all error paths. */ ret = imx8m_ddrc_set_freq(dev, freq_info); new_freq = clk_get_rate(priv->dram_core); if (ret) dev_err(dev, "ddrc failed freq switch to %lu from %lu: error %d. 
now at %lu\n", *freq, old_freq, ret, new_freq); else if (*freq != new_freq) dev_err(dev, "ddrc failed freq update to %lu from %lu, now at %lu\n", *freq, old_freq, new_freq); else dev_dbg(dev, "ddrc freq set to %lu (was %lu)\n", *freq, old_freq); return ret; } static int imx8m_ddrc_get_cur_freq(struct device *dev, unsigned long *freq) { struct imx8m_ddrc *priv = dev_get_drvdata(dev); *freq = clk_get_rate(priv->dram_core); return 0; } static int imx8m_ddrc_init_freq_info(struct device *dev) { struct imx8m_ddrc *priv = dev_get_drvdata(dev); struct arm_smccc_res res; int index; /* An error here means DDR DVFS API not supported by firmware */ arm_smccc_smc(IMX_SIP_DDR_DVFS, IMX_SIP_DDR_DVFS_GET_FREQ_COUNT, 0, 0, 0, 0, 0, 0, &res); priv->freq_count = res.a0; if (priv->freq_count <= 0 || priv->freq_count > IMX8M_DDRC_MAX_FREQ_COUNT) return -ENODEV; for (index = 0; index < priv->freq_count; ++index) { struct imx8m_ddrc_freq *freq = &priv->freq_table[index]; arm_smccc_smc(IMX_SIP_DDR_DVFS, IMX_SIP_DDR_DVFS_GET_FREQ_INFO, index, 0, 0, 0, 0, 0, &res); /* Result should be strictly positive */ if ((long)res.a0 <= 0) return -ENODEV; freq->rate = res.a0; freq->smcarg = index; freq->dram_core_parent_index = res.a1; freq->dram_alt_parent_index = res.a2; freq->dram_apb_parent_index = res.a3; /* dram_core has 2 options: dram_pll or dram_alt_root */ if (freq->dram_core_parent_index != 1 && freq->dram_core_parent_index != 2) return -ENODEV; /* dram_apb and dram_alt have exactly 8 possible parents */ if (freq->dram_alt_parent_index > 8 || freq->dram_apb_parent_index > 8) return -ENODEV; /* dram_core from alt requires explicit dram_alt parent */ if (freq->dram_core_parent_index == 2 && freq->dram_alt_parent_index == 0) return -ENODEV; } return 0; } static int imx8m_ddrc_check_opps(struct device *dev) { struct imx8m_ddrc *priv = dev_get_drvdata(dev); struct imx8m_ddrc_freq *freq_info; struct dev_pm_opp *opp; unsigned long freq; int i, opp_count; /* Enumerate DT OPPs and disable those not 
supported by firmware */ opp_count = dev_pm_opp_get_opp_count(dev); if (opp_count < 0) return opp_count; for (i = 0, freq = 0; i < opp_count; ++i, ++freq) { opp = dev_pm_opp_find_freq_ceil(dev, &freq); if (IS_ERR(opp)) { dev_err(dev, "Failed enumerating OPPs: %ld\n", PTR_ERR(opp)); return PTR_ERR(opp); } dev_pm_opp_put(opp); freq_info = imx8m_ddrc_find_freq(priv, freq); if (!freq_info) { dev_info(dev, "Disable unsupported OPP %luHz %luMT/s\n", freq, DIV_ROUND_CLOSEST(freq, 250000)); dev_pm_opp_disable(dev, freq); } } return 0; } static void imx8m_ddrc_exit(struct device *dev) { dev_pm_opp_of_remove_table(dev); } static int imx8m_ddrc_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct imx8m_ddrc *priv; const char *gov = DEVFREQ_GOV_USERSPACE; int ret; priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); if (!priv) return -ENOMEM; platform_set_drvdata(pdev, priv); ret = imx8m_ddrc_init_freq_info(dev); if (ret) { dev_err(dev, "failed to init firmware freq info: %d\n", ret); return ret; } priv->dram_core = devm_clk_get(dev, "core"); if (IS_ERR(priv->dram_core)) { ret = PTR_ERR(priv->dram_core); dev_err(dev, "failed to fetch core clock: %d\n", ret); return ret; } priv->dram_pll = devm_clk_get(dev, "pll"); if (IS_ERR(priv->dram_pll)) { ret = PTR_ERR(priv->dram_pll); dev_err(dev, "failed to fetch pll clock: %d\n", ret); return ret; } priv->dram_alt = devm_clk_get(dev, "alt"); if (IS_ERR(priv->dram_alt)) { ret = PTR_ERR(priv->dram_alt); dev_err(dev, "failed to fetch alt clock: %d\n", ret); return ret; } priv->dram_apb = devm_clk_get(dev, "apb"); if (IS_ERR(priv->dram_apb)) { ret = PTR_ERR(priv->dram_apb); dev_err(dev, "failed to fetch apb clock: %d\n", ret); return ret; } ret = dev_pm_opp_of_add_table(dev); if (ret < 0) { dev_err(dev, "failed to get OPP table\n"); return ret; } ret = imx8m_ddrc_check_opps(dev); if (ret < 0) goto err; priv->profile.target = imx8m_ddrc_target; priv->profile.exit = imx8m_ddrc_exit; priv->profile.get_cur_freq = 
imx8m_ddrc_get_cur_freq; priv->profile.initial_freq = clk_get_rate(priv->dram_core); priv->devfreq = devm_devfreq_add_device(dev, &priv->profile, gov, NULL); if (IS_ERR(priv->devfreq)) { ret = PTR_ERR(priv->devfreq); dev_err(dev, "failed to add devfreq device: %d\n", ret); goto err; } return 0; err: dev_pm_opp_of_remove_table(dev); return ret; } static const struct of_device_id imx8m_ddrc_of_match[] = { { .compatible = "fsl,imx8m-ddrc", }, { /* sentinel */ }, }; MODULE_DEVICE_TABLE(of, imx8m_ddrc_of_match); static struct platform_driver imx8m_ddrc_platdrv = { .probe = imx8m_ddrc_probe, .driver = { .name = "imx8m-ddrc-devfreq", .of_match_table = imx8m_ddrc_of_match, }, }; module_platform_driver(imx8m_ddrc_platdrv); MODULE_DESCRIPTION("i.MX8M DDR Controller frequency driver"); MODULE_AUTHOR("Leonard Crestez <[email protected]>"); MODULE_LICENSE("GPL v2");
/* dataset metadata (not source code): repo "linux-master", path "drivers/devfreq/imx8m-ddrc.c" */
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2022 MediaTek Inc. */ #include <linux/clk.h> #include <linux/devfreq.h> #include <linux/minmax.h> #include <linux/module.h> #include <linux/of.h> #include <linux/platform_device.h> #include <linux/pm_opp.h> #include <linux/regulator/consumer.h> struct mtk_ccifreq_platform_data { int min_volt_shift; int max_volt_shift; int proc_max_volt; int sram_min_volt; int sram_max_volt; }; struct mtk_ccifreq_drv { struct device *dev; struct devfreq *devfreq; struct regulator *proc_reg; struct regulator *sram_reg; struct clk *cci_clk; struct clk *inter_clk; int inter_voltage; unsigned long pre_freq; /* Avoid race condition for regulators between notify and policy */ struct mutex reg_lock; struct notifier_block opp_nb; const struct mtk_ccifreq_platform_data *soc_data; int vtrack_max; }; static int mtk_ccifreq_set_voltage(struct mtk_ccifreq_drv *drv, int new_voltage) { const struct mtk_ccifreq_platform_data *soc_data = drv->soc_data; struct device *dev = drv->dev; int pre_voltage, pre_vsram, new_vsram, vsram, voltage, ret; int retry_max = drv->vtrack_max; if (!drv->sram_reg) { ret = regulator_set_voltage(drv->proc_reg, new_voltage, drv->soc_data->proc_max_volt); return ret; } pre_voltage = regulator_get_voltage(drv->proc_reg); if (pre_voltage < 0) { dev_err(dev, "invalid vproc value: %d\n", pre_voltage); return pre_voltage; } pre_vsram = regulator_get_voltage(drv->sram_reg); if (pre_vsram < 0) { dev_err(dev, "invalid vsram value: %d\n", pre_vsram); return pre_vsram; } new_vsram = clamp(new_voltage + soc_data->min_volt_shift, soc_data->sram_min_volt, soc_data->sram_max_volt); do { if (pre_voltage <= new_voltage) { vsram = clamp(pre_voltage + soc_data->max_volt_shift, soc_data->sram_min_volt, new_vsram); ret = regulator_set_voltage(drv->sram_reg, vsram, soc_data->sram_max_volt); if (ret) return ret; if (vsram == soc_data->sram_max_volt || new_vsram == soc_data->sram_min_volt) voltage = new_voltage; else voltage = vsram - 
soc_data->min_volt_shift; ret = regulator_set_voltage(drv->proc_reg, voltage, soc_data->proc_max_volt); if (ret) { regulator_set_voltage(drv->sram_reg, pre_vsram, soc_data->sram_max_volt); return ret; } } else if (pre_voltage > new_voltage) { voltage = max(new_voltage, pre_vsram - soc_data->max_volt_shift); ret = regulator_set_voltage(drv->proc_reg, voltage, soc_data->proc_max_volt); if (ret) return ret; if (voltage == new_voltage) vsram = new_vsram; else vsram = max(new_vsram, voltage + soc_data->min_volt_shift); ret = regulator_set_voltage(drv->sram_reg, vsram, soc_data->sram_max_volt); if (ret) { regulator_set_voltage(drv->proc_reg, pre_voltage, soc_data->proc_max_volt); return ret; } } pre_voltage = voltage; pre_vsram = vsram; if (--retry_max < 0) { dev_err(dev, "over loop count, failed to set voltage\n"); return -EINVAL; } } while (voltage != new_voltage || vsram != new_vsram); return 0; } static int mtk_ccifreq_target(struct device *dev, unsigned long *freq, u32 flags) { struct mtk_ccifreq_drv *drv = dev_get_drvdata(dev); struct clk *cci_pll; struct dev_pm_opp *opp; unsigned long opp_rate; int voltage, pre_voltage, inter_voltage, target_voltage, ret; if (!drv) return -EINVAL; if (drv->pre_freq == *freq) return 0; inter_voltage = drv->inter_voltage; cci_pll = clk_get_parent(drv->cci_clk); opp_rate = *freq; opp = devfreq_recommended_opp(dev, &opp_rate, 1); if (IS_ERR(opp)) { dev_err(dev, "failed to find opp for freq: %ld\n", opp_rate); return PTR_ERR(opp); } mutex_lock(&drv->reg_lock); voltage = dev_pm_opp_get_voltage(opp); dev_pm_opp_put(opp); pre_voltage = regulator_get_voltage(drv->proc_reg); if (pre_voltage < 0) { dev_err(dev, "invalid vproc value: %d\n", pre_voltage); ret = pre_voltage; goto out_unlock; } /* scale up: set voltage first then freq. 
*/ target_voltage = max(inter_voltage, voltage); if (pre_voltage <= target_voltage) { ret = mtk_ccifreq_set_voltage(drv, target_voltage); if (ret) { dev_err(dev, "failed to scale up voltage\n"); goto out_restore_voltage; } } /* switch the cci clock to intermediate clock source. */ ret = clk_set_parent(drv->cci_clk, drv->inter_clk); if (ret) { dev_err(dev, "failed to re-parent cci clock\n"); goto out_restore_voltage; } /* set the original clock to target rate. */ ret = clk_set_rate(cci_pll, *freq); if (ret) { dev_err(dev, "failed to set cci pll rate: %d\n", ret); clk_set_parent(drv->cci_clk, cci_pll); goto out_restore_voltage; } /* switch the cci clock back to the original clock source. */ ret = clk_set_parent(drv->cci_clk, cci_pll); if (ret) { dev_err(dev, "failed to re-parent cci clock\n"); mtk_ccifreq_set_voltage(drv, inter_voltage); goto out_unlock; } /* * If the new voltage is lower than the intermediate voltage or the * original voltage, scale down to the new voltage. */ if (voltage < inter_voltage || voltage < pre_voltage) { ret = mtk_ccifreq_set_voltage(drv, voltage); if (ret) { dev_err(dev, "failed to scale down voltage\n"); goto out_unlock; } } drv->pre_freq = *freq; mutex_unlock(&drv->reg_lock); return 0; out_restore_voltage: mtk_ccifreq_set_voltage(drv, pre_voltage); out_unlock: mutex_unlock(&drv->reg_lock); return ret; } static int mtk_ccifreq_opp_notifier(struct notifier_block *nb, unsigned long event, void *data) { struct dev_pm_opp *opp = data; struct mtk_ccifreq_drv *drv; unsigned long freq, volt; drv = container_of(nb, struct mtk_ccifreq_drv, opp_nb); if (event == OPP_EVENT_ADJUST_VOLTAGE) { freq = dev_pm_opp_get_freq(opp); mutex_lock(&drv->reg_lock); /* current opp item is changed */ if (freq == drv->pre_freq) { volt = dev_pm_opp_get_voltage(opp); mtk_ccifreq_set_voltage(drv, volt); } mutex_unlock(&drv->reg_lock); } return 0; } static struct devfreq_dev_profile mtk_ccifreq_profile = { .target = mtk_ccifreq_target, }; static int 
mtk_ccifreq_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct mtk_ccifreq_drv *drv; struct devfreq_passive_data *passive_data; struct dev_pm_opp *opp; unsigned long rate, opp_volt; int ret; drv = devm_kzalloc(dev, sizeof(*drv), GFP_KERNEL); if (!drv) return -ENOMEM; drv->dev = dev; drv->soc_data = (const struct mtk_ccifreq_platform_data *) of_device_get_match_data(&pdev->dev); mutex_init(&drv->reg_lock); platform_set_drvdata(pdev, drv); drv->cci_clk = devm_clk_get(dev, "cci"); if (IS_ERR(drv->cci_clk)) { ret = PTR_ERR(drv->cci_clk); return dev_err_probe(dev, ret, "failed to get cci clk\n"); } drv->inter_clk = devm_clk_get(dev, "intermediate"); if (IS_ERR(drv->inter_clk)) { ret = PTR_ERR(drv->inter_clk); return dev_err_probe(dev, ret, "failed to get intermediate clk\n"); } drv->proc_reg = devm_regulator_get_optional(dev, "proc"); if (IS_ERR(drv->proc_reg)) { ret = PTR_ERR(drv->proc_reg); return dev_err_probe(dev, ret, "failed to get proc regulator\n"); } ret = regulator_enable(drv->proc_reg); if (ret) { dev_err(dev, "failed to enable proc regulator\n"); return ret; } drv->sram_reg = devm_regulator_get_optional(dev, "sram"); if (IS_ERR(drv->sram_reg)) { ret = PTR_ERR(drv->sram_reg); if (ret == -EPROBE_DEFER) goto out_free_resources; drv->sram_reg = NULL; } else { ret = regulator_enable(drv->sram_reg); if (ret) { dev_err(dev, "failed to enable sram regulator\n"); goto out_free_resources; } } /* * We assume min voltage is 0 and tracking target voltage using * min_volt_shift for each iteration. * The retry_max is 3 times of expected iteration count. 
*/ drv->vtrack_max = 3 * DIV_ROUND_UP(max(drv->soc_data->sram_max_volt, drv->soc_data->proc_max_volt), drv->soc_data->min_volt_shift); ret = clk_prepare_enable(drv->cci_clk); if (ret) goto out_free_resources; ret = dev_pm_opp_of_add_table(dev); if (ret) { dev_err(dev, "failed to add opp table: %d\n", ret); goto out_disable_cci_clk; } rate = clk_get_rate(drv->inter_clk); opp = dev_pm_opp_find_freq_ceil(dev, &rate); if (IS_ERR(opp)) { ret = PTR_ERR(opp); dev_err(dev, "failed to get intermediate opp: %d\n", ret); goto out_remove_opp_table; } drv->inter_voltage = dev_pm_opp_get_voltage(opp); dev_pm_opp_put(opp); rate = U32_MAX; opp = dev_pm_opp_find_freq_floor(drv->dev, &rate); if (IS_ERR(opp)) { dev_err(dev, "failed to get opp\n"); ret = PTR_ERR(opp); goto out_remove_opp_table; } opp_volt = dev_pm_opp_get_voltage(opp); dev_pm_opp_put(opp); ret = mtk_ccifreq_set_voltage(drv, opp_volt); if (ret) { dev_err(dev, "failed to scale to highest voltage %lu in proc_reg\n", opp_volt); goto out_remove_opp_table; } passive_data = devm_kzalloc(dev, sizeof(*passive_data), GFP_KERNEL); if (!passive_data) { ret = -ENOMEM; goto out_remove_opp_table; } passive_data->parent_type = CPUFREQ_PARENT_DEV; drv->devfreq = devm_devfreq_add_device(dev, &mtk_ccifreq_profile, DEVFREQ_GOV_PASSIVE, passive_data); if (IS_ERR(drv->devfreq)) { ret = -EPROBE_DEFER; dev_err(dev, "failed to add devfreq device: %ld\n", PTR_ERR(drv->devfreq)); goto out_remove_opp_table; } drv->opp_nb.notifier_call = mtk_ccifreq_opp_notifier; ret = dev_pm_opp_register_notifier(dev, &drv->opp_nb); if (ret) { dev_err(dev, "failed to register opp notifier: %d\n", ret); goto out_remove_opp_table; } return 0; out_remove_opp_table: dev_pm_opp_of_remove_table(dev); out_disable_cci_clk: clk_disable_unprepare(drv->cci_clk); out_free_resources: if (regulator_is_enabled(drv->proc_reg)) regulator_disable(drv->proc_reg); if (drv->sram_reg && regulator_is_enabled(drv->sram_reg)) regulator_disable(drv->sram_reg); return ret; } static int 
mtk_ccifreq_remove(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct mtk_ccifreq_drv *drv; drv = platform_get_drvdata(pdev); dev_pm_opp_unregister_notifier(dev, &drv->opp_nb); dev_pm_opp_of_remove_table(dev); clk_disable_unprepare(drv->cci_clk); regulator_disable(drv->proc_reg); if (drv->sram_reg) regulator_disable(drv->sram_reg); return 0; } static const struct mtk_ccifreq_platform_data mt8183_platform_data = { .min_volt_shift = 100000, .max_volt_shift = 200000, .proc_max_volt = 1150000, }; static const struct mtk_ccifreq_platform_data mt8186_platform_data = { .min_volt_shift = 100000, .max_volt_shift = 250000, .proc_max_volt = 1118750, .sram_min_volt = 850000, .sram_max_volt = 1118750, }; static const struct of_device_id mtk_ccifreq_machines[] = { { .compatible = "mediatek,mt8183-cci", .data = &mt8183_platform_data }, { .compatible = "mediatek,mt8186-cci", .data = &mt8186_platform_data }, { }, }; MODULE_DEVICE_TABLE(of, mtk_ccifreq_machines); static struct platform_driver mtk_ccifreq_platdrv = { .probe = mtk_ccifreq_probe, .remove = mtk_ccifreq_remove, .driver = { .name = "mtk-ccifreq", .of_match_table = mtk_ccifreq_machines, }, }; module_platform_driver(mtk_ccifreq_platdrv); MODULE_DESCRIPTION("MediaTek CCI devfreq driver"); MODULE_AUTHOR("Jia-Wei Chang <[email protected]>"); MODULE_LICENSE("GPL v2");
linux-master
drivers/devfreq/mtk-cci-devfreq.c
// SPDX-License-Identifier: GPL-2.0-only /* * devfreq-event: a framework to provide raw data and events of devfreq devices * * Copyright (C) 2015 Samsung Electronics * Author: Chanwoo Choi <[email protected]> * * This driver is based on drivers/devfreq/devfreq.c. */ #include <linux/devfreq-event.h> #include <linux/kernel.h> #include <linux/err.h> #include <linux/init.h> #include <linux/export.h> #include <linux/slab.h> #include <linux/list.h> #include <linux/of.h> static struct class *devfreq_event_class; /* The list of all devfreq event list */ static LIST_HEAD(devfreq_event_list); static DEFINE_MUTEX(devfreq_event_list_lock); #define to_devfreq_event(DEV) container_of(DEV, struct devfreq_event_dev, dev) /** * devfreq_event_enable_edev() - Enable the devfreq-event dev and increase * the enable_count of devfreq-event dev. * @edev : the devfreq-event device * * Note that this function increase the enable_count and enable the * devfreq-event device. The devfreq-event device should be enabled before * using it by devfreq device. */ int devfreq_event_enable_edev(struct devfreq_event_dev *edev) { int ret = 0; if (!edev || !edev->desc) return -EINVAL; mutex_lock(&edev->lock); if (edev->desc->ops && edev->desc->ops->enable && edev->enable_count == 0) { ret = edev->desc->ops->enable(edev); if (ret < 0) goto err; } edev->enable_count++; err: mutex_unlock(&edev->lock); return ret; } EXPORT_SYMBOL_GPL(devfreq_event_enable_edev); /** * devfreq_event_disable_edev() - Disable the devfreq-event dev and decrease * the enable_count of the devfreq-event dev. * @edev : the devfreq-event device * * Note that this function decrease the enable_count and disable the * devfreq-event device. After the devfreq-event device is disabled, * devfreq device can't use the devfreq-event device for get/set/reset * operations. 
*/ int devfreq_event_disable_edev(struct devfreq_event_dev *edev) { int ret = 0; if (!edev || !edev->desc) return -EINVAL; mutex_lock(&edev->lock); if (edev->enable_count <= 0) { dev_warn(&edev->dev, "unbalanced enable_count\n"); ret = -EIO; goto err; } if (edev->desc->ops && edev->desc->ops->disable && edev->enable_count == 1) { ret = edev->desc->ops->disable(edev); if (ret < 0) goto err; } edev->enable_count--; err: mutex_unlock(&edev->lock); return ret; } EXPORT_SYMBOL_GPL(devfreq_event_disable_edev); /** * devfreq_event_is_enabled() - Check whether devfreq-event dev is enabled or * not. * @edev : the devfreq-event device * * Note that this function check whether devfreq-event dev is enabled or not. * If return true, the devfreq-event dev is enabeld. If return false, the * devfreq-event dev is disabled. */ bool devfreq_event_is_enabled(struct devfreq_event_dev *edev) { bool enabled = false; if (!edev || !edev->desc) return enabled; mutex_lock(&edev->lock); if (edev->enable_count > 0) enabled = true; mutex_unlock(&edev->lock); return enabled; } EXPORT_SYMBOL_GPL(devfreq_event_is_enabled); /** * devfreq_event_set_event() - Set event to devfreq-event dev to start. * @edev : the devfreq-event device * * Note that this function set the event to the devfreq-event device to start * for getting the event data which could be various event type. */ int devfreq_event_set_event(struct devfreq_event_dev *edev) { int ret; if (!edev || !edev->desc) return -EINVAL; if (!edev->desc->ops || !edev->desc->ops->set_event) return -EINVAL; if (!devfreq_event_is_enabled(edev)) return -EPERM; mutex_lock(&edev->lock); ret = edev->desc->ops->set_event(edev); mutex_unlock(&edev->lock); return ret; } EXPORT_SYMBOL_GPL(devfreq_event_set_event); /** * devfreq_event_get_event() - Get {load|total}_count from devfreq-event dev. 
* @edev : the devfreq-event device * @edata : the calculated data of devfreq-event device * * Note that this function get the calculated event data from devfreq-event dev * after stoping the progress of whole sequence of devfreq-event dev. */ int devfreq_event_get_event(struct devfreq_event_dev *edev, struct devfreq_event_data *edata) { int ret; if (!edev || !edev->desc) return -EINVAL; if (!edev->desc->ops || !edev->desc->ops->get_event) return -EINVAL; if (!devfreq_event_is_enabled(edev)) return -EINVAL; edata->total_count = edata->load_count = 0; mutex_lock(&edev->lock); ret = edev->desc->ops->get_event(edev, edata); if (ret < 0) edata->total_count = edata->load_count = 0; mutex_unlock(&edev->lock); return ret; } EXPORT_SYMBOL_GPL(devfreq_event_get_event); /** * devfreq_event_reset_event() - Reset all opeations of devfreq-event dev. * @edev : the devfreq-event device * * Note that this function stop all operations of devfreq-event dev and reset * the current event data to make the devfreq-event device into initial state. */ int devfreq_event_reset_event(struct devfreq_event_dev *edev) { int ret = 0; if (!edev || !edev->desc) return -EINVAL; if (!devfreq_event_is_enabled(edev)) return -EPERM; mutex_lock(&edev->lock); if (edev->desc->ops && edev->desc->ops->reset) ret = edev->desc->ops->reset(edev); mutex_unlock(&edev->lock); return ret; } EXPORT_SYMBOL_GPL(devfreq_event_reset_event); /** * devfreq_event_get_edev_by_phandle() - Get the devfreq-event dev from * devicetree. * @dev : the pointer to the given device * @phandle_name: name of property holding a phandle value * @index : the index into list of devfreq-event device * * Note that this function return the pointer of devfreq-event device. 
*/ struct devfreq_event_dev *devfreq_event_get_edev_by_phandle(struct device *dev, const char *phandle_name, int index) { struct device_node *node; struct devfreq_event_dev *edev; if (!dev->of_node || !phandle_name) return ERR_PTR(-EINVAL); node = of_parse_phandle(dev->of_node, phandle_name, index); if (!node) return ERR_PTR(-ENODEV); mutex_lock(&devfreq_event_list_lock); list_for_each_entry(edev, &devfreq_event_list, node) { if (edev->dev.parent && device_match_of_node(edev->dev.parent, node)) goto out; } list_for_each_entry(edev, &devfreq_event_list, node) { if (of_node_name_eq(node, edev->desc->name)) goto out; } edev = NULL; out: mutex_unlock(&devfreq_event_list_lock); if (!edev) { of_node_put(node); return ERR_PTR(-ENODEV); } of_node_put(node); return edev; } EXPORT_SYMBOL_GPL(devfreq_event_get_edev_by_phandle); /** * devfreq_event_get_edev_count() - Get the count of devfreq-event dev * @dev : the pointer to the given device * @phandle_name: name of property holding a phandle value * * Note that this function return the count of devfreq-event devices. */ int devfreq_event_get_edev_count(struct device *dev, const char *phandle_name) { int count; if (!dev->of_node || !phandle_name) { dev_err(dev, "device does not have a device node entry\n"); return -EINVAL; } count = of_property_count_elems_of_size(dev->of_node, phandle_name, sizeof(u32)); if (count < 0) { dev_err(dev, "failed to get the count of devfreq-event in %pOF node\n", dev->of_node); return count; } return count; } EXPORT_SYMBOL_GPL(devfreq_event_get_edev_count); static void devfreq_event_release_edev(struct device *dev) { struct devfreq_event_dev *edev = to_devfreq_event(dev); kfree(edev); } /** * devfreq_event_add_edev() - Add new devfreq-event device. * @dev : the device owning the devfreq-event device being created * @desc : the devfreq-event device's descriptor which include essential * data for devfreq-event device. 
* * Note that this function add new devfreq-event device to devfreq-event class * list and register the device of the devfreq-event device. */ struct devfreq_event_dev *devfreq_event_add_edev(struct device *dev, struct devfreq_event_desc *desc) { struct devfreq_event_dev *edev; static atomic_t event_no = ATOMIC_INIT(-1); int ret; if (!dev || !desc) return ERR_PTR(-EINVAL); if (!desc->name || !desc->ops) return ERR_PTR(-EINVAL); if (!desc->ops->set_event || !desc->ops->get_event) return ERR_PTR(-EINVAL); edev = kzalloc(sizeof(struct devfreq_event_dev), GFP_KERNEL); if (!edev) return ERR_PTR(-ENOMEM); mutex_init(&edev->lock); edev->desc = desc; edev->enable_count = 0; edev->dev.parent = dev; edev->dev.class = devfreq_event_class; edev->dev.release = devfreq_event_release_edev; dev_set_name(&edev->dev, "event%d", atomic_inc_return(&event_no)); ret = device_register(&edev->dev); if (ret < 0) { put_device(&edev->dev); return ERR_PTR(ret); } dev_set_drvdata(&edev->dev, edev); INIT_LIST_HEAD(&edev->node); mutex_lock(&devfreq_event_list_lock); list_add(&edev->node, &devfreq_event_list); mutex_unlock(&devfreq_event_list_lock); return edev; } EXPORT_SYMBOL_GPL(devfreq_event_add_edev); /** * devfreq_event_remove_edev() - Remove the devfreq-event device registered. * @edev : the devfreq-event device * * Note that this function removes the registered devfreq-event device. 
*/ int devfreq_event_remove_edev(struct devfreq_event_dev *edev) { if (!edev) return -EINVAL; WARN_ON(edev->enable_count); mutex_lock(&devfreq_event_list_lock); list_del(&edev->node); mutex_unlock(&devfreq_event_list_lock); device_unregister(&edev->dev); return 0; } EXPORT_SYMBOL_GPL(devfreq_event_remove_edev); static int devm_devfreq_event_match(struct device *dev, void *res, void *data) { struct devfreq_event_dev **r = res; if (WARN_ON(!r || !*r)) return 0; return *r == data; } static void devm_devfreq_event_release(struct device *dev, void *res) { devfreq_event_remove_edev(*(struct devfreq_event_dev **)res); } /** * devm_devfreq_event_add_edev() - Resource-managed devfreq_event_add_edev() * @dev : the device owning the devfreq-event device being created * @desc : the devfreq-event device's descriptor which include essential * data for devfreq-event device. * * Note that this function manages automatically the memory of devfreq-event * device using device resource management and simplify the free operation * for memory of devfreq-event device. */ struct devfreq_event_dev *devm_devfreq_event_add_edev(struct device *dev, struct devfreq_event_desc *desc) { struct devfreq_event_dev **ptr, *edev; ptr = devres_alloc(devm_devfreq_event_release, sizeof(*ptr), GFP_KERNEL); if (!ptr) return ERR_PTR(-ENOMEM); edev = devfreq_event_add_edev(dev, desc); if (IS_ERR(edev)) { devres_free(ptr); return ERR_PTR(-ENOMEM); } *ptr = edev; devres_add(dev, ptr); return edev; } EXPORT_SYMBOL_GPL(devm_devfreq_event_add_edev); /** * devm_devfreq_event_remove_edev()- Resource-managed devfreq_event_remove_edev() * @dev : the device owning the devfreq-event device being created * @edev : the devfreq-event device * * Note that this function manages automatically the memory of devfreq-event * device using device resource management. 
*/ void devm_devfreq_event_remove_edev(struct device *dev, struct devfreq_event_dev *edev) { WARN_ON(devres_release(dev, devm_devfreq_event_release, devm_devfreq_event_match, edev)); } EXPORT_SYMBOL_GPL(devm_devfreq_event_remove_edev); /* * Device attributes for devfreq-event class. */ static ssize_t name_show(struct device *dev, struct device_attribute *attr, char *buf) { struct devfreq_event_dev *edev = to_devfreq_event(dev); if (!edev || !edev->desc) return -EINVAL; return sprintf(buf, "%s\n", edev->desc->name); } static DEVICE_ATTR_RO(name); static ssize_t enable_count_show(struct device *dev, struct device_attribute *attr, char *buf) { struct devfreq_event_dev *edev = to_devfreq_event(dev); if (!edev || !edev->desc) return -EINVAL; return sprintf(buf, "%d\n", edev->enable_count); } static DEVICE_ATTR_RO(enable_count); static struct attribute *devfreq_event_attrs[] = { &dev_attr_name.attr, &dev_attr_enable_count.attr, NULL, }; ATTRIBUTE_GROUPS(devfreq_event); static int __init devfreq_event_init(void) { devfreq_event_class = class_create("devfreq-event"); if (IS_ERR(devfreq_event_class)) { pr_err("%s: couldn't create class\n", __FILE__); return PTR_ERR(devfreq_event_class); } devfreq_event_class->dev_groups = devfreq_event_groups; return 0; } subsys_initcall(devfreq_event_init);
linux-master
drivers/devfreq/devfreq-event.c
// SPDX-License-Identifier: GPL-2.0-only /* * exynos-nocp.c - Exynos NoC (Network On Chip) Probe support * * Copyright (c) 2016 Samsung Electronics Co., Ltd. * Author : Chanwoo Choi <[email protected]> */ #include <linux/clk.h> #include <linux/module.h> #include <linux/devfreq-event.h> #include <linux/kernel.h> #include <linux/of_address.h> #include <linux/platform_device.h> #include <linux/regmap.h> #include "exynos-nocp.h" struct exynos_nocp { struct devfreq_event_dev *edev; struct devfreq_event_desc desc; struct device *dev; struct regmap *regmap; struct clk *clk; }; /* * The devfreq-event ops structure for nocp probe. */ static int exynos_nocp_set_event(struct devfreq_event_dev *edev) { struct exynos_nocp *nocp = devfreq_event_get_drvdata(edev); int ret; /* Disable NoC probe */ ret = regmap_update_bits(nocp->regmap, NOCP_MAIN_CTL, NOCP_MAIN_CTL_STATEN_MASK, 0); if (ret < 0) { dev_err(nocp->dev, "failed to disable the NoC probe device\n"); return ret; } /* Set a statistics dump period to 0 */ ret = regmap_write(nocp->regmap, NOCP_STAT_PERIOD, 0x0); if (ret < 0) goto out; /* Set the IntEvent fields of *_SRC */ ret = regmap_update_bits(nocp->regmap, NOCP_COUNTERS_0_SRC, NOCP_CNT_SRC_INTEVENT_MASK, NOCP_CNT_SRC_INTEVENT_BYTE_MASK); if (ret < 0) goto out; ret = regmap_update_bits(nocp->regmap, NOCP_COUNTERS_1_SRC, NOCP_CNT_SRC_INTEVENT_MASK, NOCP_CNT_SRC_INTEVENT_CHAIN_MASK); if (ret < 0) goto out; ret = regmap_update_bits(nocp->regmap, NOCP_COUNTERS_2_SRC, NOCP_CNT_SRC_INTEVENT_MASK, NOCP_CNT_SRC_INTEVENT_CYCLE_MASK); if (ret < 0) goto out; ret = regmap_update_bits(nocp->regmap, NOCP_COUNTERS_3_SRC, NOCP_CNT_SRC_INTEVENT_MASK, NOCP_CNT_SRC_INTEVENT_CHAIN_MASK); if (ret < 0) goto out; /* Set an alarm with a max/min value of 0 to generate StatALARM */ ret = regmap_write(nocp->regmap, NOCP_STAT_ALARM_MIN, 0x0); if (ret < 0) goto out; ret = regmap_write(nocp->regmap, NOCP_STAT_ALARM_MAX, 0x0); if (ret < 0) goto out; /* Set AlarmMode */ ret = 
regmap_update_bits(nocp->regmap, NOCP_COUNTERS_0_ALARM_MODE, NOCP_CNT_ALARM_MODE_MASK, NOCP_CNT_ALARM_MODE_MIN_MAX_MASK); if (ret < 0) goto out; ret = regmap_update_bits(nocp->regmap, NOCP_COUNTERS_1_ALARM_MODE, NOCP_CNT_ALARM_MODE_MASK, NOCP_CNT_ALARM_MODE_MIN_MAX_MASK); if (ret < 0) goto out; ret = regmap_update_bits(nocp->regmap, NOCP_COUNTERS_2_ALARM_MODE, NOCP_CNT_ALARM_MODE_MASK, NOCP_CNT_ALARM_MODE_MIN_MAX_MASK); if (ret < 0) goto out; ret = regmap_update_bits(nocp->regmap, NOCP_COUNTERS_3_ALARM_MODE, NOCP_CNT_ALARM_MODE_MASK, NOCP_CNT_ALARM_MODE_MIN_MAX_MASK); if (ret < 0) goto out; /* Enable the measurements by setting AlarmEn and StatEn */ ret = regmap_update_bits(nocp->regmap, NOCP_MAIN_CTL, NOCP_MAIN_CTL_STATEN_MASK | NOCP_MAIN_CTL_ALARMEN_MASK, NOCP_MAIN_CTL_STATEN_MASK | NOCP_MAIN_CTL_ALARMEN_MASK); if (ret < 0) goto out; /* Set GlobalEN */ ret = regmap_update_bits(nocp->regmap, NOCP_CFG_CTL, NOCP_CFG_CTL_GLOBALEN_MASK, NOCP_CFG_CTL_GLOBALEN_MASK); if (ret < 0) goto out; /* Enable NoC probe */ ret = regmap_update_bits(nocp->regmap, NOCP_MAIN_CTL, NOCP_MAIN_CTL_STATEN_MASK, NOCP_MAIN_CTL_STATEN_MASK); if (ret < 0) goto out; return 0; out: /* Reset NoC probe */ if (regmap_update_bits(nocp->regmap, NOCP_MAIN_CTL, NOCP_MAIN_CTL_STATEN_MASK, 0)) { dev_err(nocp->dev, "Failed to reset NoC probe device\n"); } return ret; } static int exynos_nocp_get_event(struct devfreq_event_dev *edev, struct devfreq_event_data *edata) { struct exynos_nocp *nocp = devfreq_event_get_drvdata(edev); unsigned int counter[4]; int ret; /* Read cycle count */ ret = regmap_read(nocp->regmap, NOCP_COUNTERS_0_VAL, &counter[0]); if (ret < 0) goto out; ret = regmap_read(nocp->regmap, NOCP_COUNTERS_1_VAL, &counter[1]); if (ret < 0) goto out; ret = regmap_read(nocp->regmap, NOCP_COUNTERS_2_VAL, &counter[2]); if (ret < 0) goto out; ret = regmap_read(nocp->regmap, NOCP_COUNTERS_3_VAL, &counter[3]); if (ret < 0) goto out; edata->load_count = ((counter[1] << 16) | counter[0]); 
edata->total_count = ((counter[3] << 16) | counter[2]); dev_dbg(&edev->dev, "%s (event: %ld/%ld)\n", edev->desc->name, edata->load_count, edata->total_count); return 0; out: dev_err(nocp->dev, "Failed to read the counter of NoC probe device\n"); return ret; } static const struct devfreq_event_ops exynos_nocp_ops = { .set_event = exynos_nocp_set_event, .get_event = exynos_nocp_get_event, }; static const struct of_device_id exynos_nocp_id_match[] = { { .compatible = "samsung,exynos5420-nocp", }, { /* sentinel */ }, }; MODULE_DEVICE_TABLE(of, exynos_nocp_id_match); static struct regmap_config exynos_nocp_regmap_config = { .reg_bits = 32, .val_bits = 32, .reg_stride = 4, .max_register = NOCP_COUNTERS_3_VAL, }; static int exynos_nocp_parse_dt(struct platform_device *pdev, struct exynos_nocp *nocp) { struct device *dev = nocp->dev; struct device_node *np = dev->of_node; struct resource *res; void __iomem *base; if (!np) { dev_err(dev, "failed to find devicetree node\n"); return -EINVAL; } nocp->clk = devm_clk_get(dev, "nocp"); if (IS_ERR(nocp->clk)) nocp->clk = NULL; /* Maps the memory mapped IO to control nocp register */ base = devm_platform_get_and_ioremap_resource(pdev, 0, &res); if (IS_ERR(base)) return PTR_ERR(base); exynos_nocp_regmap_config.max_register = resource_size(res) - 4; nocp->regmap = devm_regmap_init_mmio(dev, base, &exynos_nocp_regmap_config); if (IS_ERR(nocp->regmap)) { dev_err(dev, "failed to initialize regmap\n"); return PTR_ERR(nocp->regmap); } return 0; } static int exynos_nocp_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct device_node *np = dev->of_node; struct exynos_nocp *nocp; int ret; nocp = devm_kzalloc(&pdev->dev, sizeof(*nocp), GFP_KERNEL); if (!nocp) return -ENOMEM; nocp->dev = &pdev->dev; /* Parse dt data to get resource */ ret = exynos_nocp_parse_dt(pdev, nocp); if (ret < 0) { dev_err(&pdev->dev, "failed to parse devicetree for resource\n"); return ret; } /* Add devfreq-event device to measure the 
bandwidth of NoC */ nocp->desc.ops = &exynos_nocp_ops; nocp->desc.driver_data = nocp; nocp->desc.name = np->full_name; nocp->edev = devm_devfreq_event_add_edev(&pdev->dev, &nocp->desc); if (IS_ERR(nocp->edev)) { dev_err(&pdev->dev, "failed to add devfreq-event device\n"); return PTR_ERR(nocp->edev); } platform_set_drvdata(pdev, nocp); ret = clk_prepare_enable(nocp->clk); if (ret) { dev_err(&pdev->dev, "failed to prepare ppmu clock\n"); return ret; } pr_info("exynos-nocp: new NoC Probe device registered: %s\n", dev_name(dev)); return 0; } static int exynos_nocp_remove(struct platform_device *pdev) { struct exynos_nocp *nocp = platform_get_drvdata(pdev); clk_disable_unprepare(nocp->clk); return 0; } static struct platform_driver exynos_nocp_driver = { .probe = exynos_nocp_probe, .remove = exynos_nocp_remove, .driver = { .name = "exynos-nocp", .of_match_table = exynos_nocp_id_match, }, }; module_platform_driver(exynos_nocp_driver); MODULE_DESCRIPTION("Exynos NoC (Network on Chip) Probe driver"); MODULE_AUTHOR("Chanwoo Choi <[email protected]>"); MODULE_LICENSE("GPL");
linux-master
drivers/devfreq/event/exynos-nocp.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * exynos_ppmu.c - Exynos PPMU (Platform Performance Monitoring Unit) support
 *
 * Copyright (c) 2014-2015 Samsung Electronics Co., Ltd.
 * Author : Chanwoo Choi <[email protected]>
 *
 * This driver is based on drivers/devfreq/exynos/exynos_ppmu.c
 */

#include <linux/clk.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/suspend.h>
#include <linux/devfreq-event.h>

#include "exynos-ppmu.h"

/* Two incompatible PPMU register layouts are supported by this driver. */
enum exynos_ppmu_type {
	EXYNOS_TYPE_PPMU,
	EXYNOS_TYPE_PPMU_V2,
};

struct exynos_ppmu_data {
	struct clk *clk;	/* optional "ppmu" gate clock; NULL if absent */
};

/* Per-device driver state shared by all events of one PPMU instance. */
struct exynos_ppmu {
	struct devfreq_event_dev **edev;	/* one event device per DT child */
	struct devfreq_event_desc *desc;	/* array of num_events descriptors */
	unsigned int num_events;

	struct device *dev;
	struct regmap *regmap;			/* MMIO regmap over the PPMU block */

	struct exynos_ppmu_data ppmu;
	enum exynos_ppmu_type ppmu_type;
};

/*
 * Expand one event name into its four counter slots; the DT node name
 * ("ppmu-eventN-<name>") selects which hardware counter (PMNCNT0..3) is used.
 */
#define PPMU_EVENT(name)			\
	{ "ppmu-event0-"#name, PPMU_PMNCNT0 },	\
	{ "ppmu-event1-"#name, PPMU_PMNCNT1 },	\
	{ "ppmu-event2-"#name, PPMU_PMNCNT2 },	\
	{ "ppmu-event3-"#name, PPMU_PMNCNT3 }

static struct __exynos_ppmu_events {
	char *name;
	int id;
} ppmu_events[] = {
	/* For Exynos3250, Exynos4 and Exynos5260 */
	PPMU_EVENT(g3d),
	PPMU_EVENT(fsys),

	/* For Exynos4 SoCs and Exynos3250 */
	PPMU_EVENT(dmc0),
	PPMU_EVENT(dmc1),
	PPMU_EVENT(cpu),
	PPMU_EVENT(rightbus),
	PPMU_EVENT(leftbus),
	PPMU_EVENT(lcd0),
	PPMU_EVENT(camif),

	/* Only for Exynos3250 and Exynos5260 */
	PPMU_EVENT(mfc),

	/* Only for Exynos4 SoCs */
	PPMU_EVENT(mfc-left),
	PPMU_EVENT(mfc-right),

	/* Only for Exynos5260 SoCs */
	PPMU_EVENT(drex0-s0),
	PPMU_EVENT(drex0-s1),
	PPMU_EVENT(drex1-s0),
	PPMU_EVENT(drex1-s1),
	PPMU_EVENT(eagle),
	PPMU_EVENT(kfc),
	PPMU_EVENT(isp),
	PPMU_EVENT(fimc),
	PPMU_EVENT(gscl),
	PPMU_EVENT(mscl),
	PPMU_EVENT(fimd0x),
	PPMU_EVENT(fimd1x),

	/* Only for Exynos5433 SoCs */
	PPMU_EVENT(d0-cpu),
	PPMU_EVENT(d0-general),
	PPMU_EVENT(d0-rt),
	PPMU_EVENT(d1-cpu),
	PPMU_EVENT(d1-general),
	PPMU_EVENT(d1-rt),

	/* For Exynos5422 SoC, deprecated (backwards compatible) */
	PPMU_EVENT(dmc0_0),
	PPMU_EVENT(dmc0_1),
	PPMU_EVENT(dmc1_0),
	PPMU_EVENT(dmc1_1),

	/* For Exynos5422 SoC */
	PPMU_EVENT(dmc0-0),
	PPMU_EVENT(dmc0-1),
	PPMU_EVENT(dmc1-0),
	PPMU_EVENT(dmc1-1),
};

/* Map an event-device name to its hardware counter id, or -EINVAL. */
static int __exynos_ppmu_find_ppmu_id(const char *edev_name)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(ppmu_events); i++)
		if (!strcmp(edev_name, ppmu_events[i].name))
			return ppmu_events[i].id;

	return -EINVAL;
}

static int exynos_ppmu_find_ppmu_id(struct devfreq_event_dev *edev)
{
	return __exynos_ppmu_find_ppmu_id(edev->desc->name);
}

/*
 * The devfreq-event ops structure for PPMU v1.1
 */
/* Stop all v1 counters and power the PPMU block down (clear PMNC enable). */
static int exynos_ppmu_disable(struct devfreq_event_dev *edev)
{
	struct exynos_ppmu *info = devfreq_event_get_drvdata(edev);
	int ret;
	u32 pmnc;

	/* Disable all counters */
	ret = regmap_write(info->regmap, PPMU_CNTENC,
				PPMU_CCNT_MASK |
				PPMU_PMCNT0_MASK |
				PPMU_PMCNT1_MASK |
				PPMU_PMCNT2_MASK |
				PPMU_PMCNT3_MASK);
	if (ret < 0)
		return ret;

	/* Disable PPMU */
	ret = regmap_read(info->regmap, PPMU_PMNC, &pmnc);
	if (ret < 0)
		return ret;

	pmnc &= ~PPMU_PMNC_ENABLE_MASK;
	ret = regmap_write(info->regmap, PPMU_PMNC, pmnc);
	if (ret < 0)
		return ret;

	return 0;
}

/*
 * Arm one v1 counter: enable the cycle counter plus the counter selected by
 * the event id, program the monitored data type, then reset and start the PPMU.
 */
static int exynos_ppmu_set_event(struct devfreq_event_dev *edev)
{
	struct exynos_ppmu *info = devfreq_event_get_drvdata(edev);
	int id = exynos_ppmu_find_ppmu_id(edev);
	int ret;
	u32 pmnc, cntens;

	if (id < 0)
		return id;

	/* Enable specific counter */
	ret = regmap_read(info->regmap, PPMU_CNTENS, &cntens);
	if (ret < 0)
		return ret;

	cntens |= (PPMU_CCNT_MASK | (PPMU_ENABLE << id));
	ret = regmap_write(info->regmap, PPMU_CNTENS, cntens);
	if (ret < 0)
		return ret;

	/* Set the event of proper data type monitoring */
	ret = regmap_write(info->regmap, PPMU_BEVTxSEL(id),
			   edev->desc->event_type);
	if (ret < 0)
		return ret;

	/* Reset cycle counter/performance counter and enable PPMU */
	ret = regmap_read(info->regmap, PPMU_PMNC, &pmnc);
	if (ret < 0)
		return ret;

	pmnc &= ~(PPMU_PMNC_ENABLE_MASK
			| PPMU_PMNC_COUNTER_RESET_MASK
			| PPMU_PMNC_CC_RESET_MASK);
	pmnc |= (PPMU_ENABLE << PPMU_PMNC_ENABLE_SHIFT);
	pmnc |= (PPMU_ENABLE << PPMU_PMNC_COUNTER_RESET_SHIFT);
	pmnc |= (PPMU_ENABLE << PPMU_PMNC_CC_RESET_SHIFT);

	ret = regmap_write(info->regmap, PPMU_PMNC, pmnc);
	if (ret < 0)
		return ret;

	return 0;
}

/*
 * Read the v1 counters: stop the PPMU, fetch cycle count (total) and the
 * event counter (load), then disable the counters that were armed.
 */
static int exynos_ppmu_get_event(struct devfreq_event_dev *edev,
				struct devfreq_event_data *edata)
{
	struct exynos_ppmu *info = devfreq_event_get_drvdata(edev);
	int id = exynos_ppmu_find_ppmu_id(edev);
	unsigned int total_count, load_count;
	unsigned int pmcnt3_high, pmcnt3_low;
	unsigned int pmnc, cntenc;
	int ret;

	if (id < 0)
		return -EINVAL;

	/* Disable PPMU */
	ret = regmap_read(info->regmap, PPMU_PMNC, &pmnc);
	if (ret < 0)
		return ret;

	pmnc &= ~PPMU_PMNC_ENABLE_MASK;
	ret = regmap_write(info->regmap, PPMU_PMNC, pmnc);
	if (ret < 0)
		return ret;

	/* Read cycle count */
	ret = regmap_read(info->regmap, PPMU_CCNT, &total_count);
	if (ret < 0)
		return ret;
	edata->total_count = total_count;

	/* Read performance count */
	switch (id) {
	case PPMU_PMNCNT0:
	case PPMU_PMNCNT1:
	case PPMU_PMNCNT2:
		ret = regmap_read(info->regmap, PPMU_PMNCT(id), &load_count);
		if (ret < 0)
			return ret;
		edata->load_count = load_count;
		break;
	case PPMU_PMNCNT3:
		/*
		 * PMNCNT3 is split across two registers; the high part is
		 * shifted by 8 here — NOTE(review): assumes the high register
		 * extends the counter by 8 bits on v1 hardware (contrast the
		 * 32-bit shift used by the v2 path below) — confirm against
		 * the PPMU v1.1 register description.
		 */
		ret = regmap_read(info->regmap, PPMU_PMCNT3_HIGH, &pmcnt3_high);
		if (ret < 0)
			return ret;

		ret = regmap_read(info->regmap, PPMU_PMCNT3_LOW, &pmcnt3_low);
		if (ret < 0)
			return ret;

		edata->load_count = ((pmcnt3_high << 8) | pmcnt3_low);
		break;
	default:
		return -EINVAL;
	}

	/* Disable specific counter */
	ret = regmap_read(info->regmap, PPMU_CNTENC, &cntenc);
	if (ret < 0)
		return ret;

	cntenc |= (PPMU_CCNT_MASK | (PPMU_ENABLE << id));
	ret = regmap_write(info->regmap, PPMU_CNTENC, cntenc);
	if (ret < 0)
		return ret;

	dev_dbg(&edev->dev, "%s (event: %ld/%ld)\n", edev->desc->name,
					edata->load_count, edata->total_count);

	return 0;
}

static const struct devfreq_event_ops exynos_ppmu_ops = {
	.disable = exynos_ppmu_disable,
	.set_event = exynos_ppmu_set_event,
	.get_event = exynos_ppmu_get_event,
};

/*
 * The devfreq-event ops structure for PPMU v2.0
 */
/*
 * Fully quiesce a v2 PPMU: clear pending flags/interrupts, disable and reset
 * every counter, zero all event/CIG configuration, then clear PMNC enable.
 */
static int exynos_ppmu_v2_disable(struct devfreq_event_dev *edev)
{
	struct exynos_ppmu *info = devfreq_event_get_drvdata(edev);
	int ret;
	u32 pmnc, clear;

	/* Disable all counters */
	clear = (PPMU_CCNT_MASK | PPMU_PMCNT0_MASK | PPMU_PMCNT1_MASK
		| PPMU_PMCNT2_MASK | PPMU_PMCNT3_MASK);
	ret = regmap_write(info->regmap, PPMU_V2_FLAG, clear);
	if (ret < 0)
		return ret;

	ret = regmap_write(info->regmap, PPMU_V2_INTENC, clear);
	if (ret < 0)
		return ret;

	ret = regmap_write(info->regmap, PPMU_V2_CNTENC, clear);
	if (ret < 0)
		return ret;

	ret = regmap_write(info->regmap, PPMU_V2_CNT_RESET, clear);
	if (ret < 0)
		return ret;

	ret = regmap_write(info->regmap, PPMU_V2_CIG_CFG0, 0x0);
	if (ret < 0)
		return ret;

	ret = regmap_write(info->regmap, PPMU_V2_CIG_CFG1, 0x0);
	if (ret < 0)
		return ret;

	ret = regmap_write(info->regmap, PPMU_V2_CIG_CFG2, 0x0);
	if (ret < 0)
		return ret;

	ret = regmap_write(info->regmap, PPMU_V2_CIG_RESULT, 0x0);
	if (ret < 0)
		return ret;

	ret = regmap_write(info->regmap, PPMU_V2_CNT_AUTO, 0x0);
	if (ret < 0)
		return ret;

	ret = regmap_write(info->regmap, PPMU_V2_CH_EV0_TYPE, 0x0);
	if (ret < 0)
		return ret;

	ret = regmap_write(info->regmap, PPMU_V2_CH_EV1_TYPE, 0x0);
	if (ret < 0)
		return ret;

	ret = regmap_write(info->regmap, PPMU_V2_CH_EV2_TYPE, 0x0);
	if (ret < 0)
		return ret;

	ret = regmap_write(info->regmap, PPMU_V2_CH_EV3_TYPE, 0x0);
	if (ret < 0)
		return ret;

	ret = regmap_write(info->regmap, PPMU_V2_SM_ID_V, 0x0);
	if (ret < 0)
		return ret;

	ret = regmap_write(info->regmap, PPMU_V2_SM_ID_A, 0x0);
	if (ret < 0)
		return ret;

	ret = regmap_write(info->regmap, PPMU_V2_SM_OTHERS_V, 0x0);
	if (ret < 0)
		return ret;

	ret = regmap_write(info->regmap, PPMU_V2_SM_OTHERS_A, 0x0);
	if (ret < 0)
		return ret;

	ret = regmap_write(info->regmap, PPMU_V2_INTERRUPT_RESET, 0x0);
	if (ret < 0)
		return ret;

	/* Disable PPMU */
	ret = regmap_read(info->regmap, PPMU_V2_PMNC, &pmnc);
	if (ret < 0)
		return ret;

	pmnc &= ~PPMU_PMNC_ENABLE_MASK;
	ret = regmap_write(info->regmap, PPMU_V2_PMNC, pmnc);
	if (ret < 0)
		return ret;

	return 0;
}

/*
 * Arm one v2 counter in manual-start mode.
 * NOTE(review): unlike the v1 path, the id returned by
 * exynos_ppmu_find_ppmu_id() is not checked for < 0 before being used as a
 * shift/register index — confirm callers guarantee a valid event name.
 */
static int exynos_ppmu_v2_set_event(struct devfreq_event_dev *edev)
{
	struct exynos_ppmu *info = devfreq_event_get_drvdata(edev);
	unsigned int pmnc, cntens;
	int id = exynos_ppmu_find_ppmu_id(edev);
	int ret;

	/* Enable all counters */
	ret = regmap_read(info->regmap, PPMU_V2_CNTENS, &cntens);
	if (ret < 0)
		return ret;

	cntens |= (PPMU_CCNT_MASK | (PPMU_ENABLE << id));
	ret = regmap_write(info->regmap, PPMU_V2_CNTENS, cntens);
	if (ret < 0)
		return ret;

	/* Set the event of proper data type monitoring */
	ret = regmap_write(info->regmap, PPMU_V2_CH_EVx_TYPE(id),
			   edev->desc->event_type);
	if (ret < 0)
		return ret;

	/* Reset cycle counter/performance counter and enable PPMU */
	ret = regmap_read(info->regmap, PPMU_V2_PMNC, &pmnc);
	if (ret < 0)
		return ret;

	pmnc &= ~(PPMU_PMNC_ENABLE_MASK
			| PPMU_PMNC_COUNTER_RESET_MASK
			| PPMU_PMNC_CC_RESET_MASK
			| PPMU_PMNC_CC_DIVIDER_MASK
			| PPMU_V2_PMNC_START_MODE_MASK);
	pmnc |= (PPMU_ENABLE << PPMU_PMNC_ENABLE_SHIFT);
	pmnc |= (PPMU_ENABLE << PPMU_PMNC_COUNTER_RESET_SHIFT);
	pmnc |= (PPMU_ENABLE << PPMU_PMNC_CC_RESET_SHIFT);
	pmnc |= (PPMU_V2_MODE_MANUAL << PPMU_V2_PMNC_START_MODE_SHIFT);

	ret = regmap_write(info->regmap, PPMU_V2_PMNC, pmnc);
	if (ret < 0)
		return ret;

	return 0;
}

/*
 * Read the v2 counters: stop the PPMU, fetch cycle count (total) and the
 * event counter (load; PMNCNT3 is a 40-bit value split high/low), then
 * disable the armed counters.
 */
static int exynos_ppmu_v2_get_event(struct devfreq_event_dev *edev,
				    struct devfreq_event_data *edata)
{
	struct exynos_ppmu *info = devfreq_event_get_drvdata(edev);
	int id = exynos_ppmu_find_ppmu_id(edev);
	int ret;
	unsigned int pmnc, cntenc;
	unsigned int pmcnt_high, pmcnt_low;
	unsigned int total_count, count;
	unsigned long load_count = 0;

	/* Disable PPMU */
	ret = regmap_read(info->regmap, PPMU_V2_PMNC, &pmnc);
	if (ret < 0)
		return ret;

	pmnc &= ~PPMU_PMNC_ENABLE_MASK;
	ret = regmap_write(info->regmap, PPMU_V2_PMNC, pmnc);
	if (ret < 0)
		return ret;

	/* Read cycle count and performance count */
	ret = regmap_read(info->regmap, PPMU_V2_CCNT, &total_count);
	if (ret < 0)
		return ret;
	edata->total_count = total_count;

	switch (id) {
	case PPMU_PMNCNT0:
	case PPMU_PMNCNT1:
	case PPMU_PMNCNT2:
		ret = regmap_read(info->regmap, PPMU_V2_PMNCT(id), &count);
		if (ret < 0)
			return ret;
		load_count = count;
		break;
	case PPMU_PMNCNT3:
		ret = regmap_read(info->regmap, PPMU_V2_PMCNT3_HIGH,
						&pmcnt_high);
		if (ret < 0)
			return ret;

		ret = regmap_read(info->regmap, PPMU_V2_PMCNT3_LOW,
						&pmcnt_low);
		if (ret < 0)
			return ret;

		/* NOTE(review): truncates on 32-bit (unsigned long) targets. */
		load_count = ((u64)((pmcnt_high & 0xff)) << 32)
			   + (u64)pmcnt_low;
		break;
	}
	edata->load_count = load_count;

	/* Disable all counters */
	ret = regmap_read(info->regmap, PPMU_V2_CNTENC, &cntenc);
	if (ret < 0)
		/*
		 * NOTE(review): reports success even though the register read
		 * failed — likely should be 'return ret;' like every other
		 * error path in this function. Confirm before changing.
		 */
		return 0;

	cntenc |= (PPMU_CCNT_MASK | (PPMU_ENABLE << id));
	ret = regmap_write(info->regmap, PPMU_V2_CNTENC, cntenc);
	if (ret < 0)
		return ret;

	dev_dbg(&edev->dev, "%25s (load: %ld / %ld)\n", edev->desc->name,
					edata->load_count, edata->total_count);
	return 0;
}

static const struct devfreq_event_ops exynos_ppmu_v2_ops = {
	.disable = exynos_ppmu_v2_disable,
	.set_event = exynos_ppmu_v2_set_event,
	.get_event = exynos_ppmu_v2_get_event,
};

static const struct of_device_id exynos_ppmu_id_match[] = {
	{
		.compatible = "samsung,exynos-ppmu",
		.data = (void *)EXYNOS_TYPE_PPMU,
	}, {
		.compatible = "samsung,exynos-ppmu-v2",
		.data = (void *)EXYNOS_TYPE_PPMU_V2,
	},
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, exynos_ppmu_id_match);

/*
 * Build one devfreq_event_desc per recognized child of the "events" DT node:
 * pick v1/v2 ops from the compatible string, take "event-name" and
 * "event-data-type" from DT, falling back to a read+write data-count default.
 * Returns 0 on success or a negative errno.
 */
static int of_get_devfreq_events(struct device_node *np,
				 struct exynos_ppmu *info)
{
	struct devfreq_event_desc *desc;
	struct device *dev = info->dev;
	struct device_node *events_np, *node;
	int i, j, count;
	const struct of_device_id *of_id;
	int ret;

	events_np = of_get_child_by_name(np, "events");
	if (!events_np) {
		dev_err(dev,
			"failed to get child node of devfreq-event devices\n");
		return -EINVAL;
	}

	count = of_get_child_count(events_np);
	desc = devm_kcalloc(dev, count, sizeof(*desc), GFP_KERNEL);
	if (!desc) {
		of_node_put(events_np);
		return -ENOMEM;
	}
	info->num_events = count;

	of_id = of_match_device(exynos_ppmu_id_match, dev);
	if (of_id)
		info->ppmu_type = (enum exynos_ppmu_type)of_id->data;
	else {
		of_node_put(events_np);
		return -EINVAL;
	}

	j = 0;
	for_each_child_of_node(events_np, node) {
		for (i = 0; i < ARRAY_SIZE(ppmu_events); i++) {
			if (!ppmu_events[i].name)
				continue;

			if (of_node_name_eq(node, ppmu_events[i].name))
				break;
		}

		if (i == ARRAY_SIZE(ppmu_events)) {
			dev_warn(dev,
				"don't know how to configure events : %pOFn\n",
				node);
			continue;
		}

		switch (info->ppmu_type) {
		case EXYNOS_TYPE_PPMU:
			desc[j].ops = &exynos_ppmu_ops;
			break;
		case EXYNOS_TYPE_PPMU_V2:
			desc[j].ops = &exynos_ppmu_v2_ops;
			break;
		}

		desc[j].driver_data = info;

		of_property_read_string(node, "event-name", &desc[j].name);
		ret = of_property_read_u32(node, "event-data-type",
					   &desc[j].event_type);
		if (ret) {
			/* Set the event of proper data type counting.
			 * Check if the data type has been defined in DT,
			 * use default if not.
			 */
			if (info->ppmu_type == EXYNOS_TYPE_PPMU_V2) {
				/* Not all registers take the same value for
				 * read+write data count.
				 */
				switch (ppmu_events[i].id) {
				case PPMU_PMNCNT0:
				case PPMU_PMNCNT1:
				case PPMU_PMNCNT2:
					desc[j].event_type = PPMU_V2_RO_DATA_CNT
						| PPMU_V2_WO_DATA_CNT;
					break;
				case PPMU_PMNCNT3:
					desc[j].event_type =
						PPMU_V2_EVT3_RW_DATA_CNT;
					break;
				}
			} else {
				desc[j].event_type = PPMU_RO_DATA_CNT |
					PPMU_WO_DATA_CNT;
			}
		}

		j++;
	}
	info->desc = desc;

	of_node_put(events_np);

	return 0;
}

/* max_register is patched at probe time from the actual resource size. */
static struct regmap_config exynos_ppmu_regmap_config = {
	.reg_bits = 32,
	.val_bits = 32,
	.reg_stride = 4,
};

/*
 * Map the PPMU MMIO region into a regmap, grab the optional "ppmu" clock
 * and parse the DT event children. Returns 0 or a negative errno.
 */
static int exynos_ppmu_parse_dt(struct platform_device *pdev,
				struct exynos_ppmu *info)
{
	struct device *dev = info->dev;
	struct device_node *np = dev->of_node;
	struct resource *res;
	void __iomem *base;
	int ret = 0;

	if (!np) {
		dev_err(dev, "failed to find devicetree node\n");
		return -EINVAL;
	}

	/* Maps the memory mapped IO to control PPMU register */
	base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
	if (IS_ERR(base))
		return PTR_ERR(base);

	exynos_ppmu_regmap_config.max_register = resource_size(res) - 4;
	info->regmap = devm_regmap_init_mmio(dev, base,
					&exynos_ppmu_regmap_config);
	if (IS_ERR(info->regmap)) {
		dev_err(dev, "failed to initialize regmap\n");
		return PTR_ERR(info->regmap);
	}

	info->ppmu.clk = devm_clk_get(dev, "ppmu");
	if (IS_ERR(info->ppmu.clk)) {
		/* Clock is optional: warn and continue without it. */
		info->ppmu.clk = NULL;
		dev_warn(dev, "cannot get PPMU clock\n");
	}

	ret = of_get_devfreq_events(np, info);
	if (ret < 0) {
		dev_err(dev, "failed to parse exynos ppmu dt node\n");
		return ret;
	}

	return 0;
}

/*
 * Probe: parse DT, register one devfreq-event device per parsed descriptor
 * (all devm-managed), then enable the optional PPMU clock.
 */
static int exynos_ppmu_probe(struct platform_device *pdev)
{
	struct exynos_ppmu *info;
	struct devfreq_event_dev **edev;
	struct devfreq_event_desc *desc;
	int i, ret = 0, size;

	info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
	if (!info)
		return -ENOMEM;

	info->dev = &pdev->dev;

	/* Parse dt data to get resource */
	ret = exynos_ppmu_parse_dt(pdev, info);
	if (ret < 0) {
		dev_err(&pdev->dev,
			"failed to parse devicetree for resource\n");
		return ret;
	}
	desc = info->desc;

	size = sizeof(struct devfreq_event_dev *) * info->num_events;
	info->edev = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
	if (!info->edev)
		return -ENOMEM;

	edev = info->edev;
	platform_set_drvdata(pdev, info);

	for (i = 0; i < info->num_events; i++) {
		edev[i] = devm_devfreq_event_add_edev(&pdev->dev, &desc[i]);
		if (IS_ERR(edev[i])) {
			dev_err(&pdev->dev,
				"failed to add devfreq-event device\n");
			return PTR_ERR(edev[i]);
		}

		pr_info("exynos-ppmu: new PPMU device registered %s (%s)\n",
			dev_name(&pdev->dev), desc[i].name);
	}

	ret = clk_prepare_enable(info->ppmu.clk);
	if (ret) {
		dev_err(&pdev->dev, "failed to prepare ppmu clock\n");
		return ret;
	}

	return 0;
}

/* Undo the clk_prepare_enable() done in probe; everything else is devm. */
static int exynos_ppmu_remove(struct platform_device *pdev)
{
	struct exynos_ppmu *info = platform_get_drvdata(pdev);

	clk_disable_unprepare(info->ppmu.clk);

	return 0;
}

static struct platform_driver exynos_ppmu_driver = {
	.probe	= exynos_ppmu_probe,
	.remove	= exynos_ppmu_remove,
	.driver = {
		.name	= "exynos-ppmu",
		.of_match_table = exynos_ppmu_id_match,
	},
};
module_platform_driver(exynos_ppmu_driver);

MODULE_DESCRIPTION("Exynos PPMU(Platform Performance Monitoring Unit) driver");
MODULE_AUTHOR("Chanwoo Choi <[email protected]>");
MODULE_LICENSE("GPL");
linux-master
drivers/devfreq/event/exynos-ppmu.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2016, Fuzhou Rockchip Electronics Co., Ltd
 * Author: Lin Huang <[email protected]>
 */

#include <linux/clk.h>
#include <linux/devfreq-event.h>
#include <linux/kernel.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/of.h>

#include <soc/rockchip/rk3399_grf.h>

#define RK3399_DMC_NUM_CH	2

/* DDRMON_CTRL */
/*
 * NOTE(review): each value packs a write-enable bit in the upper halfword
 * and the data bit in the lower halfword (e.g. 0x10001 << n sets bit n,
 * 0x10000 << n clears it) — confirm against the RK3399 TRM.
 */
#define DDRMON_CTRL	0x04
#define CLR_DDRMON_CTRL	(0x1f0000 << 0)
#define LPDDR4_EN	(0x10001 << 4)
#define HARDWARE_EN	(0x10001 << 3)
#define LPDDR3_EN	(0x10001 << 2)
#define SOFTWARE_EN	(0x10001 << 1)
#define SOFTWARE_DIS	(0x10000 << 1)
#define TIME_CNT_EN	(0x10001 << 0)

#define DDRMON_CH0_COUNT_NUM		0x28
#define DDRMON_CH0_DFI_ACCESS_NUM	0x2c
#define DDRMON_CH1_COUNT_NUM		0x3c
#define DDRMON_CH1_DFI_ACCESS_NUM	0x40

/* Per-channel counters sampled from the DDR monitor. */
struct dmc_usage {
	u32 access;	/* DFI access count (scaled by 4 when read) */
	u32 total;	/* total cycle count over the same window */
};

/*
 * The dfi controller can monitor DDR load. It has an upper and lower threshold
 * for the operating points. Whenever the usage leaves these bounds an event is
 * generated to indicate the DDR frequency should be changed.
 */
struct rockchip_dfi {
	struct devfreq_event_dev *edev;
	struct devfreq_event_desc *desc;
	struct dmc_usage ch_usage[RK3399_DMC_NUM_CH];
	struct device *dev;
	void __iomem *regs;		/* DFI monitor MMIO block */
	struct regmap *regmap_pmu;	/* PMU GRF syscon (for DDR type) */
	struct clk *clk;		/* pclk_ddr_mon */
};

/*
 * Program the monitor for the detected DDR type and start counting in
 * software mode.
 */
static void rockchip_dfi_start_hardware_counter(struct devfreq_event_dev *edev)
{
	struct rockchip_dfi *info = devfreq_event_get_drvdata(edev);
	void __iomem *dfi_regs = info->regs;
	u32 val;
	u32 ddr_type;

	/* get ddr type */
	regmap_read(info->regmap_pmu, RK3399_PMUGRF_OS_REG2, &val);
	ddr_type = (val >> RK3399_PMUGRF_DDRTYPE_SHIFT) &
		    RK3399_PMUGRF_DDRTYPE_MASK;

	/* clear DDRMON_CTRL setting */
	writel_relaxed(CLR_DDRMON_CTRL, dfi_regs + DDRMON_CTRL);

	/* set ddr type to dfi */
	if (ddr_type == RK3399_PMUGRF_DDRTYPE_LPDDR3)
		writel_relaxed(LPDDR3_EN, dfi_regs + DDRMON_CTRL);
	else if (ddr_type == RK3399_PMUGRF_DDRTYPE_LPDDR4)
		writel_relaxed(LPDDR4_EN, dfi_regs + DDRMON_CTRL);

	/* enable count, use software mode */
	writel_relaxed(SOFTWARE_EN, dfi_regs + DDRMON_CTRL);
}

/* Stop the monitor counters (software-mode disable). */
static void rockchip_dfi_stop_hardware_counter(struct devfreq_event_dev *edev)
{
	struct rockchip_dfi *info = devfreq_event_get_drvdata(edev);
	void __iomem *dfi_regs = info->regs;

	writel_relaxed(SOFTWARE_DIS, dfi_regs + DDRMON_CTRL);
}

/*
 * Snapshot both channels' counters and return the index of the channel
 * with the highest access count; counting is restarted afterwards.
 */
static int rockchip_dfi_get_busier_ch(struct devfreq_event_dev *edev)
{
	struct rockchip_dfi *info = devfreq_event_get_drvdata(edev);
	u32 tmp, max = 0;
	u32 i, busier_ch = 0;
	void __iomem *dfi_regs = info->regs;

	rockchip_dfi_stop_hardware_counter(edev);

	/* Find out which channel is busier */
	for (i = 0; i < RK3399_DMC_NUM_CH; i++) {
		/* channel register banks are 20 (0x14) bytes apart */
		info->ch_usage[i].access = readl_relaxed(dfi_regs +
				DDRMON_CH0_DFI_ACCESS_NUM + i * 20) * 4;
		info->ch_usage[i].total = readl_relaxed(dfi_regs +
				DDRMON_CH0_COUNT_NUM + i * 20);
		tmp = info->ch_usage[i].access;
		if (tmp > max) {
			busier_ch = i;
			max = tmp;
		}
	}
	rockchip_dfi_start_hardware_counter(edev);

	return busier_ch;
}

/* devfreq-event disable: stop counting and gate the monitor clock. */
static int rockchip_dfi_disable(struct devfreq_event_dev *edev)
{
	struct rockchip_dfi *info = devfreq_event_get_drvdata(edev);

	rockchip_dfi_stop_hardware_counter(edev);
	clk_disable_unprepare(info->clk);

	return 0;
}

/* devfreq-event enable: ungate the monitor clock and start counting. */
static int rockchip_dfi_enable(struct devfreq_event_dev *edev)
{
	struct rockchip_dfi *info = devfreq_event_get_drvdata(edev);
	int ret;

	ret = clk_prepare_enable(info->clk);
	if (ret) {
		dev_err(&edev->dev, "failed to enable dfi clk: %d\n", ret);
		return ret;
	}

	rockchip_dfi_start_hardware_counter(edev);
	return 0;
}

/* No per-event configuration is needed for this monitor. */
static int rockchip_dfi_set_event(struct devfreq_event_dev *edev)
{
	return 0;
}

/* Report load/total from the busiest DDR channel. */
static int rockchip_dfi_get_event(struct devfreq_event_dev *edev,
				  struct devfreq_event_data *edata)
{
	struct rockchip_dfi *info = devfreq_event_get_drvdata(edev);
	int busier_ch;

	busier_ch = rockchip_dfi_get_busier_ch(edev);

	edata->load_count = info->ch_usage[busier_ch].access;
	edata->total_count = info->ch_usage[busier_ch].total;

	return 0;
}

static const struct devfreq_event_ops rockchip_dfi_ops = {
	.disable = rockchip_dfi_disable,
	.enable = rockchip_dfi_enable,
	.get_event = rockchip_dfi_get_event,
	.set_event = rockchip_dfi_set_event,
};

static const struct of_device_id rockchip_dfi_id_match[] = {
	{ .compatible = "rockchip,rk3399-dfi" },
	{ },
};
MODULE_DEVICE_TABLE(of, rockchip_dfi_id_match);

/*
 * Probe: map the DFI registers, take the pclk_ddr_mon clock, resolve the
 * optional "rockchip,pmu" syscon, and register the devfreq-event device.
 */
static int rockchip_dfi_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct rockchip_dfi *data;
	struct devfreq_event_desc *desc;
	struct device_node *np = pdev->dev.of_node, *node;

	data = devm_kzalloc(dev, sizeof(struct rockchip_dfi), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	data->regs = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(data->regs))
		return PTR_ERR(data->regs);

	data->clk = devm_clk_get(dev, "pclk_ddr_mon");
	if (IS_ERR(data->clk))
		return dev_err_probe(dev, PTR_ERR(data->clk),
				     "Cannot get the clk pclk_ddr_mon\n");

	/* try to find the optional reference to the pmu syscon */
	node = of_parse_phandle(np, "rockchip,pmu", 0);
	if (node) {
		data->regmap_pmu = syscon_node_to_regmap(node);
		of_node_put(node);
		if (IS_ERR(data->regmap_pmu))
			return PTR_ERR(data->regmap_pmu);
	}
	data->dev = dev;

	desc = devm_kzalloc(dev, sizeof(*desc), GFP_KERNEL);
	if (!desc)
		return -ENOMEM;

	desc->ops = &rockchip_dfi_ops;
	desc->driver_data = data;
	desc->name = np->name;
	data->desc = desc;

	data->edev = devm_devfreq_event_add_edev(&pdev->dev, desc);
	if (IS_ERR(data->edev)) {
		dev_err(&pdev->dev,
			"failed to add devfreq-event device\n");
		return PTR_ERR(data->edev);
	}

	platform_set_drvdata(pdev, data);

	return 0;
}

static struct platform_driver rockchip_dfi_driver = {
	.probe	= rockchip_dfi_probe,
	.driver = {
		.name	= "rockchip-dfi",
		.of_match_table = rockchip_dfi_id_match,
	},
};
module_platform_driver(rockchip_dfi_driver);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Lin Huang <[email protected]>");
MODULE_DESCRIPTION("Rockchip DFI driver");
linux-master
drivers/devfreq/event/rockchip-dfi.c
// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright 2018-2020 NXP.
 */

#include <dt-bindings/firmware/imx/rsrc.h>
#include <linux/err.h>
#include <linux/firmware/imx/sci.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/thermal.h>

#include "thermal_hwmon.h"

#define IMX_SC_MISC_FUNC_GET_TEMP	13

/* Shared IPC handle to the system controller; set once in probe. */
static struct imx_sc_ipc *thermal_ipc_handle;

struct imx_sc_sensor {
	struct thermal_zone_device *tzd;
	u32 resource_id;	/* SCU resource whose temperature is read */
};

/*
 * Request/response payloads for the GET_TEMP RPC. Layouts are wire formats:
 * keep them __packed __aligned(4) and do not reorder fields.
 */
struct req_get_temp {
	u16 resource_id;
	u8 type;
} __packed __aligned(4);

struct resp_get_temp {
	s16 celsius;
	s8 tenths;	/* tenths of a degree, added to 'celsius' */
} __packed __aligned(4);

/* The same buffer carries the request out and the response back (union). */
struct imx_sc_msg_misc_get_temp {
	struct imx_sc_rpc_msg hdr;
	union {
		struct req_get_temp req;
		struct resp_get_temp resp;
	} data;
} __packed __aligned(4);

/*
 * thermal_zone get_temp callback: ask the system controller for the
 * sensor's temperature and convert to millidegrees Celsius.
 */
static int imx_sc_thermal_get_temp(struct thermal_zone_device *tz, int *temp)
{
	struct imx_sc_msg_misc_get_temp msg;
	struct imx_sc_rpc_msg *hdr = &msg.hdr;
	struct imx_sc_sensor *sensor = thermal_zone_device_priv(tz);
	int ret;

	msg.data.req.resource_id = sensor->resource_id;
	msg.data.req.type = IMX_SC_C_TEMP;

	hdr->ver = IMX_SC_RPC_VERSION;
	hdr->svc = IMX_SC_RPC_SVC_MISC;
	hdr->func = IMX_SC_MISC_FUNC_GET_TEMP;
	/* NOTE(review): size appears to be in 4-byte words — confirm
	 * against the imx_sc_rpc_msg protocol definition. */
	hdr->size = 2;

	ret = imx_scu_call_rpc(thermal_ipc_handle, &msg, true);
	if (ret)
		return ret;

	/* celsius + tenths -> millidegrees */
	*temp = msg.data.resp.celsius * 1000 + msg.data.resp.tenths * 100;

	return 0;
}

static const struct thermal_zone_device_ops imx_sc_thermal_ops = {
	.get_temp = imx_sc_thermal_get_temp,
};

/*
 * Probe: obtain the SCU IPC handle, then register one thermal zone per
 * resource id in the (-1 terminated) match-data table. Sensors without a
 * DT thermal-zone description (-ENODEV) are skipped silently.
 */
static int imx_sc_thermal_probe(struct platform_device *pdev)
{
	struct imx_sc_sensor *sensor;
	const int *resource_id;
	int i, ret;

	ret = imx_scu_get_handle(&thermal_ipc_handle);
	if (ret)
		return ret;

	resource_id = of_device_get_match_data(&pdev->dev);
	if (!resource_id)
		return -EINVAL;

	for (i = 0; resource_id[i] >= 0; i++) {

		sensor = devm_kzalloc(&pdev->dev, sizeof(*sensor), GFP_KERNEL);
		if (!sensor)
			return -ENOMEM;

		sensor->resource_id = resource_id[i];

		sensor->tzd = devm_thermal_of_zone_register(&pdev->dev, sensor->resource_id,
							    sensor, &imx_sc_thermal_ops);
		if (IS_ERR(sensor->tzd)) {
			/*
			 * Save the error value before freeing the
			 * sensor pointer, otherwise we endup with a
			 * use-after-free error
			 */
			ret = PTR_ERR(sensor->tzd);

			devm_kfree(&pdev->dev, sensor);

			/*
			 * The thermal framework notifies us there is
			 * no thermal zone description for such a
			 * sensor id
			 */
			if (ret == -ENODEV)
				continue;

			dev_err(&pdev->dev, "failed to register thermal zone\n");
			return ret;
		}

		devm_thermal_add_hwmon_sysfs(&pdev->dev, sensor->tzd);
	}

	return 0;
}

/* Sensor resource ids exposed by the SCU; terminated by -1. */
static const int imx_sc_sensors[] = {
	IMX_SC_R_SYSTEM, IMX_SC_R_PMIC_0,
	IMX_SC_R_AP_0, IMX_SC_R_AP_1,
	IMX_SC_R_GPU_0_PID0, IMX_SC_R_GPU_1_PID0,
	IMX_SC_R_DRC_0, -1 };

static const struct of_device_id imx_sc_thermal_table[] = {
	{ .compatible = "fsl,imx-sc-thermal", .data =  imx_sc_sensors },
	{}
};
MODULE_DEVICE_TABLE(of, imx_sc_thermal_table);

static struct platform_driver imx_sc_thermal_driver = {
		.probe = imx_sc_thermal_probe,
		.driver = {
			.name = "imx-sc-thermal",
			.of_match_table = imx_sc_thermal_table,
		},
};
module_platform_driver(imx_sc_thermal_driver);

MODULE_AUTHOR("Anson Huang <[email protected]>");
MODULE_DESCRIPTION("Thermal driver for NXP i.MX SoCs with system controller");
MODULE_LICENSE("GPL v2");
linux-master
drivers/thermal/imx_sc_thermal.c
// SPDX-License-Identifier: GPL-2.0 // // Copyright 2013 Freescale Semiconductor, Inc. #include <linux/clk.h> #include <linux/cpufreq.h> #include <linux/cpu_cooling.h> #include <linux/delay.h> #include <linux/interrupt.h> #include <linux/io.h> #include <linux/mfd/syscon.h> #include <linux/module.h> #include <linux/of.h> #include <linux/platform_device.h> #include <linux/regmap.h> #include <linux/thermal.h> #include <linux/nvmem-consumer.h> #include <linux/pm_runtime.h> #define REG_SET 0x4 #define REG_CLR 0x8 #define REG_TOG 0xc /* i.MX6 specific */ #define IMX6_MISC0 0x0150 #define IMX6_MISC0_REFTOP_SELBIASOFF (1 << 3) #define IMX6_MISC1 0x0160 #define IMX6_MISC1_IRQ_TEMPHIGH (1 << 29) /* Below LOW and PANIC bits are only for TEMPMON_IMX6SX */ #define IMX6_MISC1_IRQ_TEMPLOW (1 << 28) #define IMX6_MISC1_IRQ_TEMPPANIC (1 << 27) #define IMX6_TEMPSENSE0 0x0180 #define IMX6_TEMPSENSE0_ALARM_VALUE_SHIFT 20 #define IMX6_TEMPSENSE0_ALARM_VALUE_MASK (0xfff << 20) #define IMX6_TEMPSENSE0_TEMP_CNT_SHIFT 8 #define IMX6_TEMPSENSE0_TEMP_CNT_MASK (0xfff << 8) #define IMX6_TEMPSENSE0_FINISHED (1 << 2) #define IMX6_TEMPSENSE0_MEASURE_TEMP (1 << 1) #define IMX6_TEMPSENSE0_POWER_DOWN (1 << 0) #define IMX6_TEMPSENSE1 0x0190 #define IMX6_TEMPSENSE1_MEASURE_FREQ 0xffff #define IMX6_TEMPSENSE1_MEASURE_FREQ_SHIFT 0 #define OCOTP_MEM0 0x0480 #define OCOTP_ANA1 0x04e0 /* Below TEMPSENSE2 is only for TEMPMON_IMX6SX */ #define IMX6_TEMPSENSE2 0x0290 #define IMX6_TEMPSENSE2_LOW_VALUE_SHIFT 0 #define IMX6_TEMPSENSE2_LOW_VALUE_MASK 0xfff #define IMX6_TEMPSENSE2_PANIC_VALUE_SHIFT 16 #define IMX6_TEMPSENSE2_PANIC_VALUE_MASK 0xfff0000 /* i.MX7 specific */ #define IMX7_ANADIG_DIGPROG 0x800 #define IMX7_TEMPSENSE0 0x300 #define IMX7_TEMPSENSE0_PANIC_ALARM_SHIFT 18 #define IMX7_TEMPSENSE0_PANIC_ALARM_MASK (0x1ff << 18) #define IMX7_TEMPSENSE0_HIGH_ALARM_SHIFT 9 #define IMX7_TEMPSENSE0_HIGH_ALARM_MASK (0x1ff << 9) #define IMX7_TEMPSENSE0_LOW_ALARM_SHIFT 0 #define IMX7_TEMPSENSE0_LOW_ALARM_MASK 0x1ff 
#define IMX7_TEMPSENSE1 0x310 #define IMX7_TEMPSENSE1_MEASURE_FREQ_SHIFT 16 #define IMX7_TEMPSENSE1_MEASURE_FREQ_MASK (0xffff << 16) #define IMX7_TEMPSENSE1_FINISHED (1 << 11) #define IMX7_TEMPSENSE1_MEASURE_TEMP (1 << 10) #define IMX7_TEMPSENSE1_POWER_DOWN (1 << 9) #define IMX7_TEMPSENSE1_TEMP_VALUE_SHIFT 0 #define IMX7_TEMPSENSE1_TEMP_VALUE_MASK 0x1ff /* The driver supports 1 passive trip point and 1 critical trip point */ enum imx_thermal_trip { IMX_TRIP_PASSIVE, IMX_TRIP_CRITICAL, }; #define IMX_POLLING_DELAY 2000 /* millisecond */ #define IMX_PASSIVE_DELAY 1000 #define TEMPMON_IMX6Q 1 #define TEMPMON_IMX6SX 2 #define TEMPMON_IMX7D 3 struct thermal_soc_data { u32 version; u32 sensor_ctrl; u32 power_down_mask; u32 measure_temp_mask; u32 measure_freq_ctrl; u32 measure_freq_mask; u32 measure_freq_shift; u32 temp_data; u32 temp_value_mask; u32 temp_value_shift; u32 temp_valid_mask; u32 panic_alarm_ctrl; u32 panic_alarm_mask; u32 panic_alarm_shift; u32 high_alarm_ctrl; u32 high_alarm_mask; u32 high_alarm_shift; u32 low_alarm_ctrl; u32 low_alarm_mask; u32 low_alarm_shift; }; static struct thermal_trip trips[] = { [IMX_TRIP_PASSIVE] = { .type = THERMAL_TRIP_PASSIVE }, [IMX_TRIP_CRITICAL] = { .type = THERMAL_TRIP_CRITICAL }, }; static struct thermal_soc_data thermal_imx6q_data = { .version = TEMPMON_IMX6Q, .sensor_ctrl = IMX6_TEMPSENSE0, .power_down_mask = IMX6_TEMPSENSE0_POWER_DOWN, .measure_temp_mask = IMX6_TEMPSENSE0_MEASURE_TEMP, .measure_freq_ctrl = IMX6_TEMPSENSE1, .measure_freq_shift = IMX6_TEMPSENSE1_MEASURE_FREQ_SHIFT, .measure_freq_mask = IMX6_TEMPSENSE1_MEASURE_FREQ, .temp_data = IMX6_TEMPSENSE0, .temp_value_mask = IMX6_TEMPSENSE0_TEMP_CNT_MASK, .temp_value_shift = IMX6_TEMPSENSE0_TEMP_CNT_SHIFT, .temp_valid_mask = IMX6_TEMPSENSE0_FINISHED, .high_alarm_ctrl = IMX6_TEMPSENSE0, .high_alarm_mask = IMX6_TEMPSENSE0_ALARM_VALUE_MASK, .high_alarm_shift = IMX6_TEMPSENSE0_ALARM_VALUE_SHIFT, }; static struct thermal_soc_data thermal_imx6sx_data = { .version = 
TEMPMON_IMX6SX, .sensor_ctrl = IMX6_TEMPSENSE0, .power_down_mask = IMX6_TEMPSENSE0_POWER_DOWN, .measure_temp_mask = IMX6_TEMPSENSE0_MEASURE_TEMP, .measure_freq_ctrl = IMX6_TEMPSENSE1, .measure_freq_shift = IMX6_TEMPSENSE1_MEASURE_FREQ_SHIFT, .measure_freq_mask = IMX6_TEMPSENSE1_MEASURE_FREQ, .temp_data = IMX6_TEMPSENSE0, .temp_value_mask = IMX6_TEMPSENSE0_TEMP_CNT_MASK, .temp_value_shift = IMX6_TEMPSENSE0_TEMP_CNT_SHIFT, .temp_valid_mask = IMX6_TEMPSENSE0_FINISHED, .high_alarm_ctrl = IMX6_TEMPSENSE0, .high_alarm_mask = IMX6_TEMPSENSE0_ALARM_VALUE_MASK, .high_alarm_shift = IMX6_TEMPSENSE0_ALARM_VALUE_SHIFT, .panic_alarm_ctrl = IMX6_TEMPSENSE2, .panic_alarm_mask = IMX6_TEMPSENSE2_PANIC_VALUE_MASK, .panic_alarm_shift = IMX6_TEMPSENSE2_PANIC_VALUE_SHIFT, .low_alarm_ctrl = IMX6_TEMPSENSE2, .low_alarm_mask = IMX6_TEMPSENSE2_LOW_VALUE_MASK, .low_alarm_shift = IMX6_TEMPSENSE2_LOW_VALUE_SHIFT, }; static struct thermal_soc_data thermal_imx7d_data = { .version = TEMPMON_IMX7D, .sensor_ctrl = IMX7_TEMPSENSE1, .power_down_mask = IMX7_TEMPSENSE1_POWER_DOWN, .measure_temp_mask = IMX7_TEMPSENSE1_MEASURE_TEMP, .measure_freq_ctrl = IMX7_TEMPSENSE1, .measure_freq_shift = IMX7_TEMPSENSE1_MEASURE_FREQ_SHIFT, .measure_freq_mask = IMX7_TEMPSENSE1_MEASURE_FREQ_MASK, .temp_data = IMX7_TEMPSENSE1, .temp_value_mask = IMX7_TEMPSENSE1_TEMP_VALUE_MASK, .temp_value_shift = IMX7_TEMPSENSE1_TEMP_VALUE_SHIFT, .temp_valid_mask = IMX7_TEMPSENSE1_FINISHED, .panic_alarm_ctrl = IMX7_TEMPSENSE1, .panic_alarm_mask = IMX7_TEMPSENSE0_PANIC_ALARM_MASK, .panic_alarm_shift = IMX7_TEMPSENSE0_PANIC_ALARM_SHIFT, .high_alarm_ctrl = IMX7_TEMPSENSE0, .high_alarm_mask = IMX7_TEMPSENSE0_HIGH_ALARM_MASK, .high_alarm_shift = IMX7_TEMPSENSE0_HIGH_ALARM_SHIFT, .low_alarm_ctrl = IMX7_TEMPSENSE0, .low_alarm_mask = IMX7_TEMPSENSE0_LOW_ALARM_MASK, .low_alarm_shift = IMX7_TEMPSENSE0_LOW_ALARM_SHIFT, }; struct imx_thermal_data { struct device *dev; struct cpufreq_policy *policy; struct thermal_zone_device *tz; struct 
thermal_cooling_device *cdev; struct regmap *tempmon; u32 c1, c2; /* See formula in imx_init_calib() */ int temp_max; int alarm_temp; int last_temp; bool irq_enabled; int irq; struct clk *thermal_clk; const struct thermal_soc_data *socdata; const char *temp_grade; }; static void imx_set_panic_temp(struct imx_thermal_data *data, int panic_temp) { const struct thermal_soc_data *soc_data = data->socdata; struct regmap *map = data->tempmon; int critical_value; critical_value = (data->c2 - panic_temp) / data->c1; regmap_write(map, soc_data->panic_alarm_ctrl + REG_CLR, soc_data->panic_alarm_mask); regmap_write(map, soc_data->panic_alarm_ctrl + REG_SET, critical_value << soc_data->panic_alarm_shift); } static void imx_set_alarm_temp(struct imx_thermal_data *data, int alarm_temp) { struct regmap *map = data->tempmon; const struct thermal_soc_data *soc_data = data->socdata; int alarm_value; data->alarm_temp = alarm_temp; if (data->socdata->version == TEMPMON_IMX7D) alarm_value = alarm_temp / 1000 + data->c1 - 25; else alarm_value = (data->c2 - alarm_temp) / data->c1; regmap_write(map, soc_data->high_alarm_ctrl + REG_CLR, soc_data->high_alarm_mask); regmap_write(map, soc_data->high_alarm_ctrl + REG_SET, alarm_value << soc_data->high_alarm_shift); } static int imx_get_temp(struct thermal_zone_device *tz, int *temp) { struct imx_thermal_data *data = thermal_zone_device_priv(tz); const struct thermal_soc_data *soc_data = data->socdata; struct regmap *map = data->tempmon; unsigned int n_meas; u32 val; int ret; ret = pm_runtime_resume_and_get(data->dev); if (ret < 0) return ret; regmap_read(map, soc_data->temp_data, &val); if ((val & soc_data->temp_valid_mask) == 0) return -EAGAIN; n_meas = (val & soc_data->temp_value_mask) >> soc_data->temp_value_shift; /* See imx_init_calib() for formula derivation */ if (data->socdata->version == TEMPMON_IMX7D) *temp = (n_meas - data->c1 + 25) * 1000; else *temp = data->c2 - n_meas * data->c1; /* Update alarm value to next higher trip point 
for TEMPMON_IMX6Q */ if (data->socdata->version == TEMPMON_IMX6Q) { if (data->alarm_temp == trips[IMX_TRIP_PASSIVE].temperature && *temp >= trips[IMX_TRIP_PASSIVE].temperature) imx_set_alarm_temp(data, trips[IMX_TRIP_CRITICAL].temperature); if (data->alarm_temp == trips[IMX_TRIP_CRITICAL].temperature && *temp < trips[IMX_TRIP_PASSIVE].temperature) { imx_set_alarm_temp(data, trips[IMX_TRIP_PASSIVE].temperature); dev_dbg(data->dev, "thermal alarm off: T < %d\n", data->alarm_temp / 1000); } } if (*temp != data->last_temp) { dev_dbg(data->dev, "millicelsius: %d\n", *temp); data->last_temp = *temp; } /* Reenable alarm IRQ if temperature below alarm temperature */ if (!data->irq_enabled && *temp < data->alarm_temp) { data->irq_enabled = true; enable_irq(data->irq); } pm_runtime_put(data->dev); return 0; } static int imx_change_mode(struct thermal_zone_device *tz, enum thermal_device_mode mode) { struct imx_thermal_data *data = thermal_zone_device_priv(tz); if (mode == THERMAL_DEVICE_ENABLED) { pm_runtime_get(data->dev); if (!data->irq_enabled) { data->irq_enabled = true; enable_irq(data->irq); } } else { pm_runtime_put(data->dev); if (data->irq_enabled) { disable_irq(data->irq); data->irq_enabled = false; } } return 0; } static int imx_set_trip_temp(struct thermal_zone_device *tz, int trip_id, int temp) { struct imx_thermal_data *data = thermal_zone_device_priv(tz); struct thermal_trip trip; int ret; ret = pm_runtime_resume_and_get(data->dev); if (ret < 0) return ret; ret = __thermal_zone_get_trip(tz, trip_id, &trip); if (ret) return ret; /* do not allow changing critical threshold */ if (trip.type == THERMAL_TRIP_CRITICAL) return -EPERM; /* do not allow passive to be set higher than critical */ if (temp < 0 || temp > trips[IMX_TRIP_CRITICAL].temperature) return -EINVAL; imx_set_alarm_temp(data, temp); pm_runtime_put(data->dev); return 0; } static int imx_bind(struct thermal_zone_device *tz, struct thermal_cooling_device *cdev) { return 
thermal_zone_bind_cooling_device(tz, IMX_TRIP_PASSIVE, cdev, THERMAL_NO_LIMIT, THERMAL_NO_LIMIT, THERMAL_WEIGHT_DEFAULT); } static int imx_unbind(struct thermal_zone_device *tz, struct thermal_cooling_device *cdev) { return thermal_zone_unbind_cooling_device(tz, IMX_TRIP_PASSIVE, cdev); } static struct thermal_zone_device_ops imx_tz_ops = { .bind = imx_bind, .unbind = imx_unbind, .get_temp = imx_get_temp, .change_mode = imx_change_mode, .set_trip_temp = imx_set_trip_temp, }; static int imx_init_calib(struct platform_device *pdev, u32 ocotp_ana1) { struct imx_thermal_data *data = platform_get_drvdata(pdev); int n1; u64 temp64; if (ocotp_ana1 == 0 || ocotp_ana1 == ~0) { dev_err(&pdev->dev, "invalid sensor calibration data\n"); return -EINVAL; } /* * On i.MX7D, we only use the calibration data at 25C to get the temp, * Tmeas = ( Nmeas - n1) + 25; n1 is the fuse value for 25C. */ if (data->socdata->version == TEMPMON_IMX7D) { data->c1 = (ocotp_ana1 >> 9) & 0x1ff; return 0; } /* * The sensor is calibrated at 25 °C (aka T1) and the value measured * (aka N1) at this temperature is provided in bits [31:20] in the * i.MX's OCOTP value ANA1. 
* To find the actual temperature T, the following formula has to be used * when reading value n from the sensor: * * T = T1 + (N - N1) / (0.4148468 - 0.0015423 * N1) °C + 3.580661 °C * = [T1' - N1 / (0.4148468 - 0.0015423 * N1) °C] + N / (0.4148468 - 0.0015423 * N1) °C * = [T1' + N1 / (0.0015423 * N1 - 0.4148468) °C] - N / (0.0015423 * N1 - 0.4148468) °C * = c2 - c1 * N * * with * * T1' = 28.580661 °C * c1 = 1 / (0.0015423 * N1 - 0.4297157) °C * c2 = T1' + N1 / (0.0015423 * N1 - 0.4148468) °C * = T1' + N1 * c1 */ n1 = ocotp_ana1 >> 20; temp64 = 10000000; /* use 10^7 as fixed point constant for values in formula */ temp64 *= 1000; /* to get result in °mC */ do_div(temp64, 15423 * n1 - 4148468); data->c1 = temp64; data->c2 = n1 * data->c1 + 28581; return 0; } static void imx_init_temp_grade(struct platform_device *pdev, u32 ocotp_mem0) { struct imx_thermal_data *data = platform_get_drvdata(pdev); /* The maximum die temp is specified by the Temperature Grade */ switch ((ocotp_mem0 >> 6) & 0x3) { case 0: /* Commercial (0 to 95 °C) */ data->temp_grade = "Commercial"; data->temp_max = 95000; break; case 1: /* Extended Commercial (-20 °C to 105 °C) */ data->temp_grade = "Extended Commercial"; data->temp_max = 105000; break; case 2: /* Industrial (-40 °C to 105 °C) */ data->temp_grade = "Industrial"; data->temp_max = 105000; break; case 3: /* Automotive (-40 °C to 125 °C) */ data->temp_grade = "Automotive"; data->temp_max = 125000; break; } /* * Set the critical trip point at 5 °C under max * Set the passive trip point at 10 °C under max (changeable via sysfs) */ trips[IMX_TRIP_PASSIVE].temperature = data->temp_max - (1000 * 10); trips[IMX_TRIP_CRITICAL].temperature = data->temp_max - (1000 * 5); } static int imx_init_from_tempmon_data(struct platform_device *pdev) { struct regmap *map; int ret; u32 val; map = syscon_regmap_lookup_by_phandle(pdev->dev.of_node, "fsl,tempmon-data"); if (IS_ERR(map)) { ret = PTR_ERR(map); dev_err(&pdev->dev, "failed to get sensor regmap: 
%d\n", ret); return ret; } ret = regmap_read(map, OCOTP_ANA1, &val); if (ret) { dev_err(&pdev->dev, "failed to read sensor data: %d\n", ret); return ret; } ret = imx_init_calib(pdev, val); if (ret) return ret; ret = regmap_read(map, OCOTP_MEM0, &val); if (ret) { dev_err(&pdev->dev, "failed to read sensor data: %d\n", ret); return ret; } imx_init_temp_grade(pdev, val); return 0; } static int imx_init_from_nvmem_cells(struct platform_device *pdev) { int ret; u32 val; ret = nvmem_cell_read_u32(&pdev->dev, "calib", &val); if (ret) return ret; ret = imx_init_calib(pdev, val); if (ret) return ret; ret = nvmem_cell_read_u32(&pdev->dev, "temp_grade", &val); if (ret) return ret; imx_init_temp_grade(pdev, val); return 0; } static irqreturn_t imx_thermal_alarm_irq(int irq, void *dev) { struct imx_thermal_data *data = dev; disable_irq_nosync(irq); data->irq_enabled = false; return IRQ_WAKE_THREAD; } static irqreturn_t imx_thermal_alarm_irq_thread(int irq, void *dev) { struct imx_thermal_data *data = dev; dev_dbg(data->dev, "THERMAL ALARM: T > %d\n", data->alarm_temp / 1000); thermal_zone_device_update(data->tz, THERMAL_EVENT_UNSPECIFIED); return IRQ_HANDLED; } static const struct of_device_id of_imx_thermal_match[] = { { .compatible = "fsl,imx6q-tempmon", .data = &thermal_imx6q_data, }, { .compatible = "fsl,imx6sx-tempmon", .data = &thermal_imx6sx_data, }, { .compatible = "fsl,imx7d-tempmon", .data = &thermal_imx7d_data, }, { /* end */ } }; MODULE_DEVICE_TABLE(of, of_imx_thermal_match); #ifdef CONFIG_CPU_FREQ /* * Create cooling device in case no #cooling-cells property is available in * CPU node */ static int imx_thermal_register_legacy_cooling(struct imx_thermal_data *data) { struct device_node *np; int ret = 0; data->policy = cpufreq_cpu_get(0); if (!data->policy) { pr_debug("%s: CPUFreq policy not found\n", __func__); return -EPROBE_DEFER; } np = of_get_cpu_node(data->policy->cpu, NULL); if (!np || !of_property_present(np, "#cooling-cells")) { data->cdev = 
cpufreq_cooling_register(data->policy); if (IS_ERR(data->cdev)) { ret = PTR_ERR(data->cdev); cpufreq_cpu_put(data->policy); } } of_node_put(np); return ret; } static void imx_thermal_unregister_legacy_cooling(struct imx_thermal_data *data) { cpufreq_cooling_unregister(data->cdev); cpufreq_cpu_put(data->policy); } #else static inline int imx_thermal_register_legacy_cooling(struct imx_thermal_data *data) { return 0; } static inline void imx_thermal_unregister_legacy_cooling(struct imx_thermal_data *data) { } #endif static int imx_thermal_probe(struct platform_device *pdev) { struct imx_thermal_data *data; struct regmap *map; int measure_freq; int ret; data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL); if (!data) return -ENOMEM; data->dev = &pdev->dev; map = syscon_regmap_lookup_by_phandle(pdev->dev.of_node, "fsl,tempmon"); if (IS_ERR(map)) { ret = PTR_ERR(map); dev_err(&pdev->dev, "failed to get tempmon regmap: %d\n", ret); return ret; } data->tempmon = map; data->socdata = of_device_get_match_data(&pdev->dev); if (!data->socdata) { dev_err(&pdev->dev, "no device match found\n"); return -ENODEV; } /* make sure the IRQ flag is clear before enabling irq on i.MX6SX */ if (data->socdata->version == TEMPMON_IMX6SX) { regmap_write(map, IMX6_MISC1 + REG_CLR, IMX6_MISC1_IRQ_TEMPHIGH | IMX6_MISC1_IRQ_TEMPLOW | IMX6_MISC1_IRQ_TEMPPANIC); /* * reset value of LOW ALARM is incorrect, set it to lowest * value to avoid false trigger of low alarm. 
*/ regmap_write(map, data->socdata->low_alarm_ctrl + REG_SET, data->socdata->low_alarm_mask); } data->irq = platform_get_irq(pdev, 0); if (data->irq < 0) return data->irq; platform_set_drvdata(pdev, data); if (of_property_present(pdev->dev.of_node, "nvmem-cells")) { ret = imx_init_from_nvmem_cells(pdev); if (ret) return dev_err_probe(&pdev->dev, ret, "failed to init from nvmem\n"); } else { ret = imx_init_from_tempmon_data(pdev); if (ret) { dev_err(&pdev->dev, "failed to init from fsl,tempmon-data\n"); return ret; } } /* Make sure sensor is in known good state for measurements */ regmap_write(map, data->socdata->sensor_ctrl + REG_CLR, data->socdata->power_down_mask); regmap_write(map, data->socdata->sensor_ctrl + REG_CLR, data->socdata->measure_temp_mask); regmap_write(map, data->socdata->measure_freq_ctrl + REG_CLR, data->socdata->measure_freq_mask); if (data->socdata->version != TEMPMON_IMX7D) regmap_write(map, IMX6_MISC0 + REG_SET, IMX6_MISC0_REFTOP_SELBIASOFF); regmap_write(map, data->socdata->sensor_ctrl + REG_SET, data->socdata->power_down_mask); ret = imx_thermal_register_legacy_cooling(data); if (ret) return dev_err_probe(&pdev->dev, ret, "failed to register cpufreq cooling device\n"); data->thermal_clk = devm_clk_get(&pdev->dev, NULL); if (IS_ERR(data->thermal_clk)) { ret = PTR_ERR(data->thermal_clk); if (ret != -EPROBE_DEFER) dev_err(&pdev->dev, "failed to get thermal clk: %d\n", ret); goto legacy_cleanup; } /* * Thermal sensor needs clk on to get correct value, normally * we should enable its clk before taking measurement and disable * clk after measurement is done, but if alarm function is enabled, * hardware will auto measure the temperature periodically, so we * need to keep the clk always on for alarm function. 
*/ ret = clk_prepare_enable(data->thermal_clk); if (ret) { dev_err(&pdev->dev, "failed to enable thermal clk: %d\n", ret); goto legacy_cleanup; } data->tz = thermal_zone_device_register_with_trips("imx_thermal_zone", trips, ARRAY_SIZE(trips), BIT(IMX_TRIP_PASSIVE), data, &imx_tz_ops, NULL, IMX_PASSIVE_DELAY, IMX_POLLING_DELAY); if (IS_ERR(data->tz)) { ret = PTR_ERR(data->tz); dev_err(&pdev->dev, "failed to register thermal zone device %d\n", ret); goto clk_disable; } dev_info(&pdev->dev, "%s CPU temperature grade - max:%dC" " critical:%dC passive:%dC\n", data->temp_grade, data->temp_max / 1000, trips[IMX_TRIP_CRITICAL].temperature / 1000, trips[IMX_TRIP_PASSIVE].temperature / 1000); /* Enable measurements at ~ 10 Hz */ regmap_write(map, data->socdata->measure_freq_ctrl + REG_CLR, data->socdata->measure_freq_mask); measure_freq = DIV_ROUND_UP(32768, 10); /* 10 Hz */ regmap_write(map, data->socdata->measure_freq_ctrl + REG_SET, measure_freq << data->socdata->measure_freq_shift); imx_set_alarm_temp(data, trips[IMX_TRIP_PASSIVE].temperature); if (data->socdata->version == TEMPMON_IMX6SX) imx_set_panic_temp(data, trips[IMX_TRIP_CRITICAL].temperature); regmap_write(map, data->socdata->sensor_ctrl + REG_CLR, data->socdata->power_down_mask); regmap_write(map, data->socdata->sensor_ctrl + REG_SET, data->socdata->measure_temp_mask); /* After power up, we need a delay before first access can be done. 
*/ usleep_range(20, 50); /* the core was configured and enabled just before */ pm_runtime_set_active(&pdev->dev); pm_runtime_enable(data->dev); ret = pm_runtime_resume_and_get(data->dev); if (ret < 0) goto disable_runtime_pm; data->irq_enabled = true; ret = thermal_zone_device_enable(data->tz); if (ret) goto thermal_zone_unregister; ret = devm_request_threaded_irq(&pdev->dev, data->irq, imx_thermal_alarm_irq, imx_thermal_alarm_irq_thread, 0, "imx_thermal", data); if (ret < 0) { dev_err(&pdev->dev, "failed to request alarm irq: %d\n", ret); goto thermal_zone_unregister; } pm_runtime_put(data->dev); return 0; thermal_zone_unregister: thermal_zone_device_unregister(data->tz); disable_runtime_pm: pm_runtime_put_noidle(data->dev); pm_runtime_disable(data->dev); clk_disable: clk_disable_unprepare(data->thermal_clk); legacy_cleanup: imx_thermal_unregister_legacy_cooling(data); return ret; } static int imx_thermal_remove(struct platform_device *pdev) { struct imx_thermal_data *data = platform_get_drvdata(pdev); pm_runtime_put_noidle(data->dev); pm_runtime_disable(data->dev); thermal_zone_device_unregister(data->tz); imx_thermal_unregister_legacy_cooling(data); return 0; } static int __maybe_unused imx_thermal_suspend(struct device *dev) { struct imx_thermal_data *data = dev_get_drvdata(dev); int ret; /* * Need to disable thermal sensor, otherwise, when thermal core * try to get temperature before thermal sensor resume, a wrong * temperature will be read as the thermal sensor is powered * down. 
This is done in change_mode() operation called from * thermal_zone_device_disable() */ ret = thermal_zone_device_disable(data->tz); if (ret) return ret; return pm_runtime_force_suspend(data->dev); } static int __maybe_unused imx_thermal_resume(struct device *dev) { struct imx_thermal_data *data = dev_get_drvdata(dev); int ret; ret = pm_runtime_force_resume(data->dev); if (ret) return ret; /* Enabled thermal sensor after resume */ return thermal_zone_device_enable(data->tz); } static int __maybe_unused imx_thermal_runtime_suspend(struct device *dev) { struct imx_thermal_data *data = dev_get_drvdata(dev); const struct thermal_soc_data *socdata = data->socdata; struct regmap *map = data->tempmon; int ret; ret = regmap_write(map, socdata->sensor_ctrl + REG_CLR, socdata->measure_temp_mask); if (ret) return ret; ret = regmap_write(map, socdata->sensor_ctrl + REG_SET, socdata->power_down_mask); if (ret) return ret; clk_disable_unprepare(data->thermal_clk); return 0; } static int __maybe_unused imx_thermal_runtime_resume(struct device *dev) { struct imx_thermal_data *data = dev_get_drvdata(dev); const struct thermal_soc_data *socdata = data->socdata; struct regmap *map = data->tempmon; int ret; ret = clk_prepare_enable(data->thermal_clk); if (ret) return ret; ret = regmap_write(map, socdata->sensor_ctrl + REG_CLR, socdata->power_down_mask); if (ret) return ret; ret = regmap_write(map, socdata->sensor_ctrl + REG_SET, socdata->measure_temp_mask); if (ret) return ret; /* * According to the temp sensor designers, it may require up to ~17us * to complete a measurement. 
*/ usleep_range(20, 50); return 0; } static const struct dev_pm_ops imx_thermal_pm_ops = { SET_SYSTEM_SLEEP_PM_OPS(imx_thermal_suspend, imx_thermal_resume) SET_RUNTIME_PM_OPS(imx_thermal_runtime_suspend, imx_thermal_runtime_resume, NULL) }; static struct platform_driver imx_thermal = { .driver = { .name = "imx_thermal", .pm = &imx_thermal_pm_ops, .of_match_table = of_imx_thermal_match, }, .probe = imx_thermal_probe, .remove = imx_thermal_remove, }; module_platform_driver(imx_thermal); MODULE_AUTHOR("Freescale Semiconductor, Inc."); MODULE_DESCRIPTION("Thermal driver for Freescale i.MX SoCs"); MODULE_LICENSE("GPL v2"); MODULE_ALIAS("platform:imx-thermal");
linux-master
drivers/thermal/imx_thermal.c
// SPDX-License-Identifier: GPL-2.0-only /* * Generic ADC thermal driver * * Copyright (C) 2016 NVIDIA CORPORATION. All rights reserved. * * Author: Laxman Dewangan <[email protected]> */ #include <linux/iio/consumer.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/platform_device.h> #include <linux/slab.h> #include <linux/thermal.h> #include "thermal_hwmon.h" struct gadc_thermal_info { struct device *dev; struct thermal_zone_device *tz_dev; struct iio_channel *channel; s32 *lookup_table; int nlookup_table; }; static int gadc_thermal_adc_to_temp(struct gadc_thermal_info *gti, int val) { int temp, temp_hi, temp_lo, adc_hi, adc_lo; int i; if (!gti->lookup_table) return val; for (i = 0; i < gti->nlookup_table; i++) { if (val >= gti->lookup_table[2 * i + 1]) break; } if (i == 0) { temp = gti->lookup_table[0]; } else if (i >= gti->nlookup_table) { temp = gti->lookup_table[2 * (gti->nlookup_table - 1)]; } else { adc_hi = gti->lookup_table[2 * i - 1]; adc_lo = gti->lookup_table[2 * i + 1]; temp_hi = gti->lookup_table[2 * i - 2]; temp_lo = gti->lookup_table[2 * i]; temp = temp_hi + mult_frac(temp_lo - temp_hi, val - adc_hi, adc_lo - adc_hi); } return temp; } static int gadc_thermal_get_temp(struct thermal_zone_device *tz, int *temp) { struct gadc_thermal_info *gti = thermal_zone_device_priv(tz); int val; int ret; ret = iio_read_channel_processed(gti->channel, &val); if (ret < 0) return ret; *temp = gadc_thermal_adc_to_temp(gti, val); return 0; } static const struct thermal_zone_device_ops gadc_thermal_ops = { .get_temp = gadc_thermal_get_temp, }; static int gadc_thermal_read_linear_lookup_table(struct device *dev, struct gadc_thermal_info *gti) { struct device_node *np = dev->of_node; enum iio_chan_type chan_type; int ntable; int ret; ntable = of_property_count_elems_of_size(np, "temperature-lookup-table", sizeof(u32)); if (ntable <= 0) { ret = iio_get_channel_type(gti->channel, &chan_type); if (ret || chan_type != IIO_TEMP) dev_notice(dev, "no lookup 
table, assuming DAC channel returns milliCelcius\n"); return 0; } if (ntable % 2) { dev_err(dev, "Pair of temperature vs ADC read value missing\n"); return -EINVAL; } gti->lookup_table = devm_kcalloc(dev, ntable, sizeof(*gti->lookup_table), GFP_KERNEL); if (!gti->lookup_table) return -ENOMEM; ret = of_property_read_u32_array(np, "temperature-lookup-table", (u32 *)gti->lookup_table, ntable); if (ret < 0) { dev_err(dev, "Failed to read temperature lookup table: %d\n", ret); return ret; } gti->nlookup_table = ntable / 2; return 0; } static int gadc_thermal_probe(struct platform_device *pdev) { struct gadc_thermal_info *gti; int ret; if (!pdev->dev.of_node) { dev_err(&pdev->dev, "Only DT based supported\n"); return -ENODEV; } gti = devm_kzalloc(&pdev->dev, sizeof(*gti), GFP_KERNEL); if (!gti) return -ENOMEM; gti->channel = devm_iio_channel_get(&pdev->dev, "sensor-channel"); if (IS_ERR(gti->channel)) { ret = PTR_ERR(gti->channel); if (ret != -EPROBE_DEFER) dev_err(&pdev->dev, "IIO channel not found: %d\n", ret); return ret; } ret = gadc_thermal_read_linear_lookup_table(&pdev->dev, gti); if (ret < 0) return ret; gti->dev = &pdev->dev; gti->tz_dev = devm_thermal_of_zone_register(&pdev->dev, 0, gti, &gadc_thermal_ops); if (IS_ERR(gti->tz_dev)) { ret = PTR_ERR(gti->tz_dev); if (ret != -EPROBE_DEFER) dev_err(&pdev->dev, "Thermal zone sensor register failed: %d\n", ret); return ret; } devm_thermal_add_hwmon_sysfs(&pdev->dev, gti->tz_dev); return 0; } static const struct of_device_id of_adc_thermal_match[] = { { .compatible = "generic-adc-thermal", }, {}, }; MODULE_DEVICE_TABLE(of, of_adc_thermal_match); static struct platform_driver gadc_thermal_driver = { .driver = { .name = "generic-adc-thermal", .of_match_table = of_adc_thermal_match, }, .probe = gadc_thermal_probe, }; module_platform_driver(gadc_thermal_driver); MODULE_AUTHOR("Laxman Dewangan <[email protected]>"); MODULE_DESCRIPTION("Generic ADC thermal driver using IIO framework with DT"); MODULE_LICENSE("GPL v2");
linux-master
drivers/thermal/thermal-generic-adc.c
// SPDX-License-Identifier: GPL-2.0 /* * R-Car THS/TSC thermal sensor driver * * Copyright (C) 2012 Renesas Solutions Corp. * Kuninori Morimoto <[email protected]> */ #include <linux/delay.h> #include <linux/err.h> #include <linux/irq.h> #include <linux/interrupt.h> #include <linux/io.h> #include <linux/module.h> #include <linux/of.h> #include <linux/platform_device.h> #include <linux/pm_runtime.h> #include <linux/reboot.h> #include <linux/slab.h> #include <linux/spinlock.h> #include <linux/thermal.h> #include "thermal_hwmon.h" #define IDLE_INTERVAL 5000 #define COMMON_STR 0x00 #define COMMON_ENR 0x04 #define COMMON_INTMSK 0x0c #define REG_POSNEG 0x20 #define REG_FILONOFF 0x28 #define REG_THSCR 0x2c #define REG_THSSR 0x30 #define REG_INTCTRL 0x34 /* THSCR */ #define CPCTL (1 << 12) /* THSSR */ #define CTEMP 0x3f struct rcar_thermal_common { void __iomem *base; struct device *dev; struct list_head head; spinlock_t lock; }; struct rcar_thermal_chip { unsigned int use_of_thermal : 1; unsigned int has_filonoff : 1; unsigned int irq_per_ch : 1; unsigned int needs_suspend_resume : 1; unsigned int nirqs; unsigned int ctemp_bands; }; static const struct rcar_thermal_chip rcar_thermal = { .use_of_thermal = 0, .has_filonoff = 1, .irq_per_ch = 0, .needs_suspend_resume = 0, .nirqs = 1, .ctemp_bands = 1, }; static const struct rcar_thermal_chip rcar_gen2_thermal = { .use_of_thermal = 1, .has_filonoff = 1, .irq_per_ch = 0, .needs_suspend_resume = 0, .nirqs = 1, .ctemp_bands = 1, }; static const struct rcar_thermal_chip rcar_gen3_thermal = { .use_of_thermal = 1, .has_filonoff = 0, .irq_per_ch = 1, .needs_suspend_resume = 1, /* * The Gen3 chip has 3 interrupts, but this driver uses only 2 * interrupts to detect a temperature change, rise or fall. 
*/ .nirqs = 2, .ctemp_bands = 2, }; struct rcar_thermal_priv { void __iomem *base; struct rcar_thermal_common *common; struct thermal_zone_device *zone; const struct rcar_thermal_chip *chip; struct delayed_work work; struct mutex lock; struct list_head list; int id; }; #define rcar_thermal_for_each_priv(pos, common) \ list_for_each_entry(pos, &common->head, list) #define MCELSIUS(temp) ((temp) * 1000) #define rcar_priv_to_dev(priv) ((priv)->common->dev) #define rcar_has_irq_support(priv) ((priv)->common->base) #define rcar_id_to_shift(priv) ((priv)->id * 8) static const struct of_device_id rcar_thermal_dt_ids[] = { { .compatible = "renesas,rcar-thermal", .data = &rcar_thermal, }, { .compatible = "renesas,rcar-gen2-thermal", .data = &rcar_gen2_thermal, }, { .compatible = "renesas,thermal-r8a774c0", .data = &rcar_gen3_thermal, }, { .compatible = "renesas,thermal-r8a77970", .data = &rcar_gen3_thermal, }, { .compatible = "renesas,thermal-r8a77990", .data = &rcar_gen3_thermal, }, { .compatible = "renesas,thermal-r8a77995", .data = &rcar_gen3_thermal, }, {}, }; MODULE_DEVICE_TABLE(of, rcar_thermal_dt_ids); /* * basic functions */ #define rcar_thermal_common_read(c, r) \ _rcar_thermal_common_read(c, COMMON_ ##r) static u32 _rcar_thermal_common_read(struct rcar_thermal_common *common, u32 reg) { return ioread32(common->base + reg); } #define rcar_thermal_common_write(c, r, d) \ _rcar_thermal_common_write(c, COMMON_ ##r, d) static void _rcar_thermal_common_write(struct rcar_thermal_common *common, u32 reg, u32 data) { iowrite32(data, common->base + reg); } #define rcar_thermal_common_bset(c, r, m, d) \ _rcar_thermal_common_bset(c, COMMON_ ##r, m, d) static void _rcar_thermal_common_bset(struct rcar_thermal_common *common, u32 reg, u32 mask, u32 data) { u32 val; val = ioread32(common->base + reg); val &= ~mask; val |= (data & mask); iowrite32(val, common->base + reg); } #define rcar_thermal_read(p, r) _rcar_thermal_read(p, REG_ ##r) static u32 _rcar_thermal_read(struct 
rcar_thermal_priv *priv, u32 reg) { return ioread32(priv->base + reg); } #define rcar_thermal_write(p, r, d) _rcar_thermal_write(p, REG_ ##r, d) static void _rcar_thermal_write(struct rcar_thermal_priv *priv, u32 reg, u32 data) { iowrite32(data, priv->base + reg); } #define rcar_thermal_bset(p, r, m, d) _rcar_thermal_bset(p, REG_ ##r, m, d) static void _rcar_thermal_bset(struct rcar_thermal_priv *priv, u32 reg, u32 mask, u32 data) { u32 val; val = ioread32(priv->base + reg); val &= ~mask; val |= (data & mask); iowrite32(val, priv->base + reg); } /* * zone device functions */ static int rcar_thermal_update_temp(struct rcar_thermal_priv *priv) { struct device *dev = rcar_priv_to_dev(priv); int old, new, ctemp = -EINVAL; unsigned int i; mutex_lock(&priv->lock); /* * TSC decides a value of CPTAP automatically, * and this is the conditions which validate interrupt. */ rcar_thermal_bset(priv, THSCR, CPCTL, CPCTL); old = ~0; for (i = 0; i < 128; i++) { /* * we need to wait 300us after changing comparator offset * to get stable temperature. * see "Usage Notes" on datasheet */ usleep_range(300, 400); new = rcar_thermal_read(priv, THSSR) & CTEMP; if (new == old) { ctemp = new; break; } old = new; } if (ctemp < 0) { dev_err(dev, "thermal sensor was broken\n"); goto err_out_unlock; } /* * enable IRQ */ if (rcar_has_irq_support(priv)) { if (priv->chip->has_filonoff) rcar_thermal_write(priv, FILONOFF, 0); /* enable Rising/Falling edge interrupt */ rcar_thermal_write(priv, POSNEG, 0x1); rcar_thermal_write(priv, INTCTRL, (((ctemp - 0) << 8) | ((ctemp - 1) << 0))); } err_out_unlock: mutex_unlock(&priv->lock); return ctemp; } static int rcar_thermal_get_current_temp(struct rcar_thermal_priv *priv, int *temp) { int ctemp; ctemp = rcar_thermal_update_temp(priv); if (ctemp < 0) return ctemp; /* Guaranteed operating range is -45C to 125C. 
*/ if (priv->chip->ctemp_bands == 1) *temp = MCELSIUS((ctemp * 5) - 65); else if (ctemp < 24) *temp = MCELSIUS(((ctemp * 55) - 720) / 10); else *temp = MCELSIUS((ctemp * 5) - 60); return 0; } static int rcar_thermal_get_temp(struct thermal_zone_device *zone, int *temp) { struct rcar_thermal_priv *priv = thermal_zone_device_priv(zone); return rcar_thermal_get_current_temp(priv, temp); } static struct thermal_zone_device_ops rcar_thermal_zone_ops = { .get_temp = rcar_thermal_get_temp, }; static struct thermal_trip trips[] = { { .type = THERMAL_TRIP_CRITICAL, .temperature = 90000 } }; /* * interrupt */ #define rcar_thermal_irq_enable(p) _rcar_thermal_irq_ctrl(p, 1) #define rcar_thermal_irq_disable(p) _rcar_thermal_irq_ctrl(p, 0) static void _rcar_thermal_irq_ctrl(struct rcar_thermal_priv *priv, int enable) { struct rcar_thermal_common *common = priv->common; unsigned long flags; u32 mask = 0x3 << rcar_id_to_shift(priv); /* enable Rising/Falling */ if (!rcar_has_irq_support(priv)) return; spin_lock_irqsave(&common->lock, flags); rcar_thermal_common_bset(common, INTMSK, mask, enable ? 0 : mask); spin_unlock_irqrestore(&common->lock, flags); } static void rcar_thermal_work(struct work_struct *work) { struct rcar_thermal_priv *priv; int ret; priv = container_of(work, struct rcar_thermal_priv, work.work); ret = rcar_thermal_update_temp(priv); if (ret < 0) return; rcar_thermal_irq_enable(priv); thermal_zone_device_update(priv->zone, THERMAL_EVENT_UNSPECIFIED); } static u32 rcar_thermal_had_changed(struct rcar_thermal_priv *priv, u32 status) { struct device *dev = rcar_priv_to_dev(priv); status = (status >> rcar_id_to_shift(priv)) & 0x3; if (status) { dev_dbg(dev, "thermal%d %s%s\n", priv->id, (status & 0x2) ? "Rising " : "", (status & 0x1) ? 
"Falling" : ""); } return status; } static irqreturn_t rcar_thermal_irq(int irq, void *data) { struct rcar_thermal_common *common = data; struct rcar_thermal_priv *priv; u32 status, mask; spin_lock(&common->lock); mask = rcar_thermal_common_read(common, INTMSK); status = rcar_thermal_common_read(common, STR); rcar_thermal_common_write(common, STR, 0x000F0F0F & mask); spin_unlock(&common->lock); status = status & ~mask; /* * check the status */ rcar_thermal_for_each_priv(priv, common) { if (rcar_thermal_had_changed(priv, status)) { rcar_thermal_irq_disable(priv); queue_delayed_work(system_freezable_wq, &priv->work, msecs_to_jiffies(300)); } } return IRQ_HANDLED; } /* * platform functions */ static int rcar_thermal_remove(struct platform_device *pdev) { struct rcar_thermal_common *common = platform_get_drvdata(pdev); struct device *dev = &pdev->dev; struct rcar_thermal_priv *priv; rcar_thermal_for_each_priv(priv, common) { rcar_thermal_irq_disable(priv); cancel_delayed_work_sync(&priv->work); if (priv->chip->use_of_thermal) thermal_remove_hwmon_sysfs(priv->zone); else thermal_zone_device_unregister(priv->zone); } pm_runtime_put(dev); pm_runtime_disable(dev); return 0; } static int rcar_thermal_probe(struct platform_device *pdev) { struct rcar_thermal_common *common; struct rcar_thermal_priv *priv; struct device *dev = &pdev->dev; struct resource *res; const struct rcar_thermal_chip *chip = of_device_get_match_data(dev); int mres = 0; int i; int ret = -ENODEV; int idle = IDLE_INTERVAL; u32 enr_bits = 0; common = devm_kzalloc(dev, sizeof(*common), GFP_KERNEL); if (!common) return -ENOMEM; platform_set_drvdata(pdev, common); INIT_LIST_HEAD(&common->head); spin_lock_init(&common->lock); common->dev = dev; pm_runtime_enable(dev); pm_runtime_get_sync(dev); for (i = 0; i < chip->nirqs; i++) { int irq; ret = platform_get_irq_optional(pdev, i); if (ret < 0 && ret != -ENXIO) goto error_unregister; if (ret > 0) irq = ret; else break; if (!common->base) { /* * platform has IRQ 
support. * Then, driver uses common registers * rcar_has_irq_support() will be enabled */ res = platform_get_resource(pdev, IORESOURCE_MEM, mres++); common->base = devm_ioremap_resource(dev, res); if (IS_ERR(common->base)) { ret = PTR_ERR(common->base); goto error_unregister; } idle = 0; /* polling delay is not needed */ } ret = devm_request_irq(dev, irq, rcar_thermal_irq, IRQF_SHARED, dev_name(dev), common); if (ret) { dev_err(dev, "irq request failed\n "); goto error_unregister; } /* update ENR bits */ if (chip->irq_per_ch) enr_bits |= 1 << i; } for (i = 0;; i++) { res = platform_get_resource(pdev, IORESOURCE_MEM, mres++); if (!res) break; priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); if (!priv) { ret = -ENOMEM; goto error_unregister; } priv->base = devm_ioremap_resource(dev, res); if (IS_ERR(priv->base)) { ret = PTR_ERR(priv->base); goto error_unregister; } priv->common = common; priv->id = i; priv->chip = chip; mutex_init(&priv->lock); INIT_LIST_HEAD(&priv->list); INIT_DELAYED_WORK(&priv->work, rcar_thermal_work); ret = rcar_thermal_update_temp(priv); if (ret < 0) goto error_unregister; if (chip->use_of_thermal) { priv->zone = devm_thermal_of_zone_register( dev, i, priv, &rcar_thermal_zone_ops); } else { priv->zone = thermal_zone_device_register_with_trips( "rcar_thermal", trips, ARRAY_SIZE(trips), 0, priv, &rcar_thermal_zone_ops, NULL, 0, idle); ret = thermal_zone_device_enable(priv->zone); if (ret) { thermal_zone_device_unregister(priv->zone); priv->zone = ERR_PTR(ret); } } if (IS_ERR(priv->zone)) { dev_err(dev, "can't register thermal zone\n"); ret = PTR_ERR(priv->zone); priv->zone = NULL; goto error_unregister; } if (chip->use_of_thermal) { ret = thermal_add_hwmon_sysfs(priv->zone); if (ret) goto error_unregister; } rcar_thermal_irq_enable(priv); list_move_tail(&priv->list, &common->head); /* update ENR bits */ if (!chip->irq_per_ch) enr_bits |= 3 << (i * 8); } if (common->base && enr_bits) rcar_thermal_common_write(common, ENR, enr_bits); 
dev_info(dev, "%d sensor probed\n", i); return 0; error_unregister: rcar_thermal_remove(pdev); return ret; } #ifdef CONFIG_PM_SLEEP static int rcar_thermal_suspend(struct device *dev) { struct rcar_thermal_common *common = dev_get_drvdata(dev); struct rcar_thermal_priv *priv = list_first_entry(&common->head, typeof(*priv), list); if (priv->chip->needs_suspend_resume) { rcar_thermal_common_write(common, ENR, 0); rcar_thermal_irq_disable(priv); rcar_thermal_bset(priv, THSCR, CPCTL, 0); } return 0; } static int rcar_thermal_resume(struct device *dev) { struct rcar_thermal_common *common = dev_get_drvdata(dev); struct rcar_thermal_priv *priv = list_first_entry(&common->head, typeof(*priv), list); int ret; if (priv->chip->needs_suspend_resume) { ret = rcar_thermal_update_temp(priv); if (ret < 0) return ret; rcar_thermal_irq_enable(priv); rcar_thermal_common_write(common, ENR, 0x03); } return 0; } #endif static SIMPLE_DEV_PM_OPS(rcar_thermal_pm_ops, rcar_thermal_suspend, rcar_thermal_resume); static struct platform_driver rcar_thermal_driver = { .driver = { .name = "rcar_thermal", .pm = &rcar_thermal_pm_ops, .of_match_table = rcar_thermal_dt_ids, }, .probe = rcar_thermal_probe, .remove = rcar_thermal_remove, }; module_platform_driver(rcar_thermal_driver); MODULE_LICENSE("GPL v2"); MODULE_DESCRIPTION("R-Car THS/TSC thermal sensor driver"); MODULE_AUTHOR("Kuninori Morimoto <[email protected]>");
linux-master
drivers/thermal/rcar_thermal.c
// SPDX-License-Identifier: GPL-2.0
/*
 * R-Car Gen3 THS thermal sensor driver
 * Based on rcar_thermal.c and work from Hien Dang and Khiem Nguyen.
 *
 * Copyright (C) 2016 Renesas Electronics Corporation.
 * Copyright (C) 2016 Sang Engineering
 */
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/thermal.h>

#include "thermal_hwmon.h"

/* Register offsets */
#define REG_GEN3_IRQSTR		0x04
#define REG_GEN3_IRQMSK		0x08
#define REG_GEN3_IRQCTL		0x0C
#define REG_GEN3_IRQEN		0x10
#define REG_GEN3_IRQTEMP1	0x14
#define REG_GEN3_IRQTEMP2	0x18
#define REG_GEN3_IRQTEMP3	0x1C
#define REG_GEN3_THCTR		0x20
#define REG_GEN3_TEMP		0x28
#define REG_GEN3_THCODE1	0x50
#define REG_GEN3_THCODE2	0x54
#define REG_GEN3_THCODE3	0x58
#define REG_GEN3_PTAT1		0x5c
#define REG_GEN3_PTAT2		0x60
#define REG_GEN3_PTAT3		0x64
#define REG_GEN3_THSCP		0x68
#define REG_GEN4_THSFMON00	0x180
#define REG_GEN4_THSFMON01	0x184
#define REG_GEN4_THSFMON02	0x188
#define REG_GEN4_THSFMON15	0x1BC
#define REG_GEN4_THSFMON16	0x1C0
#define REG_GEN4_THSFMON17	0x1C4

/* IRQ{STR,MSK,EN} bits */
#define IRQ_TEMP1		BIT(0)
#define IRQ_TEMP2		BIT(1)
#define IRQ_TEMP3		BIT(2)
#define IRQ_TEMPD1		BIT(3)
#define IRQ_TEMPD2		BIT(4)
#define IRQ_TEMPD3		BIT(5)

/* THCTR bits */
#define THCTR_PONM	BIT(6)
#define THCTR_THSST	BIT(0)

/* THSCP bits */
#define THSCP_COR_PARA_VLD	(BIT(15) | BIT(14))

#define CTEMP_MASK	0xFFF

#define MCELSIUS(temp)	((temp) * 1000)
#define GEN3_FUSE_MASK	0xFFF
#define GEN4_FUSE_MASK	0xFFF

#define TSC_MAX_NUM	5

/* Structure for thermal temperature calculation */
struct equation_coefs {
	int a1;
	int b1;
	int a2;
	int b2;
};

struct rcar_gen3_thermal_priv;

struct rcar_thermal_info {
	int ths_tj_1;	/* upper calibration point, degrees Celsius */
	void (*read_fuses)(struct rcar_gen3_thermal_priv *priv);
};

struct rcar_gen3_thermal_tsc {
	void __iomem *base;
	struct thermal_zone_device *zone;
	struct equation_coefs coef;
	int tj_t;	/* fixed-point breakpoint between the two linear segments */
	int thcode[3];	/* THCODE calibration triplet (fused or defaults) */
};

struct rcar_gen3_thermal_priv {
	struct rcar_gen3_thermal_tsc *tscs[TSC_MAX_NUM];
	struct thermal_zone_device_ops ops;
	unsigned int num_tscs;
	int ptat[3];	/* PTAT calibration triplet, shared by all TSCs */
	const struct rcar_thermal_info *info;
};

static inline u32 rcar_gen3_thermal_read(struct rcar_gen3_thermal_tsc *tsc,
					 u32 reg)
{
	return ioread32(tsc->base + reg);
}

static inline void rcar_gen3_thermal_write(struct rcar_gen3_thermal_tsc *tsc,
					   u32 reg, u32 data)
{
	iowrite32(data, tsc->base + reg);
}

/*
 * Linear approximation for temperature
 *
 * [reg] = [temp] * a + b => [temp] = ([reg] - b) / a
 *
 * The constants a and b are calculated using two triplets of int values PTAT
 * and THCODE. PTAT and THCODE can either be read from hardware or use hard
 * coded values from driver. The formula to calculate a and b are taken from
 * BSP and sparsely documented and understood.
 *
 * Examining the linear formula and the formula used to calculate constants a
 * and b while knowing that the span for PTAT and THCODE values are between
 * 0x000 and 0xfff the largest integer possible is 0xfff * 0xfff == 0xffe001.
 * Integer also needs to be signed so that leaves 7 bits for binary
 * fixed point scaling.
 */
#define FIXPT_SHIFT 7
#define FIXPT_INT(_x)		((_x) << FIXPT_SHIFT)
#define INT_FIXPT(_x)		((_x) >> FIXPT_SHIFT)
#define FIXPT_DIV(_a, _b)	DIV_ROUND_CLOSEST(((_a) << FIXPT_SHIFT), (_b))
#define FIXPT_TO_MCELSIUS(_x)	((_x) * 1000 >> FIXPT_SHIFT)

#define RCAR3_THERMAL_GRAN 500 /* mili Celsius */

/* no idea where these constants come from */
#define TJ_3 -41

/*
 * Derive the two-segment linear coefficients (a1/b1 below tj_t, a2/b2 above)
 * from the PTAT/THCODE calibration triplets.
 */
static void rcar_gen3_thermal_calc_coefs(struct rcar_gen3_thermal_priv *priv,
					 struct rcar_gen3_thermal_tsc *tsc,
					 int ths_tj_1)
{
	/* TODO: Find documentation and document constant calculation formula */

	/*
	 * Division is not scaled in BSP and if scaled it might overflow
	 * the dividend (4095 * 4095 << 14 > INT_MAX) so keep it unscaled
	 */
	tsc->tj_t = (FIXPT_INT((priv->ptat[1] - priv->ptat[2]) * (ths_tj_1 - TJ_3))
		     / (priv->ptat[0] - priv->ptat[2])) + FIXPT_INT(TJ_3);

	tsc->coef.a1 = FIXPT_DIV(FIXPT_INT(tsc->thcode[1] - tsc->thcode[2]),
				 tsc->tj_t - FIXPT_INT(TJ_3));
	tsc->coef.b1 = FIXPT_INT(tsc->thcode[2]) - tsc->coef.a1 * TJ_3;

	tsc->coef.a2 = FIXPT_DIV(FIXPT_INT(tsc->thcode[1] - tsc->thcode[0]),
				 tsc->tj_t - FIXPT_INT(ths_tj_1));
	tsc->coef.b2 = FIXPT_INT(tsc->thcode[0]) - tsc->coef.a2 * ths_tj_1;
}

/* Round to the nearest multiple of RCAR3_THERMAL_GRAN (handles negatives). */
static int rcar_gen3_thermal_round(int temp)
{
	int result, round_offs;

	round_offs = temp >= 0 ? RCAR3_THERMAL_GRAN / 2 :
		-RCAR3_THERMAL_GRAN / 2;
	result = (temp + round_offs) / RCAR3_THERMAL_GRAN;
	return result * RCAR3_THERMAL_GRAN;
}

static int rcar_gen3_thermal_get_temp(struct thermal_zone_device *tz, int *temp)
{
	struct rcar_gen3_thermal_tsc *tsc = thermal_zone_device_priv(tz);
	int mcelsius, val;
	int reg;

	/* Read register and convert to mili Celsius */
	reg = rcar_gen3_thermal_read(tsc, REG_GEN3_TEMP) & CTEMP_MASK;

	/* Pick the linear segment by comparing against the middle THCODE. */
	if (reg <= tsc->thcode[1])
		val = FIXPT_DIV(FIXPT_INT(reg) - tsc->coef.b1, tsc->coef.a1);
	else
		val = FIXPT_DIV(FIXPT_INT(reg) - tsc->coef.b2, tsc->coef.a2);
	mcelsius = FIXPT_TO_MCELSIUS(val);

	/* Guaranteed operating range is -40C to 125C. */

	/* Round value to device granularity setting */
	*temp = rcar_gen3_thermal_round(mcelsius);

	return 0;
}

/* Inverse of get_temp: millidegrees Celsius -> raw register code. */
static int rcar_gen3_thermal_mcelsius_to_temp(struct rcar_gen3_thermal_tsc *tsc,
					      int mcelsius)
{
	int celsius, val;

	celsius = DIV_ROUND_CLOSEST(mcelsius, 1000);
	if (celsius <= INT_FIXPT(tsc->tj_t))
		val = celsius * tsc->coef.a1 + tsc->coef.b1;
	else
		val = celsius * tsc->coef.a2 + tsc->coef.b2;

	return INT_FIXPT(val);
}

static int rcar_gen3_thermal_set_trips(struct thermal_zone_device *tz,
				       int low, int high)
{
	struct rcar_gen3_thermal_tsc *tsc = thermal_zone_device_priv(tz);
	u32 irqmsk = 0;

	/* -INT_MAX/INT_MAX mean "no bound"; leave that direction masked. */
	if (low != -INT_MAX) {
		irqmsk |= IRQ_TEMPD1;
		rcar_gen3_thermal_write(tsc, REG_GEN3_IRQTEMP1,
					rcar_gen3_thermal_mcelsius_to_temp(tsc, low));
	}

	if (high != INT_MAX) {
		irqmsk |= IRQ_TEMP2;
		rcar_gen3_thermal_write(tsc, REG_GEN3_IRQTEMP2,
					rcar_gen3_thermal_mcelsius_to_temp(tsc, high));
	}

	rcar_gen3_thermal_write(tsc, REG_GEN3_IRQMSK, irqmsk);

	return 0;
}

static const struct thermal_zone_device_ops rcar_gen3_tz_of_ops = {
	.get_temp	= rcar_gen3_thermal_get_temp,
	.set_trips	= rcar_gen3_thermal_set_trips,
};

static irqreturn_t rcar_gen3_thermal_irq(int irq, void *data)
{
	struct rcar_gen3_thermal_priv *priv = data;
	unsigned int i;
	u32 status;

	for (i = 0; i < priv->num_tscs; i++) {
		status = rcar_gen3_thermal_read(priv->tscs[i], REG_GEN3_IRQSTR);
		rcar_gen3_thermal_write(priv->tscs[i], REG_GEN3_IRQSTR, 0);
		if (status && priv->tscs[i]->zone)
			thermal_zone_device_update(priv->tscs[i]->zone,
						   THERMAL_EVENT_UNSPECIFIED);
	}

	return IRQ_HANDLED;
}

static void rcar_gen3_thermal_read_fuses_gen3(struct rcar_gen3_thermal_priv *priv)
{
	unsigned int i;

	/*
	 * Set the pseudo calibration points with fused values.
	 * PTAT is shared between all TSCs but only fused for the first
	 * TSC while THCODEs are fused for each TSC.
	 */
	priv->ptat[0] = rcar_gen3_thermal_read(priv->tscs[0], REG_GEN3_PTAT1) &
		GEN3_FUSE_MASK;
	priv->ptat[1] = rcar_gen3_thermal_read(priv->tscs[0], REG_GEN3_PTAT2) &
		GEN3_FUSE_MASK;
	priv->ptat[2] = rcar_gen3_thermal_read(priv->tscs[0], REG_GEN3_PTAT3) &
		GEN3_FUSE_MASK;

	for (i = 0; i < priv->num_tscs; i++) {
		struct rcar_gen3_thermal_tsc *tsc = priv->tscs[i];

		tsc->thcode[0] = rcar_gen3_thermal_read(tsc, REG_GEN3_THCODE1) &
			GEN3_FUSE_MASK;
		tsc->thcode[1] = rcar_gen3_thermal_read(tsc, REG_GEN3_THCODE2) &
			GEN3_FUSE_MASK;
		tsc->thcode[2] = rcar_gen3_thermal_read(tsc, REG_GEN3_THCODE3) &
			GEN3_FUSE_MASK;
	}
}

static void rcar_gen3_thermal_read_fuses_gen4(struct rcar_gen3_thermal_priv *priv)
{
	unsigned int i;

	/*
	 * Set the pseudo calibration points with fused values.
	 * PTAT is shared between all TSCs but only fused for the first
	 * TSC while THCODEs are fused for each TSC.
	 */
	priv->ptat[0] = rcar_gen3_thermal_read(priv->tscs[0], REG_GEN4_THSFMON16) &
		GEN4_FUSE_MASK;
	priv->ptat[1] = rcar_gen3_thermal_read(priv->tscs[0], REG_GEN4_THSFMON17) &
		GEN4_FUSE_MASK;
	priv->ptat[2] = rcar_gen3_thermal_read(priv->tscs[0], REG_GEN4_THSFMON15) &
		GEN4_FUSE_MASK;

	for (i = 0; i < priv->num_tscs; i++) {
		struct rcar_gen3_thermal_tsc *tsc = priv->tscs[i];

		tsc->thcode[0] = rcar_gen3_thermal_read(tsc, REG_GEN4_THSFMON01) &
			GEN4_FUSE_MASK;
		tsc->thcode[1] = rcar_gen3_thermal_read(tsc, REG_GEN4_THSFMON02) &
			GEN4_FUSE_MASK;
		tsc->thcode[2] = rcar_gen3_thermal_read(tsc, REG_GEN4_THSFMON00) &
			GEN4_FUSE_MASK;
	}
}

/* Returns true if hardware fuses were used, false if driver defaults were. */
static bool rcar_gen3_thermal_read_fuses(struct rcar_gen3_thermal_priv *priv)
{
	unsigned int i;
	u32 thscp;

	/* If fuses are not set, fallback to pseudo values. */
	thscp = rcar_gen3_thermal_read(priv->tscs[0], REG_GEN3_THSCP);
	if (!priv->info->read_fuses ||
	    (thscp & THSCP_COR_PARA_VLD) != THSCP_COR_PARA_VLD) {
		/* Default THCODE values in case FUSEs are not set. */
		static const int thcodes[TSC_MAX_NUM][3] = {
			{ 3397, 2800, 2221 },
			{ 3393, 2795, 2216 },
			{ 3389, 2805, 2237 },
			{ 3415, 2694, 2195 },
			{ 3356, 2724, 2244 },
		};

		priv->ptat[0] = 2631;
		priv->ptat[1] = 1509;
		priv->ptat[2] = 435;

		for (i = 0; i < priv->num_tscs; i++) {
			struct rcar_gen3_thermal_tsc *tsc = priv->tscs[i];

			tsc->thcode[0] = thcodes[i][0];
			tsc->thcode[1] = thcodes[i][1];
			tsc->thcode[2] = thcodes[i][2];
		}

		return false;
	}

	priv->info->read_fuses(priv);
	return true;
}

/*
 * Bring one TSC out of power-on mode and start sensing; IRQ comparators are
 * armed only when set_trips is available (i.e. IRQs were requested).
 */
static void rcar_gen3_thermal_init(struct rcar_gen3_thermal_priv *priv,
				   struct rcar_gen3_thermal_tsc *tsc)
{
	u32 reg_val;

	reg_val = rcar_gen3_thermal_read(tsc, REG_GEN3_THCTR);
	reg_val &= ~THCTR_PONM;
	rcar_gen3_thermal_write(tsc, REG_GEN3_THCTR, reg_val);

	usleep_range(1000, 2000);

	rcar_gen3_thermal_write(tsc, REG_GEN3_IRQCTL, 0);
	rcar_gen3_thermal_write(tsc, REG_GEN3_IRQMSK, 0);
	if (priv->ops.set_trips)
		rcar_gen3_thermal_write(tsc, REG_GEN3_IRQEN,
					IRQ_TEMPD1 | IRQ_TEMP2);

	reg_val = rcar_gen3_thermal_read(tsc, REG_GEN3_THCTR);
	reg_val |= THCTR_THSST;
	rcar_gen3_thermal_write(tsc, REG_GEN3_THCTR, reg_val);

	usleep_range(1000, 2000);
}

static const struct rcar_thermal_info rcar_m3w_thermal_info = {
	.ths_tj_1 = 116,
	.read_fuses = rcar_gen3_thermal_read_fuses_gen3,
};

static const struct rcar_thermal_info rcar_gen3_thermal_info = {
	.ths_tj_1 = 126,
	.read_fuses = rcar_gen3_thermal_read_fuses_gen3,
};

static const struct rcar_thermal_info rcar_gen4_thermal_info = {
	.ths_tj_1 = 126,
	.read_fuses = rcar_gen3_thermal_read_fuses_gen4,
};

static const struct of_device_id rcar_gen3_thermal_dt_ids[] = {
	{ .compatible = "renesas,r8a774a1-thermal", .data = &rcar_m3w_thermal_info, },
	{ .compatible = "renesas,r8a774b1-thermal", .data = &rcar_gen3_thermal_info, },
	{ .compatible = "renesas,r8a774e1-thermal", .data = &rcar_gen3_thermal_info, },
	{ .compatible = "renesas,r8a7795-thermal", .data = &rcar_gen3_thermal_info, },
	{ .compatible = "renesas,r8a7796-thermal", .data = &rcar_m3w_thermal_info, },
	{ .compatible = "renesas,r8a77961-thermal", .data = &rcar_m3w_thermal_info, },
	{ .compatible = "renesas,r8a77965-thermal", .data = &rcar_gen3_thermal_info, },
	{ .compatible = "renesas,r8a77980-thermal", .data = &rcar_gen3_thermal_info, },
	{ .compatible = "renesas,r8a779a0-thermal", .data = &rcar_gen3_thermal_info, },
	{ .compatible = "renesas,r8a779f0-thermal", .data = &rcar_gen4_thermal_info, },
	{ .compatible = "renesas,r8a779g0-thermal", .data = &rcar_gen4_thermal_info, },
	{},
};
MODULE_DEVICE_TABLE(of, rcar_gen3_thermal_dt_ids);

static int rcar_gen3_thermal_remove(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;

	/* Zones/IRQs/hwmon are devres-managed; only drop runtime PM here. */
	pm_runtime_put(dev);
	pm_runtime_disable(dev);

	return 0;
}

static void rcar_gen3_hwmon_action(void *data)
{
	struct thermal_zone_device *zone = data;

	thermal_remove_hwmon_sysfs(zone);
}

static int rcar_gen3_thermal_request_irqs(struct rcar_gen3_thermal_priv *priv,
					  struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	unsigned int i;
	char *irqname;
	int ret, irq;

	/* Two IRQs: one for the rise direction, one for the fall direction. */
	for (i = 0; i < 2; i++) {
		irq = platform_get_irq_optional(pdev, i);
		if (irq < 0)
			return irq;

		irqname = devm_kasprintf(dev, GFP_KERNEL, "%s:ch%d",
					 dev_name(dev), i);
		if (!irqname)
			return -ENOMEM;

		ret = devm_request_threaded_irq(dev, irq, NULL,
						rcar_gen3_thermal_irq,
						IRQF_ONESHOT, irqname, priv);
		if (ret)
			return ret;
	}

	return 0;
}

static int rcar_gen3_thermal_probe(struct platform_device *pdev)
{
	struct rcar_gen3_thermal_priv *priv;
	struct device *dev = &pdev->dev;
	struct resource *res;
	struct thermal_zone_device *zone;
	unsigned int i;
	int ret;

	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->ops = rcar_gen3_tz_of_ops;

	priv->info = of_device_get_match_data(dev);
	platform_set_drvdata(pdev, priv);

	/* Fall back to polling mode if the IRQs cannot be requested. */
	if (rcar_gen3_thermal_request_irqs(priv, pdev))
		priv->ops.set_trips = NULL;

	pm_runtime_enable(dev);
	pm_runtime_get_sync(dev);

	for (i = 0; i < TSC_MAX_NUM; i++) {
		struct rcar_gen3_thermal_tsc *tsc;

		res = platform_get_resource(pdev, IORESOURCE_MEM, i);
		if (!res)
			break;

		tsc = devm_kzalloc(dev, sizeof(*tsc), GFP_KERNEL);
		if (!tsc) {
			ret = -ENOMEM;
			goto error_unregister;
		}

		tsc->base = devm_ioremap_resource(dev, res);
		if (IS_ERR(tsc->base)) {
			ret = PTR_ERR(tsc->base);
			goto error_unregister;
		}

		priv->tscs[i] = tsc;
	}

	priv->num_tscs = i;

	if (!rcar_gen3_thermal_read_fuses(priv))
		dev_info(dev, "No calibration values fused, fallback to driver values\n");

	for (i = 0; i < priv->num_tscs; i++) {
		struct rcar_gen3_thermal_tsc *tsc = priv->tscs[i];

		rcar_gen3_thermal_init(priv, tsc);
		rcar_gen3_thermal_calc_coefs(priv, tsc, priv->info->ths_tj_1);

		zone = devm_thermal_of_zone_register(dev, i, tsc, &priv->ops);
		if (IS_ERR(zone)) {
			dev_err(dev, "Sensor %u: Can't register thermal zone\n", i);
			ret = PTR_ERR(zone);
			goto error_unregister;
		}
		tsc->zone = zone;

		ret = thermal_add_hwmon_sysfs(tsc->zone);
		if (ret)
			goto error_unregister;

		ret = devm_add_action_or_reset(dev, rcar_gen3_hwmon_action, zone);
		if (ret)
			goto error_unregister;

		ret = thermal_zone_get_num_trips(tsc->zone);
		if (ret < 0)
			goto error_unregister;

		dev_info(dev, "Sensor %u: Loaded %d trip points\n", i, ret);
	}

	if (!priv->num_tscs) {
		ret = -ENODEV;
		goto error_unregister;
	}

	return 0;

error_unregister:
	rcar_gen3_thermal_remove(pdev);

	return ret;
}

static int __maybe_unused rcar_gen3_thermal_resume(struct device *dev)
{
	struct rcar_gen3_thermal_priv *priv = dev_get_drvdata(dev);
	unsigned int i;

	/* Hardware loses its configuration over suspend; re-init every TSC. */
	for (i = 0; i < priv->num_tscs; i++) {
		struct rcar_gen3_thermal_tsc *tsc = priv->tscs[i];

		rcar_gen3_thermal_init(priv, tsc);
	}

	return 0;
}

static SIMPLE_DEV_PM_OPS(rcar_gen3_thermal_pm_ops, NULL,
			 rcar_gen3_thermal_resume);

static struct platform_driver rcar_gen3_thermal_driver = {
	.driver	= {
		.name	= "rcar_gen3_thermal",
		.pm	= &rcar_gen3_thermal_pm_ops,
		.of_match_table = rcar_gen3_thermal_dt_ids,
	},
	.probe		= rcar_gen3_thermal_probe,
	.remove		= rcar_gen3_thermal_remove,
};
module_platform_driver(rcar_gen3_thermal_driver);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("R-Car Gen3 THS thermal sensor driver");
MODULE_AUTHOR("Wolfram Sang <[email protected]>");
linux-master
drivers/thermal/rcar_gen3_thermal.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Junction temperature thermal driver for Maxim Max77620.
 *
 * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
 *
 * Author: Laxman Dewangan <[email protected]>
 *	   Mallikarjun Kasoju <[email protected]>
 */

#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/mfd/max77620.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/slab.h>
#include <linux/thermal.h>

/* Coarse temperatures reported, in millidegrees Celsius. */
#define MAX77620_NORMAL_OPERATING_TEMP	100000
#define MAX77620_TJALARM1_TEMP		120000
#define MAX77620_TJALARM2_TEMP		140000

struct max77620_therm_info {
	struct device		*dev;
	struct regmap		*rmap;	/* parent MFD's regmap */
	struct thermal_zone_device *tz_device;
	int			irq_tjalarm1;
	int			irq_tjalarm2;
};

/**
 * max77620_thermal_read_temp: Read PMIC die temperature.
 * @data: Device specific data.
 * @temp: Temperature in millidegrees Celsius
 *
 * The actual temperature of PMIC die is not available from PMIC.
 * PMIC only tells the status if it has crossed or not the threshold level
 * of 120degC or 140degC.
 * If threshold has not been crossed then assume die temperature as 100degC
 * else 120degC or 140deG based on the PMIC die temp threshold status.
 *
 * Return 0 on success otherwise error number to show reason of failure.
 */
static int max77620_thermal_read_temp(struct thermal_zone_device *tz, int *temp)
{
	struct max77620_therm_info *mtherm = thermal_zone_device_priv(tz);
	unsigned int val;
	int ret;

	ret = regmap_read(mtherm->rmap, MAX77620_REG_STATLBT, &val);
	if (ret < 0)
		return ret;

	/* Check the highest alarm first so the worst case wins. */
	if (val & MAX77620_IRQ_TJALRM2_MASK)
		*temp = MAX77620_TJALARM2_TEMP;
	else if (val & MAX77620_IRQ_TJALRM1_MASK)
		*temp = MAX77620_TJALARM1_TEMP;
	else
		*temp = MAX77620_NORMAL_OPERATING_TEMP;

	return 0;
}

static const struct thermal_zone_device_ops max77620_thermal_ops = {
	.get_temp = max77620_thermal_read_temp,
};

static irqreturn_t max77620_thermal_irq(int irq, void *data)
{
	struct max77620_therm_info *mtherm = data;

	if (irq == mtherm->irq_tjalarm1)
		dev_warn(mtherm->dev, "Junction Temp Alarm1(120C) occurred\n");
	else if (irq == mtherm->irq_tjalarm2)
		dev_crit(mtherm->dev, "Junction Temp Alarm2(140C) occurred\n");

	thermal_zone_device_update(mtherm->tz_device,
				   THERMAL_EVENT_UNSPECIFIED);

	return IRQ_HANDLED;
}

static int max77620_thermal_probe(struct platform_device *pdev)
{
	struct max77620_therm_info *mtherm;
	int ret;

	mtherm = devm_kzalloc(&pdev->dev, sizeof(*mtherm), GFP_KERNEL);
	if (!mtherm)
		return -ENOMEM;

	mtherm->irq_tjalarm1 = platform_get_irq(pdev, 0);
	mtherm->irq_tjalarm2 = platform_get_irq(pdev, 1);
	if ((mtherm->irq_tjalarm1 < 0) || (mtherm->irq_tjalarm2 < 0)) {
		dev_err(&pdev->dev, "Alarm irq number not available\n");
		return -EINVAL;
	}

	mtherm->dev = &pdev->dev;
	/* Registers live on the parent MFD device. */
	mtherm->rmap = dev_get_regmap(pdev->dev.parent, NULL);
	if (!mtherm->rmap) {
		dev_err(&pdev->dev, "Failed to get parent regmap\n");
		return -ENODEV;
	}

	/*
	 * The reference taken to the parent's node which will be balanced on
	 * reprobe or on platform-device release.
	 */
	device_set_of_node_from_dev(&pdev->dev, pdev->dev.parent);

	mtherm->tz_device = devm_thermal_of_zone_register(&pdev->dev, 0,
				mtherm, &max77620_thermal_ops);
	if (IS_ERR(mtherm->tz_device)) {
		ret = PTR_ERR(mtherm->tz_device);
		dev_err(&pdev->dev, "Failed to register thermal zone: %d\n",
			ret);
		return ret;
	}

	ret = devm_request_threaded_irq(&pdev->dev, mtherm->irq_tjalarm1, NULL,
					max77620_thermal_irq,
					IRQF_ONESHOT | IRQF_SHARED,
					dev_name(&pdev->dev), mtherm);
	if (ret < 0) {
		dev_err(&pdev->dev, "Failed to request irq1: %d\n", ret);
		return ret;
	}

	ret = devm_request_threaded_irq(&pdev->dev, mtherm->irq_tjalarm2, NULL,
					max77620_thermal_irq,
					IRQF_ONESHOT | IRQF_SHARED,
					dev_name(&pdev->dev), mtherm);
	if (ret < 0) {
		dev_err(&pdev->dev, "Failed to request irq2: %d\n", ret);
		return ret;
	}

	return 0;
}

static struct platform_device_id max77620_thermal_devtype[] = {
	{ .name = "max77620-thermal", },
	{},
};
MODULE_DEVICE_TABLE(platform, max77620_thermal_devtype);

static struct platform_driver max77620_thermal_driver = {
	.driver = {
		.name = "max77620-thermal",
	},
	.probe = max77620_thermal_probe,
	.id_table = max77620_thermal_devtype,
};

module_platform_driver(max77620_thermal_driver);

MODULE_DESCRIPTION("Max77620 Junction temperature Thermal driver");
MODULE_AUTHOR("Laxman Dewangan <[email protected]>");
MODULE_AUTHOR("Mallikarjun Kasoju <[email protected]>");
MODULE_LICENSE("GPL v2");
linux-master
drivers/thermal/max77620_thermal.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Marvell EBU Armada SoCs thermal sensor driver
 *
 * Copyright (C) 2013 Marvell
 */
#include <linux/device.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
#include <linux/of_device.h>
#include <linux/thermal.h>
#include <linux/iopoll.h>
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>
#include <linux/interrupt.h>

/* Thermal Manager Control and Status Register */
#define PMU_TDC0_SW_RST_MASK		(0x1 << 1)
#define PMU_TM_DISABLE_OFFS		0
#define PMU_TM_DISABLE_MASK		(0x1 << PMU_TM_DISABLE_OFFS)
#define PMU_TDC0_REF_CAL_CNT_OFFS	11
#define PMU_TDC0_REF_CAL_CNT_MASK	(0x1ff << PMU_TDC0_REF_CAL_CNT_OFFS)
#define PMU_TDC0_OTF_CAL_MASK		(0x1 << 30)
#define PMU_TDC0_START_CAL_MASK		(0x1 << 25)

#define A375_UNIT_CONTROL_SHIFT		27
#define A375_UNIT_CONTROL_MASK		0x7
#define A375_READOUT_INVERT		BIT(15)
#define A375_HW_RESETn			BIT(8)

/* Errata fields */
#define CONTROL0_TSEN_TC_TRIM_MASK	0x7
#define CONTROL0_TSEN_TC_TRIM_VAL	0x3

#define CONTROL0_TSEN_START		BIT(0)
#define CONTROL0_TSEN_RESET		BIT(1)
#define CONTROL0_TSEN_ENABLE		BIT(2)
#define CONTROL0_TSEN_AVG_BYPASS	BIT(6)
#define CONTROL0_TSEN_CHAN_SHIFT	13
#define CONTROL0_TSEN_CHAN_MASK		0xF
#define CONTROL0_TSEN_OSR_SHIFT		24
#define CONTROL0_TSEN_OSR_MAX		0x3
#define CONTROL0_TSEN_MODE_SHIFT	30
#define CONTROL0_TSEN_MODE_EXTERNAL	0x2
#define CONTROL0_TSEN_MODE_MASK		0x3

#define CONTROL1_TSEN_AVG_MASK		0x7
#define CONTROL1_EXT_TSEN_SW_RESET	BIT(7)
#define CONTROL1_EXT_TSEN_HW_RESETn	BIT(8)
#define CONTROL1_TSEN_INT_EN		BIT(25)
#define CONTROL1_TSEN_SELECT_OFF	21
#define CONTROL1_TSEN_SELECT_MASK	0x3

#define STATUS_POLL_PERIOD_US		1000
#define STATUS_POLL_TIMEOUT_US		100000
#define OVERHEAT_INT_POLL_DELAY_MS	1000

struct armada_thermal_data;

/* Marvell EBU Thermal Sensor Dev Structure */
struct armada_thermal_priv {
	struct device *dev;
	struct regmap *syscon;
	char zone_name[THERMAL_NAME_LENGTH];
	/* serialize temperature reads/updates */
	struct mutex update_lock;
	struct armada_thermal_data *data;
	struct thermal_zone_device *overheat_sensor;
	int interrupt_source;
	int current_channel;
	long current_threshold;
	long current_hysteresis;
};

struct armada_thermal_data {
	/* Initialize the thermal IC */
	void (*init)(struct platform_device *pdev,
		     struct armada_thermal_priv *priv);

	/* Formula coeficients: temp = (b - m * reg) / div */
	s64 coef_b;
	s64 coef_m;
	u32 coef_div;
	bool inverted;
	bool signed_sample;

	/* Register shift and mask to access the sensor temperature */
	unsigned int temp_shift;
	unsigned int temp_mask;
	unsigned int thresh_shift;
	unsigned int hyst_shift;
	unsigned int hyst_mask;
	u32 is_valid_bit;

	/* Syscon access */
	unsigned int syscon_control0_off;
	unsigned int syscon_control1_off;
	unsigned int syscon_status_off;
	unsigned int dfx_irq_cause_off;
	unsigned int dfx_irq_mask_off;
	unsigned int dfx_overheat_irq;
	unsigned int dfx_server_irq_mask_off;
	unsigned int dfx_server_irq_en;

	/* One sensor is in the thermal IC, the others are in the CPUs if any */
	unsigned int cpu_nr;
};

/*
 * Per-device state: legacy (MMIO, tripless zone) and syscon (regmap,
 * DT-described zones) probe paths store different objects.
 */
struct armada_drvdata {
	enum drvtype {
		LEGACY,
		SYSCON
	} type;
	union {
		struct armada_thermal_priv *priv;
		struct thermal_zone_device *tz;
	} data;
};

/*
 * struct armada_thermal_sensor - hold the information of one thermal sensor
 * @thermal: pointer to the local private structure
 * @tzd: pointer to the thermal zone device
 * @id: identifier of the thermal sensor
 */
struct armada_thermal_sensor {
	struct armada_thermal_priv *priv;
	int id;
};

/* Armada XP: enable on-the-fly calibration, pulse SW reset, undo TM disable */
static void armadaxp_init(struct platform_device *pdev,
			  struct armada_thermal_priv *priv)
{
	struct armada_thermal_data *data = priv->data;
	u32 reg;

	regmap_read(priv->syscon, data->syscon_control1_off, &reg);
	reg |= PMU_TDC0_OTF_CAL_MASK;

	/* Reference calibration value */
	reg &= ~PMU_TDC0_REF_CAL_CNT_MASK;
	reg |= (0xf1 << PMU_TDC0_REF_CAL_CNT_OFFS);

	/* Reset the sensor */
	reg |= PMU_TDC0_SW_RST_MASK;

	regmap_write(priv->syscon, data->syscon_control1_off, reg);

	reg &= ~PMU_TDC0_SW_RST_MASK;
	regmap_write(priv->syscon, data->syscon_control1_off, reg);

	/* Enable the sensor */
	regmap_read(priv->syscon, data->syscon_status_off, &reg);
	reg &= ~PMU_TM_DISABLE_MASK;
	regmap_write(priv->syscon, data->syscon_status_off, reg);
}

/* Armada 370: calibration setup, then wait for the sensor to settle */
static void armada370_init(struct platform_device *pdev,
			   struct armada_thermal_priv *priv)
{
	struct armada_thermal_data *data = priv->data;
	u32 reg;

	regmap_read(priv->syscon, data->syscon_control1_off, &reg);
	reg |= PMU_TDC0_OTF_CAL_MASK;

	/* Reference calibration value */
	reg &= ~PMU_TDC0_REF_CAL_CNT_MASK;
	reg |= (0xf1 << PMU_TDC0_REF_CAL_CNT_OFFS);

	/* Reset the sensor */
	reg &= ~PMU_TDC0_START_CAL_MASK;

	regmap_write(priv->syscon, data->syscon_control1_off, reg);

	msleep(10);
}

/* Armada 375: full unit reset via HW_RESETn toggle with settle delays */
static void armada375_init(struct platform_device *pdev,
			   struct armada_thermal_priv *priv)
{
	struct armada_thermal_data *data = priv->data;
	u32 reg;

	regmap_read(priv->syscon, data->syscon_control1_off, &reg);
	reg &= ~(A375_UNIT_CONTROL_MASK << A375_UNIT_CONTROL_SHIFT);
	reg &= ~A375_READOUT_INVERT;
	reg &= ~A375_HW_RESETn;
	regmap_write(priv->syscon, data->syscon_control1_off, reg);

	msleep(20);

	reg |= A375_HW_RESETn;
	regmap_write(priv->syscon, data->syscon_control1_off, reg);

	msleep(50);
}

/* Poll the status register until the "valid" bit asserts (or time out) */
static int armada_wait_sensor_validity(struct armada_thermal_priv *priv)
{
	u32 reg;

	return regmap_read_poll_timeout(priv->syscon,
					priv->data->syscon_status_off, reg,
					reg & priv->data->is_valid_bit,
					STATUS_POLL_PERIOD_US,
					STATUS_POLL_TIMEOUT_US);
}

/* Armada 38x: release resets and apply the Tc trim erratum fix */
static void armada380_init(struct platform_device *pdev,
			   struct armada_thermal_priv *priv)
{
	struct armada_thermal_data *data = priv->data;
	u32 reg;

	/* Disable the HW/SW reset */
	regmap_read(priv->syscon, data->syscon_control1_off, &reg);
	reg |= CONTROL1_EXT_TSEN_HW_RESETn;
	reg &= ~CONTROL1_EXT_TSEN_SW_RESET;
	regmap_write(priv->syscon, data->syscon_control1_off, reg);

	/* Set Tsen Tc Trim to correct default value (errata #132698) */
	regmap_read(priv->syscon, data->syscon_control0_off, &reg);
	reg &= ~CONTROL0_TSEN_TC_TRIM_MASK;
	reg |= CONTROL0_TSEN_TC_TRIM_VAL;
	regmap_write(priv->syscon, data->syscon_control0_off, reg);
}

/* AP806/AP807: start the sensor with max oversampling and averaging on */
static void armada_ap80x_init(struct platform_device *pdev,
			      struct armada_thermal_priv *priv)
{
	struct armada_thermal_data *data = priv->data;
	u32 reg;

	regmap_read(priv->syscon, data->syscon_control0_off, &reg);
	reg &= ~CONTROL0_TSEN_RESET;
	reg |= CONTROL0_TSEN_START | CONTROL0_TSEN_ENABLE;

	/* Sample every ~2ms */
	reg |= CONTROL0_TSEN_OSR_MAX << CONTROL0_TSEN_OSR_SHIFT;

	/* Enable average (2 samples by default) */
	reg &= ~CONTROL0_TSEN_AVG_BYPASS;

	regmap_write(priv->syscon, data->syscon_control0_off, reg);
}

/* CP110: 38x sequence plus oversampling and 2-sample averaging */
static void armada_cp110_init(struct platform_device *pdev,
			      struct armada_thermal_priv *priv)
{
	struct armada_thermal_data *data = priv->data;
	u32 reg;

	armada380_init(pdev, priv);

	/* Sample every ~2ms */
	regmap_read(priv->syscon, data->syscon_control0_off, &reg);
	reg |= CONTROL0_TSEN_OSR_MAX << CONTROL0_TSEN_OSR_SHIFT;
	regmap_write(priv->syscon, data->syscon_control0_off, reg);

	/* Average the output value over 2^1 = 2 samples */
	regmap_read(priv->syscon, data->syscon_control1_off, &reg);
	reg &= ~CONTROL1_TSEN_AVG_MASK;
	reg |= 1;
	regmap_write(priv->syscon, data->syscon_control1_off, reg);
}

/* True when the current sample may be trusted (always true if no valid bit) */
static bool armada_is_valid(struct armada_thermal_priv *priv)
{
	u32 reg;

	if (!priv->data->is_valid_bit)
		return true;

	regmap_read(priv->syscon, priv->data->syscon_status_off, &reg);

	return reg & priv->data->is_valid_bit;
}

/* Unmask the overheat IRQ at DFX, DFX-server and sensor levels */
static void armada_enable_overheat_interrupt(struct armada_thermal_priv *priv)
{
	struct armada_thermal_data *data = priv->data;
	u32 reg;

	/* Clear DFX temperature IRQ cause */
	regmap_read(priv->syscon, data->dfx_irq_cause_off, &reg);

	/* Enable DFX Temperature IRQ */
	regmap_read(priv->syscon, data->dfx_irq_mask_off, &reg);
	reg |= data->dfx_overheat_irq;
	regmap_write(priv->syscon, data->dfx_irq_mask_off, reg);

	/* Enable DFX server IRQ */
	regmap_read(priv->syscon, data->dfx_server_irq_mask_off, &reg);
	reg |= data->dfx_server_irq_en;
	regmap_write(priv->syscon, data->dfx_server_irq_mask_off, reg);

	/* Enable overheat interrupt */
	regmap_read(priv->syscon, data->syscon_control1_off, &reg);
	reg |= CONTROL1_TSEN_INT_EN;
	regmap_write(priv->syscon, data->syscon_control1_off, reg);
}

static void __maybe_unused
armada_disable_overheat_interrupt(struct armada_thermal_priv *priv)
{
	struct armada_thermal_data *data = priv->data;
	u32 reg;

	regmap_read(priv->syscon, data->syscon_control1_off, &reg);
	reg &= ~CONTROL1_TSEN_INT_EN;
	regmap_write(priv->syscon, data->syscon_control1_off, reg);
}

/* There is currently no board with more than one sensor per channel */
static int armada_select_channel(struct armada_thermal_priv *priv, int channel)
{
	struct armada_thermal_data *data = priv->data;
	u32 ctrl0;

	if (channel < 0 || channel > priv->data->cpu_nr)
		return -EINVAL;

	if (priv->current_channel == channel)
		return 0;

	/* Stop the measurements */
	regmap_read(priv->syscon, data->syscon_control0_off, &ctrl0);
	ctrl0 &= ~CONTROL0_TSEN_START;
	regmap_write(priv->syscon, data->syscon_control0_off, ctrl0);

	/* Reset the mode, internal sensor will be automatically selected */
	ctrl0 &= ~(CONTROL0_TSEN_MODE_MASK << CONTROL0_TSEN_MODE_SHIFT);

	/* Other channels are external and should be selected accordingly */
	if (channel) {
		/* Change the mode to external */
		ctrl0 |= CONTROL0_TSEN_MODE_EXTERNAL <<
			 CONTROL0_TSEN_MODE_SHIFT;
		/* Select the sensor */
		ctrl0 &= ~(CONTROL0_TSEN_CHAN_MASK << CONTROL0_TSEN_CHAN_SHIFT);
		ctrl0 |= (channel - 1) << CONTROL0_TSEN_CHAN_SHIFT;
	}

	/* Actually set the mode/channel */
	regmap_write(priv->syscon, data->syscon_control0_off, ctrl0);
	priv->current_channel = channel;

	/* Re-start the measurements */
	ctrl0 |= CONTROL0_TSEN_START;
	regmap_write(priv->syscon, data->syscon_control0_off, ctrl0);

	/*
	 * The IP has a latency of ~15ms, so after updating the selected source,
	 * we must absolutely wait for the sensor validity bit to ensure we read
	 * actual data.
	 */
	if (armada_wait_sensor_validity(priv))
		return -EIO;

	return 0;
}

/* Convert the raw status-register sample to millidegrees Celsius */
static int armada_read_sensor(struct armada_thermal_priv *priv, int *temp)
{
	u32 reg, div;
	s64 sample, b, m;

	regmap_read(priv->syscon, priv->data->syscon_status_off, &reg);
	reg = (reg >> priv->data->temp_shift) & priv->data->temp_mask;
	if (priv->data->signed_sample)
		/* The most significant bit is the sign bit */
		sample = sign_extend32(reg, fls(priv->data->temp_mask) - 1);
	else
		sample = reg;

	/* Get formula coeficients */
	b = priv->data->coef_b;
	m = priv->data->coef_m;
	div = priv->data->coef_div;

	if (priv->data->inverted)
		*temp = div_s64((m * sample) - b, div);
	else
		*temp = div_s64(b - (m * sample), div);

	return 0;
}

/* Legacy-binding get_temp: single channel, no locking needed */
static int armada_get_temp_legacy(struct thermal_zone_device *thermal,
				  int *temp)
{
	struct armada_thermal_priv *priv = thermal_zone_device_priv(thermal);
	int ret;

	/* Valid check */
	if (!armada_is_valid(priv))
		return -EIO;

	/* Do the actual reading */
	ret = armada_read_sensor(priv, temp);

	return ret;
}

static struct thermal_zone_device_ops legacy_ops = {
	.get_temp = armada_get_temp_legacy,
};

/*
 * Multi-channel get_temp: switch to the requested sensor, read it, then
 * switch back to the overheat-interrupt source under the update lock.
 */
static int armada_get_temp(struct thermal_zone_device *tz, int *temp)
{
	struct armada_thermal_sensor *sensor = thermal_zone_device_priv(tz);
	struct armada_thermal_priv *priv = sensor->priv;
	int ret;

	mutex_lock(&priv->update_lock);

	/* Select the desired channel */
	ret = armada_select_channel(priv, sensor->id);
	if (ret)
		goto unlock_mutex;

	/* Do the actual reading */
	ret = armada_read_sensor(priv, temp);
	if (ret)
		goto unlock_mutex;

	/*
	 * Select back the interrupt source channel from which a potential
	 * critical trip point has been set.
	 */
	ret = armada_select_channel(priv, priv->interrupt_source);

unlock_mutex:
	mutex_unlock(&priv->update_lock);

	return ret;
}

static const struct thermal_zone_device_ops of_ops = {
	.get_temp = armada_get_temp,
};

/* Inverse of armada_read_sensor(): millidegrees -> raw register sample */
static unsigned int armada_mc_to_reg_temp(struct armada_thermal_data *data,
					  unsigned int temp_mc)
{
	s64 b = data->coef_b;
	s64 m = data->coef_m;
	s64 div = data->coef_div;
	unsigned int sample;

	if (data->inverted)
		sample = div_s64(((temp_mc * div) + b), m);
	else
		sample = div_s64((b - (temp_mc * div)), m);

	return sample & data->temp_mask;
}

/*
 * The documentation states:
 * high/low watermark = threshold +/- 0.4761 * 2^(hysteresis + 2)
 * which is the mathematical derivation for:
 * 0x0 <=> 1.9°C, 0x1 <=> 3.8°C, 0x2 <=> 7.6°C, 0x3 <=> 15.2°C
 */
static unsigned int hyst_levels_mc[] = {1900, 3800, 7600, 15200};

static unsigned int armada_mc_to_reg_hyst(struct armada_thermal_data *data,
					  unsigned int hyst_mc)
{
	int i;

	/*
	 * We will always take the smallest possible hysteresis to avoid risking
	 * the hardware integrity by enlarging the threshold by +8°C in the
	 * worst case.
	 */
	for (i = ARRAY_SIZE(hyst_levels_mc) - 1; i > 0; i--)
		if (hyst_mc >= hyst_levels_mc[i])
			break;

	return i & data->hyst_mask;
}

/*
 * Program the overheat threshold and hysteresis fields of control1.
 * A negative thresh_mc/hyst_mc leaves the corresponding field untouched.
 */
static void armada_set_overheat_thresholds(struct armada_thermal_priv *priv,
					   int thresh_mc, int hyst_mc)
{
	struct armada_thermal_data *data = priv->data;
	unsigned int threshold = armada_mc_to_reg_temp(data, thresh_mc);
	unsigned int hysteresis = armada_mc_to_reg_hyst(data, hyst_mc);
	u32 ctrl1;

	regmap_read(priv->syscon, data->syscon_control1_off, &ctrl1);

	/* Set Threshold */
	if (thresh_mc >= 0) {
		ctrl1 &= ~(data->temp_mask << data->thresh_shift);
		ctrl1 |= threshold << data->thresh_shift;
		priv->current_threshold = thresh_mc;
	}

	/* Set Hysteresis */
	if (hyst_mc >= 0) {
		ctrl1 &= ~(data->hyst_mask << data->hyst_shift);
		ctrl1 |= hysteresis << data->hyst_shift;
		priv->current_hysteresis = hyst_mc;
	}

	regmap_write(priv->syscon, data->syscon_control1_off, ctrl1);
}

static irqreturn_t armada_overheat_isr(int irq, void *blob)
{
	/*
	 * Disable the IRQ and continue in thread context (thermal core
	 * notification and temperature monitoring).
	 */
	disable_irq_nosync(irq);

	return IRQ_WAKE_THREAD;
}

/*
 * Threaded half of the overheat IRQ: notify the core, then poll until the
 * temperature drops below threshold - hysteresis before clearing the cause
 * register and re-enabling the interrupt line.
 */
static irqreturn_t armada_overheat_isr_thread(int irq, void *blob)
{
	struct armada_thermal_priv *priv = blob;
	int low_threshold = priv->current_threshold - priv->current_hysteresis;
	int temperature;
	u32 dummy;
	int ret;

	/* Notify the core in thread context */
	thermal_zone_device_update(priv->overheat_sensor,
				   THERMAL_EVENT_UNSPECIFIED);

	/*
	 * The overheat interrupt must be cleared by reading the DFX interrupt
	 * cause _after_ the temperature has fallen down to the low threshold.
	 * Otherwise future interrupts might not be served.
	 */
	do {
		msleep(OVERHEAT_INT_POLL_DELAY_MS);
		mutex_lock(&priv->update_lock);
		ret = armada_read_sensor(priv, &temperature);
		mutex_unlock(&priv->update_lock);
		if (ret)
			goto enable_irq;
	} while (temperature >= low_threshold);

	regmap_read(priv->syscon, priv->data->dfx_irq_cause_off, &dummy);

	/* Notify the thermal core that the temperature is acceptable again */
	thermal_zone_device_update(priv->overheat_sensor,
				   THERMAL_EVENT_UNSPECIFIED);

enable_irq:
	enable_irq(irq);

	return IRQ_HANDLED;
}

static const struct armada_thermal_data armadaxp_data = {
	.init = armadaxp_init,
	.temp_shift = 10,
	.temp_mask = 0x1ff,
	.coef_b = 3153000000ULL,
	.coef_m = 10000000ULL,
	.coef_div = 13825,
	.syscon_status_off = 0xb0,
	.syscon_control1_off = 0x2d0,
};

static const struct armada_thermal_data armada370_data = {
	.init = armada370_init,
	.is_valid_bit = BIT(9),
	.temp_shift = 10,
	.temp_mask = 0x1ff,
	.coef_b = 3153000000ULL,
	.coef_m = 10000000ULL,
	.coef_div = 13825,
	.syscon_status_off = 0x0,
	.syscon_control1_off = 0x4,
};

static const struct armada_thermal_data armada375_data = {
	.init = armada375_init,
	.is_valid_bit = BIT(10),
	.temp_shift = 0,
	.temp_mask = 0x1ff,
	.coef_b = 3171900000ULL,
	.coef_m = 10000000ULL,
	.coef_div = 13616,
	.syscon_status_off = 0x78,
	.syscon_control0_off = 0x7c,
	.syscon_control1_off = 0x80,
};

static const struct armada_thermal_data armada380_data = {
	.init = armada380_init,
	.is_valid_bit = BIT(10),
	.temp_shift = 0,
	.temp_mask = 0x3ff,
	.coef_b = 1172499100ULL,
	.coef_m = 2000096ULL,
	.coef_div = 4201,
	.inverted = true,
	.syscon_control0_off = 0x70,
	.syscon_control1_off = 0x74,
	.syscon_status_off = 0x78,
};

static const struct armada_thermal_data armada_ap806_data = {
	.init = armada_ap80x_init,
	.is_valid_bit = BIT(16),
	.temp_shift = 0,
	.temp_mask = 0x3ff,
	.thresh_shift = 3,
	.hyst_shift = 19,
	.hyst_mask = 0x3,
	.coef_b = -150000LL,
	.coef_m = 423ULL,
	.coef_div = 1,
	.inverted = true,
	.signed_sample = true,
	.syscon_control0_off = 0x84,
	.syscon_control1_off = 0x88,
	.syscon_status_off = 0x8C,
	.dfx_irq_cause_off = 0x108,
	.dfx_irq_mask_off = 0x10C,
	.dfx_overheat_irq = BIT(22),
	.dfx_server_irq_mask_off = 0x104,
	.dfx_server_irq_en = BIT(1),
	.cpu_nr = 4,
};

static const struct armada_thermal_data armada_ap807_data = {
	.init = armada_ap80x_init,
	.is_valid_bit = BIT(16),
	.temp_shift = 0,
	.temp_mask = 0x3ff,
	.thresh_shift = 3,
	.hyst_shift = 19,
	.hyst_mask = 0x3,
	.coef_b = -128900LL,
	.coef_m = 394ULL,
	.coef_div = 1,
	.inverted = true,
	.signed_sample = true,
	.syscon_control0_off = 0x84,
	.syscon_control1_off = 0x88,
	.syscon_status_off = 0x8C,
	.dfx_irq_cause_off = 0x108,
	.dfx_irq_mask_off = 0x10C,
	.dfx_overheat_irq = BIT(22),
	.dfx_server_irq_mask_off = 0x104,
	.dfx_server_irq_en = BIT(1),
	.cpu_nr = 4,
};

static const struct armada_thermal_data armada_cp110_data = {
	.init = armada_cp110_init,
	.is_valid_bit = BIT(10),
	.temp_shift = 0,
	.temp_mask = 0x3ff,
	.thresh_shift = 16,
	.hyst_shift = 26,
	.hyst_mask = 0x3,
	.coef_b = 1172499100ULL,
	.coef_m = 2000096ULL,
	.coef_div = 4201,
	.inverted = true,
	.syscon_control0_off = 0x70,
	.syscon_control1_off = 0x74,
	.syscon_status_off = 0x78,
	.dfx_irq_cause_off = 0x108,
	.dfx_irq_mask_off = 0x10C,
	.dfx_overheat_irq = BIT(20),
	.dfx_server_irq_mask_off = 0x104,
	.dfx_server_irq_en = BIT(1),
};

static const struct of_device_id armada_thermal_id_table[] = {
	{
		.compatible = "marvell,armadaxp-thermal",
		.data       = &armadaxp_data,
	},
	{
		.compatible = "marvell,armada370-thermal",
		.data       = &armada370_data,
	},
	{
		.compatible = "marvell,armada375-thermal",
		.data       = &armada375_data,
	},
	{
		.compatible = "marvell,armada380-thermal",
		.data       = &armada380_data,
	},
	{
		.compatible = "marvell,armada-ap806-thermal",
		.data       = &armada_ap806_data,
	},
	{
		.compatible = "marvell,armada-ap807-thermal",
		.data       = &armada_ap807_data,
	},
	{
		.compatible = "marvell,armada-cp110-thermal",
		.data       = &armada_cp110_data,
	},
	{
		/* sentinel */
	},
};
MODULE_DEVICE_TABLE(of, armada_thermal_id_table);

static const struct regmap_config armada_thermal_regmap_config = {
	.reg_bits = 32,
	.reg_stride = 4,
	.val_bits = 32,
	.fast_io = true,
};

/* Build a regmap over the legacy single-resource MMIO binding */
static int armada_thermal_probe_legacy(struct platform_device *pdev,
				       struct armada_thermal_priv *priv)
{
	struct armada_thermal_data *data = priv->data;
	void __iomem *base;

	/* First memory region points towards the status register */
	base = devm_platform_get_and_ioremap_resource(pdev, 0, NULL);
	if (IS_ERR(base))
		return PTR_ERR(base);

	/*
	 * Fix up from the old individual DT register specification to
	 * cover all the registers.  We do this by adjusting the ioremap()
	 * result, which should be fine as ioremap() deals with pages.
	 * However, validate that we do not cross a page boundary while
	 * making this adjustment.
	 */
	if (((unsigned long)base & ~PAGE_MASK) < data->syscon_status_off)
		return -EINVAL;
	base -= data->syscon_status_off;

	priv->syscon = devm_regmap_init_mmio(&pdev->dev, base,
					     &armada_thermal_regmap_config);

	return PTR_ERR_OR_ZERO(priv->syscon);
}

/* Reuse the parent system controller's regmap (new bindings) */
static int armada_thermal_probe_syscon(struct platform_device *pdev,
				       struct armada_thermal_priv *priv)
{
	priv->syscon = syscon_node_to_regmap(pdev->dev.parent->of_node);

	return PTR_ERR_OR_ZERO(priv->syscon);
}

/*
 * Derive a thermal-core-compatible zone name from the device name:
 * shorten overlong syscon-style names and replace '-' (rejected by the
 * hwmon core) with '_'.
 */
static void armada_set_sane_name(struct platform_device *pdev,
				 struct armada_thermal_priv *priv)
{
	const char *name = dev_name(&pdev->dev);
	char *insane_char;

	if (strlen(name) > THERMAL_NAME_LENGTH) {
		/*
		 * When inside a system controller, the device name has the
		 * form: f06f8000.system-controller:ap-thermal so stripping
		 * after the ':' should give us a shorter but meaningful name.
		 */
		name = strrchr(name, ':');
		if (!name)
			name = "armada_thermal";
		else
			name++;
	}

	/* Save the name locally */
	strscpy(priv->zone_name, name, THERMAL_NAME_LENGTH);

	/* Then check there are no '-' or hwmon core will complain */
	do {
		insane_char = strpbrk(priv->zone_name, "-");
		if (insane_char)
			*insane_char = '_';
	} while (insane_char);
}

/*
 * The IP can manage to trigger interrupts on overheat situation from all the
 * sensors.  However, the interrupt source changes along with the last selected
 * source (ie. the last read sensor), which is an inconsistent behavior. Avoid
 * possible glitches by always selecting back only one channel (arbitrarily: the
 * first in the DT which has a critical trip point). We also disable sensor
 * switch during overheat situations.
 */
static int armada_configure_overheat_int(struct armada_thermal_priv *priv,
					 struct thermal_zone_device *tz,
					 int sensor_id)
{
	/* Retrieve the critical trip point to enable the overheat interrupt */
	int temperature;
	int ret;

	ret = thermal_zone_get_crit_temp(tz, &temperature);
	if (ret)
		return ret;

	ret = armada_select_channel(priv, sensor_id);
	if (ret)
		return ret;

	/*
	 * A critical temperature does not have a hysteresis
	 */
	armada_set_overheat_thresholds(priv, temperature, 0);
	priv->overheat_sensor = tz;
	priv->interrupt_source = sensor_id;

	armada_enable_overheat_interrupt(priv);

	return 0;
}

/*
 * Probe entry point.  Handles both the legacy MMIO binding (single
 * tripless zone) and the syscon binding (one DT thermal zone per channel,
 * optional overheat interrupt).
 */
static int armada_thermal_probe(struct platform_device *pdev)
{
	struct thermal_zone_device *tz;
	struct armada_thermal_sensor *sensor;
	struct armada_drvdata *drvdata;
	const struct of_device_id *match;
	struct armada_thermal_priv *priv;
	int sensor_id, irq;
	int ret;

	match = of_match_device(armada_thermal_id_table, &pdev->dev);
	if (!match)
		return -ENODEV;

	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	drvdata = devm_kzalloc(&pdev->dev, sizeof(*drvdata), GFP_KERNEL);
	if (!drvdata)
		return -ENOMEM;

	priv->dev = &pdev->dev;
	priv->data = (struct armada_thermal_data *)match->data;

	mutex_init(&priv->update_lock);

	/*
	 * Legacy DT bindings only described "control1" register (also referred
	 * as "control MSB" on old documentation). Then, bindings moved to cover
	 * "control0/control LSB" and "control1/control MSB" registers within
	 * the same resource, which was then of size 8 instead of 4.
	 *
	 * The logic of defining sporadic registers is broken.  For instance, it
	 * blocked the addition of the overheat interrupt feature that needed
	 * another resource somewhere else in the same memory area. One solution
	 * is to define an overall system controller and put the thermal node
	 * into it, which requires the use of regmaps across all the driver.
	 */
	if (IS_ERR(syscon_node_to_regmap(pdev->dev.parent->of_node))) {
		/* Ensure device name is correct for the thermal core */
		armada_set_sane_name(pdev, priv);

		ret = armada_thermal_probe_legacy(pdev, priv);
		if (ret)
			return ret;

		priv->data->init(pdev, priv);

		/* Wait the sensors to be valid */
		armada_wait_sensor_validity(priv);

		tz = thermal_tripless_zone_device_register(priv->zone_name,
							   priv, &legacy_ops,
							   NULL);
		if (IS_ERR(tz)) {
			dev_err(&pdev->dev,
				"Failed to register thermal zone device\n");
			return PTR_ERR(tz);
		}

		ret = thermal_zone_device_enable(tz);
		if (ret) {
			thermal_zone_device_unregister(tz);
			return ret;
		}

		drvdata->type = LEGACY;
		drvdata->data.tz = tz;
		platform_set_drvdata(pdev, drvdata);

		return 0;
	}

	ret = armada_thermal_probe_syscon(pdev, priv);
	if (ret)
		return ret;

	/* -1 forces the first armada_select_channel() to program the mux */
	priv->current_channel = -1;
	priv->data->init(pdev, priv);
	drvdata->type = SYSCON;
	drvdata->data.priv = priv;
	platform_set_drvdata(pdev, drvdata);

	irq = platform_get_irq(pdev, 0);
	if (irq == -EPROBE_DEFER)
		return irq;

	/* The overheat interrupt feature is not mandatory */
	if (irq > 0) {
		ret = devm_request_threaded_irq(&pdev->dev, irq,
						armada_overheat_isr,
						armada_overheat_isr_thread,
						0, NULL, priv);
		if (ret) {
			dev_err(&pdev->dev, "Cannot request threaded IRQ %d\n",
				irq);
			return ret;
		}
	}

	/*
	 * There is one channel for the IC and one per CPU (if any), each
	 * channel has one sensor.
	 */
	for (sensor_id = 0; sensor_id <= priv->data->cpu_nr; sensor_id++) {
		sensor = devm_kzalloc(&pdev->dev,
				      sizeof(struct armada_thermal_sensor),
				      GFP_KERNEL);
		if (!sensor)
			return -ENOMEM;

		/* Register the sensor */
		sensor->priv = priv;
		sensor->id = sensor_id;
		tz = devm_thermal_of_zone_register(&pdev->dev,
						   sensor->id, sensor,
						   &of_ops);
		if (IS_ERR(tz)) {
			dev_info(&pdev->dev, "Thermal sensor %d unavailable\n",
				 sensor_id);
			devm_kfree(&pdev->dev, sensor);
			continue;
		}

		/*
		 * The first channel that has a critical trip point registered
		 * in the DT will serve as interrupt source. Others possible
		 * critical trip points will simply be ignored by the driver.
		 */
		if (irq > 0 && !priv->overheat_sensor)
			armada_configure_overheat_int(priv, tz, sensor->id);
	}

	/* Just complain if no overheat interrupt was set up */
	if (!priv->overheat_sensor)
		dev_warn(&pdev->dev, "Overheat interrupt not available\n");

	return 0;
}

/* Only the legacy path registered a zone that must be torn down manually */
static int armada_thermal_exit(struct platform_device *pdev)
{
	struct armada_drvdata *drvdata = platform_get_drvdata(pdev);

	if (drvdata->type == LEGACY)
		thermal_zone_device_unregister(drvdata->data.tz);

	return 0;
}

static struct platform_driver armada_thermal_driver = {
	.probe = armada_thermal_probe,
	.remove = armada_thermal_exit,
	.driver = {
		.name = "armada_thermal",
		.of_match_table = armada_thermal_id_table,
	},
};

module_platform_driver(armada_thermal_driver);

MODULE_AUTHOR("Ezequiel Garcia <[email protected]>");
MODULE_DESCRIPTION("Marvell EBU Armada SoCs thermal driver");
MODULE_LICENSE("GPL v2");
linux-master
drivers/thermal/armada_thermal.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Thermal device driver for DA9062 and DA9061
 * Copyright (C) 2017 Dialog Semiconductor
 */

/* When over-temperature is reached, an interrupt from the device will be
 * triggered. Following this event the interrupt will be disabled and
 * periodic transmission of uevents (HOT trip point) should define the
 * first level of temperature supervision. It is expected that any final
 * implementation of the thermal driver will include a .notify() function
 * to implement these uevents to userspace.
 *
 * These uevents are intended to indicate non-invasive temperature control
 * of the system, where the necessary measures for cooling are the
 * responsibility of the host software. Once the temperature falls again,
 * the IRQ is re-enabled so the start of a new over-temperature event can
 * be detected without constant software monitoring.
 */

#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/thermal.h>
#include <linux/workqueue.h>
#include <linux/mfd/da9062/core.h>
#include <linux/mfd/da9062/registers.h>

/* Minimum, maximum and default polling millisecond periods are provided
 * here as an example. It is expected that any final implementation to also
 * include a modification of these settings to match the required
 * application.
 */
#define DA9062_DEFAULT_POLLING_MS_PERIOD	3000
#define DA9062_MAX_POLLING_MS_PERIOD		10000
#define DA9062_MIN_POLLING_MS_PERIOD		1000

#define DA9062_MILLI_CELSIUS(t)			((t) * 1000)

/* Poll period in ms; may be overridden once at probe time from the DT */
static unsigned int pp_tmp = DA9062_DEFAULT_POLLING_MS_PERIOD;

struct da9062_thermal_config {
	const char *name;
};

struct da9062_thermal {
	struct da9062 *hw;
	struct delayed_work work;
	struct thermal_zone_device *zone;
	struct mutex lock; /* protection for da9062_thermal temperature */
	int temperature;
	int irq;
	const struct da9062_thermal_config *config;
	struct device *dev;
};

/*
 * Delayed-work body armed by the IRQ handler.  The device has no
 * temperature readout: E_TEMP acts as a status bit after being cleared.
 * While it stays set the zone is held at 125°C and polling continues;
 * once it clears the zone drops to 0°C and the IRQ is re-enabled.
 */
static void da9062_thermal_poll_on(struct work_struct *work)
{
	struct da9062_thermal *thermal = container_of(work,
						      struct da9062_thermal,
						      work.work);
	unsigned long delay;
	unsigned int val;
	int ret;

	/* clear E_TEMP */
	ret = regmap_write(thermal->hw->regmap,
			   DA9062AA_EVENT_B,
			   DA9062AA_E_TEMP_MASK);
	if (ret < 0) {
		dev_err(thermal->dev,
			"Cannot clear the TJUNC temperature status\n");
		goto err_enable_irq;
	}

	/* Now read E_TEMP again: it is acting like a status bit.
	 * If over-temperature, then this status will be true.
	 * If not over-temperature, this status will be false.
	 */
	ret = regmap_read(thermal->hw->regmap,
			  DA9062AA_EVENT_B,
			  &val);
	if (ret < 0) {
		dev_err(thermal->dev,
			"Cannot check the TJUNC temperature status\n");
		goto err_enable_irq;
	}

	if (val & DA9062AA_E_TEMP_MASK) {
		mutex_lock(&thermal->lock);
		thermal->temperature = DA9062_MILLI_CELSIUS(125);
		mutex_unlock(&thermal->lock);
		thermal_zone_device_update(thermal->zone,
					   THERMAL_EVENT_UNSPECIFIED);

		/*
		 * pp_tmp is between 1s and 10s, so we can round the jiffies
		 */
		delay = round_jiffies(msecs_to_jiffies(pp_tmp));
		queue_delayed_work(system_freezable_wq, &thermal->work, delay);
		/* still hot: IRQ stays disabled, the poll keeps running */
		return;
	}

	mutex_lock(&thermal->lock);
	thermal->temperature = DA9062_MILLI_CELSIUS(0);
	mutex_unlock(&thermal->lock);
	thermal_zone_device_update(thermal->zone,
				   THERMAL_EVENT_UNSPECIFIED);

err_enable_irq:
	enable_irq(thermal->irq);
}

/*
 * Over-temperature IRQ: mask the line and hand monitoring over to the
 * freezable workqueue (the work re-enables the IRQ when appropriate).
 */
static irqreturn_t da9062_thermal_irq_handler(int irq, void *data)
{
	struct da9062_thermal *thermal = data;

	disable_irq_nosync(thermal->irq);
	queue_delayed_work(system_freezable_wq, &thermal->work, 0);

	return IRQ_HANDLED;
}

/* Report the cached coarse temperature (0 or 125°C) under the lock */
static int da9062_thermal_get_temp(struct thermal_zone_device *z,
				   int *temp)
{
	struct da9062_thermal *thermal = thermal_zone_device_priv(z);

	mutex_lock(&thermal->lock);
	*temp = thermal->temperature;
	mutex_unlock(&thermal->lock);

	return 0;
}

static struct thermal_zone_device_ops da9062_thermal_ops = {
	.get_temp = da9062_thermal_get_temp,
};

static struct thermal_trip trips[] = {
	{ .temperature = DA9062_MILLI_CELSIUS(125), .type = THERMAL_TRIP_HOT },
};

static const struct da9062_thermal_config da9062_config = {
	.name = "da9062-thermal",
};

static const struct of_device_id da9062_compatible_reg_id_table[] = {
	{ .compatible = "dlg,da9062-thermal", .data = &da9062_config },
	{ },
};

MODULE_DEVICE_TABLE(of, da9062_compatible_reg_id_table);

/*
 * Probe: validate the optional DT polling period, register the zone with
 * its single HOT trip, then request the threaded THERMAL IRQ.
 */
static int da9062_thermal_probe(struct platform_device *pdev)
{
	struct da9062 *chip = dev_get_drvdata(pdev->dev.parent);
	struct da9062_thermal *thermal;
	const struct of_device_id *match;
	int ret = 0;

	match = of_match_node(da9062_compatible_reg_id_table,
			      pdev->dev.of_node);
	if (!match)
		return -ENXIO;

	if (pdev->dev.of_node) {
		if (!of_property_read_u32(pdev->dev.of_node,
					  "polling-delay-passive",
					  &pp_tmp)) {
			if (pp_tmp < DA9062_MIN_POLLING_MS_PERIOD ||
			    pp_tmp > DA9062_MAX_POLLING_MS_PERIOD) {
				dev_warn(&pdev->dev,
					 "Out-of-range polling period %d ms\n",
					 pp_tmp);
				pp_tmp = DA9062_DEFAULT_POLLING_MS_PERIOD;
			}
		}
	}

	thermal = devm_kzalloc(&pdev->dev, sizeof(struct da9062_thermal),
			       GFP_KERNEL);
	if (!thermal) {
		ret = -ENOMEM;
		goto err;
	}

	thermal->config = match->data;
	thermal->hw = chip;
	thermal->dev = &pdev->dev;

	INIT_DELAYED_WORK(&thermal->work, da9062_thermal_poll_on);
	mutex_init(&thermal->lock);

	thermal->zone = thermal_zone_device_register_with_trips(
				thermal->config->name,
				trips, ARRAY_SIZE(trips), 0, thermal,
				&da9062_thermal_ops, NULL, pp_tmp,
				0);
	if (IS_ERR(thermal->zone)) {
		dev_err(&pdev->dev, "Cannot register thermal zone device\n");
		ret = PTR_ERR(thermal->zone);
		goto err;
	}

	ret = thermal_zone_device_enable(thermal->zone);
	if (ret) {
		dev_err(&pdev->dev, "Cannot enable thermal zone device\n");
		goto err_zone;
	}

	dev_dbg(&pdev->dev,
		"TJUNC temperature polling period set at %d ms\n", pp_tmp);

	ret = platform_get_irq_byname(pdev, "THERMAL");
	if (ret < 0)
		goto err_zone;

	thermal->irq = ret;

	ret = request_threaded_irq(thermal->irq, NULL,
				   da9062_thermal_irq_handler,
				   IRQF_TRIGGER_LOW | IRQF_ONESHOT,
				   "THERMAL", thermal);
	if (ret) {
		dev_err(&pdev->dev,
			"Failed to request thermal device IRQ.\n");
		goto err_zone;
	}

	platform_set_drvdata(pdev, thermal);
	return 0;

err_zone:
	thermal_zone_device_unregister(thermal->zone);
err:
	return ret;
}

/* Remove: stop the IRQ first so no new work is queued, then flush/unregister */
static int da9062_thermal_remove(struct platform_device *pdev)
{
	struct da9062_thermal *thermal = platform_get_drvdata(pdev);

	free_irq(thermal->irq, thermal);
	cancel_delayed_work_sync(&thermal->work);
	thermal_zone_device_unregister(thermal->zone);
	return 0;
}

static struct platform_driver da9062_thermal_driver = {
	.probe	= da9062_thermal_probe,
	.remove	= da9062_thermal_remove,
	.driver	= {
		.name		= "da9062-thermal",
		.of_match_table	= da9062_compatible_reg_id_table,
	},
};

module_platform_driver(da9062_thermal_driver);

MODULE_AUTHOR("Steve Twiss");
MODULE_DESCRIPTION("Thermal TJUNC device driver for Dialog DA9062 and DA9061");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:da9062-thermal");
linux-master
drivers/thermal/da9062-thermal.c
// SPDX-License-Identifier: GPL-2.0
/*
 * thermal_hwmon.c - Generic Thermal Management hwmon support.
 *
 * Exposes each thermal zone as a hwmon temperature channel.  Zones that
 * share the same type string share one hwmon device; each zone gets a
 * tempN_input attribute and, when a critical trip exists, a tempN_crit.
 *
 * Code based on Intel thermal_core.c. Copyrights of the original code:
 * Copyright (C) 2008 Intel Corp
 * Copyright (C) 2008 Zhang Rui <[email protected]>
 * Copyright (C) 2008 Sujith Thomas <[email protected]>
 *
 * Copyright (C) 2013 Texas Instruments
 * Copyright (C) 2013 Eduardo Valentin <[email protected]>
 */
#include <linux/err.h>
#include <linux/export.h>
#include <linux/hwmon.h>
#include <linux/slab.h>
#include <linux/thermal.h>

#include "thermal_hwmon.h"
#include "thermal_core.h"

/* hwmon sys I/F */
/* thermal zone devices with the same type share one hwmon device */
struct thermal_hwmon_device {
	char type[THERMAL_NAME_LENGTH];	/* zone type, '-' replaced by '_' */
	struct device *device;		/* registered hwmon device */
	int count;			/* number of temp channels created */
	struct list_head tz_list;	/* attached thermal_hwmon_temp entries */
	struct list_head node;		/* link in thermal_hwmon_list */
};

/* device_attribute plus storage for its generated name ("tempN_input") */
struct thermal_hwmon_attr {
	struct device_attribute attr;
	char name[16];
};

/* one temperature input for each thermal zone */
struct thermal_hwmon_temp {
	struct list_head hwmon_node;	/* link in owning hwmon's tz_list */
	struct thermal_zone_device *tz;
	struct thermal_hwmon_attr temp_input;	/* hwmon sys attr */
	struct thermal_hwmon_attr temp_crit;	/* hwmon sys attr */
};

/* All registered hwmon devices; both protected by the lock below. */
static LIST_HEAD(thermal_hwmon_list);

static DEFINE_MUTEX(thermal_hwmon_list_lock);

/* sysfs show for tempN_input: current zone temperature in millicelsius */
static ssize_t
temp_input_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	int temperature;
	int ret;
	struct thermal_hwmon_attr *hwmon_attr
			= container_of(attr, struct thermal_hwmon_attr, attr);
	struct thermal_hwmon_temp *temp
			= container_of(hwmon_attr, struct thermal_hwmon_temp,
				       temp_input);
	struct thermal_zone_device *tz = temp->tz;

	ret = thermal_zone_get_temp(tz, &temperature);

	if (ret)
		return ret;

	return sprintf(buf, "%d\n", temperature);
}

/* sysfs show for tempN_crit: the zone's critical trip temperature */
static ssize_t
temp_crit_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct thermal_hwmon_attr *hwmon_attr
			= container_of(attr, struct thermal_hwmon_attr, attr);
	struct thermal_hwmon_temp *temp
			= container_of(hwmon_attr, struct thermal_hwmon_temp,
				       temp_crit);
	struct thermal_zone_device *tz = temp->tz;
	int temperature;
	int ret;

	mutex_lock(&tz->lock);

	/* The zone may be going away; only query ops while it is registered. */
	if (device_is_registered(&tz->device))
		ret = tz->ops->get_crit_temp(tz, &temperature);
	else
		ret = -ENODEV;

	mutex_unlock(&tz->lock);

	if (ret)
		return ret;

	return sprintf(buf, "%d\n", temperature);
}

/*
 * Find the hwmon device matching a zone's type (after '-' -> '_'
 * normalization, mirroring what thermal_add_hwmon_sysfs() stores).
 * Returns NULL if no such hwmon device exists yet.
 */
static struct thermal_hwmon_device *
thermal_hwmon_lookup_by_type(const struct thermal_zone_device *tz)
{
	struct thermal_hwmon_device *hwmon;
	char type[THERMAL_NAME_LENGTH];

	mutex_lock(&thermal_hwmon_list_lock);
	list_for_each_entry(hwmon, &thermal_hwmon_list, node) {
		strcpy(type, tz->type);
		strreplace(type, '-', '_');
		if (!strcmp(hwmon->type, type)) {
			mutex_unlock(&thermal_hwmon_list_lock);
			return hwmon;
		}
	}
	mutex_unlock(&thermal_hwmon_list_lock);

	return NULL;
}

/* Find the temperature input matching a given thermal zone */
static struct thermal_hwmon_temp *
thermal_hwmon_lookup_temp(const struct thermal_hwmon_device *hwmon,
			  const struct thermal_zone_device *tz)
{
	struct thermal_hwmon_temp *temp;

	mutex_lock(&thermal_hwmon_list_lock);
	list_for_each_entry(temp, &hwmon->tz_list, hwmon_node)
		if (temp->tz == tz) {
			mutex_unlock(&thermal_hwmon_list_lock);
			return temp;
		}
	mutex_unlock(&thermal_hwmon_list_lock);

	return NULL;
}

/* A zone gets a tempN_crit attribute only if get_crit_temp() works now. */
static bool thermal_zone_crit_temp_valid(struct thermal_zone_device *tz)
{
	int temp;

	return tz->ops->get_crit_temp && !tz->ops->get_crit_temp(tz, &temp);
}

/*
 * Attach @tz to a (possibly new) hwmon device and create its sysfs
 * attributes.  Returns 0 on success or a negative errno; on failure all
 * partially-created state is rolled back via the goto chain.
 */
int thermal_add_hwmon_sysfs(struct thermal_zone_device *tz)
{
	struct thermal_hwmon_device *hwmon;
	struct thermal_hwmon_temp *temp;
	int new_hwmon_device = 1;
	int result;

	hwmon = thermal_hwmon_lookup_by_type(tz);
	if (hwmon) {
		/* Reuse the existing hwmon device for this zone type. */
		new_hwmon_device = 0;
		goto register_sys_interface;
	}

	hwmon = kzalloc(sizeof(*hwmon), GFP_KERNEL);
	if (!hwmon)
		return -ENOMEM;

	INIT_LIST_HEAD(&hwmon->tz_list);
	strscpy(hwmon->type, tz->type, THERMAL_NAME_LENGTH);
	strreplace(hwmon->type, '-', '_');
	hwmon->device = hwmon_device_register_for_thermal(&tz->device,
							  hwmon->type, hwmon);
	if (IS_ERR(hwmon->device)) {
		result = PTR_ERR(hwmon->device);
		goto free_mem;
	}

register_sys_interface:
	temp = kzalloc(sizeof(*temp), GFP_KERNEL);
	if (!temp) {
		result = -ENOMEM;
		goto unregister_name;
	}

	temp->tz = tz;
	hwmon->count++;	/* channel index for this hwmon device, 1-based */

	snprintf(temp->temp_input.name, sizeof(temp->temp_input.name),
		 "temp%d_input", hwmon->count);
	temp->temp_input.attr.attr.name = temp->temp_input.name;
	temp->temp_input.attr.attr.mode = 0444;
	temp->temp_input.attr.show = temp_input_show;
	sysfs_attr_init(&temp->temp_input.attr.attr);
	result = device_create_file(hwmon->device, &temp->temp_input.attr);
	if (result)
		goto free_temp_mem;

	if (thermal_zone_crit_temp_valid(tz)) {
		snprintf(temp->temp_crit.name, sizeof(temp->temp_crit.name),
			 "temp%d_crit", hwmon->count);
		temp->temp_crit.attr.attr.name = temp->temp_crit.name;
		temp->temp_crit.attr.attr.mode = 0444;
		temp->temp_crit.attr.show = temp_crit_show;
		sysfs_attr_init(&temp->temp_crit.attr.attr);
		result = device_create_file(hwmon->device,
					    &temp->temp_crit.attr);
		if (result)
			goto unregister_input;
	}

	mutex_lock(&thermal_hwmon_list_lock);
	if (new_hwmon_device)
		list_add_tail(&hwmon->node, &thermal_hwmon_list);
	list_add_tail(&temp->hwmon_node, &hwmon->tz_list);
	mutex_unlock(&thermal_hwmon_list_lock);

	return 0;

unregister_input:
	device_remove_file(hwmon->device, &temp->temp_input.attr);
free_temp_mem:
	kfree(temp);
unregister_name:
	if (new_hwmon_device)
		hwmon_device_unregister(hwmon->device);
free_mem:
	kfree(hwmon);

	return result;
}
EXPORT_SYMBOL_GPL(thermal_add_hwmon_sysfs);

/*
 * Detach @tz from its hwmon device, removing its attributes; the hwmon
 * device itself is unregistered once its last zone is gone.
 */
void thermal_remove_hwmon_sysfs(struct thermal_zone_device *tz)
{
	struct thermal_hwmon_device *hwmon;
	struct thermal_hwmon_temp *temp;

	hwmon = thermal_hwmon_lookup_by_type(tz);
	if (unlikely(!hwmon)) {
		/* Should never happen... */
		dev_dbg(&tz->device, "hwmon device lookup failed!\n");
		return;
	}

	temp = thermal_hwmon_lookup_temp(hwmon, tz);
	if (unlikely(!temp)) {
		/* Should never happen... */
		dev_dbg(&tz->device, "temperature input lookup failed!\n");
		return;
	}

	device_remove_file(hwmon->device, &temp->temp_input.attr);
	if (thermal_zone_crit_temp_valid(tz))
		device_remove_file(hwmon->device, &temp->temp_crit.attr);

	mutex_lock(&thermal_hwmon_list_lock);
	list_del(&temp->hwmon_node);
	kfree(temp);
	if (!list_empty(&hwmon->tz_list)) {
		/* Other zones still use this hwmon device; keep it. */
		mutex_unlock(&thermal_hwmon_list_lock);
		return;
	}
	list_del(&hwmon->node);
	mutex_unlock(&thermal_hwmon_list_lock);

	hwmon_device_unregister(hwmon->device);
	kfree(hwmon);
}
EXPORT_SYMBOL_GPL(thermal_remove_hwmon_sysfs);

/* devres release callback: undo thermal_add_hwmon_sysfs() */
static void devm_thermal_hwmon_release(struct device *dev, void *res)
{
	thermal_remove_hwmon_sysfs(*(struct thermal_zone_device **)res);
}

/*
 * Device-managed variant of thermal_add_hwmon_sysfs(); the sysfs
 * attributes are removed automatically when @dev is unbound.
 */
int devm_thermal_add_hwmon_sysfs(struct device *dev, struct thermal_zone_device *tz)
{
	struct thermal_zone_device **ptr;
	int ret;

	ptr = devres_alloc(devm_thermal_hwmon_release, sizeof(*ptr),
			   GFP_KERNEL);
	if (!ptr) {
		dev_warn(dev, "Failed to allocate device resource data\n");
		return -ENOMEM;
	}

	ret = thermal_add_hwmon_sysfs(tz);
	if (ret) {
		dev_warn(dev, "Failed to add hwmon sysfs attributes\n");
		devres_free(ptr);
		return ret;
	}

	*ptr = tz;
	devres_add(dev, ptr);

	return ret;
}
EXPORT_SYMBOL_GPL(devm_thermal_add_hwmon_sysfs);

MODULE_IMPORT_NS(HWMON_THERMAL);
linux-master
drivers/thermal/thermal_hwmon.c
// SPDX-License-Identifier: GPL-2.0
/*
 * devfreq_cooling: Thermal cooling device implementation for devices using
 *                  devfreq
 *
 * Copyright (C) 2014-2015 ARM Limited
 *
 * TODO:
 *    - If OPPs are added or removed after devfreq cooling has
 *      registered, the devfreq cooling won't react to it.
 */

#include <linux/devfreq.h>
#include <linux/devfreq_cooling.h>
#include <linux/energy_model.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/pm_opp.h>
#include <linux/pm_qos.h>
#include <linux/thermal.h>
#include <linux/units.h>

#include "thermal_trace.h"

/* res_util is stored scaled by 100 to reduce integer-division error. */
#define SCALE_ERROR_MITIGATION	100

/**
 * struct devfreq_cooling_device - Devfreq cooling device
 * @cdev:	Pointer to associated thermal cooling device.
 * @cooling_ops: devfreq callbacks to thermal cooling device ops
 * @devfreq:	Pointer to associated devfreq device.
 * @cooling_state:	Current cooling state.
 * @freq_table:	Pointer to a table with the frequencies sorted in descending
 *		order.  You can index the table by cooling device state
 * @max_state:	It is the last index, that is, one less than the number of the
 *		OPPs
 * @power_ops:	Pointer to devfreq_cooling_power, a more precised model.
 * @res_util:	Resource utilization scaling factor for the power.
 *		It is multiplied by 100 to minimize the error. It is used
 *		for estimation of the power budget instead of using
 *		'utilization' (which is 'busy_time' / 'total_time').
 *		The 'res_util' range is from 100 to power * 100	for the
 *		corresponding 'state'.
 * @capped_state:	index to cooling state with in dynamic power budget
 * @req_max_freq:	PM QoS request for limiting the maximum frequency
 *			of the devfreq device.
 * @em_pd:		Energy Model for the associated Devfreq device
 */
struct devfreq_cooling_device {
	struct thermal_cooling_device *cdev;
	struct thermal_cooling_device_ops cooling_ops;
	struct devfreq *devfreq;
	unsigned long cooling_state;
	u32 *freq_table;
	size_t max_state;
	struct devfreq_cooling_power *power_ops;
	u32 res_util;
	int capped_state;
	struct dev_pm_qos_request req_max_freq;
	struct em_perf_domain *em_pd;
};

/* Cooling device callback: report the deepest (slowest) cooling state. */
static int devfreq_cooling_get_max_state(struct thermal_cooling_device *cdev,
					 unsigned long *state)
{
	struct devfreq_cooling_device *dfc = cdev->devdata;

	*state = dfc->max_state;

	return 0;
}

/* Cooling device callback: report the currently applied cooling state. */
static int devfreq_cooling_get_cur_state(struct thermal_cooling_device *cdev,
					 unsigned long *state)
{
	struct devfreq_cooling_device *dfc = cdev->devdata;

	*state = dfc->cooling_state;

	return 0;
}

/*
 * Cooling device callback: apply @state by capping the devfreq device's
 * maximum frequency through a PM QoS request.  State 0 is no throttling;
 * higher states select lower frequencies.
 */
static int devfreq_cooling_set_cur_state(struct thermal_cooling_device *cdev,
					 unsigned long state)
{
	struct devfreq_cooling_device *dfc = cdev->devdata;
	struct devfreq *df = dfc->devfreq;
	struct device *dev = df->dev.parent;
	unsigned long freq;
	int perf_idx;

	if (state == dfc->cooling_state)
		return 0;

	dev_dbg(dev, "Setting cooling state %lu\n", state);

	if (state > dfc->max_state)
		return -EINVAL;

	if (dfc->em_pd) {
		/* EM table is ascending, cooling states are descending. */
		perf_idx = dfc->max_state - state;
		freq = dfc->em_pd->table[perf_idx].frequency * 1000;
	} else {
		/* freq_table is already sorted descending by state. */
		freq = dfc->freq_table[state];
	}

	dev_pm_qos_update_request(&dfc->req_max_freq,
				  DIV_ROUND_UP(freq, HZ_PER_KHZ));

	dfc->cooling_state = state;

	return 0;
}

/**
 * get_perf_idx() - get the performance index corresponding to a frequency
 * @em_pd:	Pointer to device's Energy Model
 * @freq:	frequency in kHz
 *
 * Return: the performance index associated with the @freq, or
 * -EINVAL if it wasn't found.
 */
static int get_perf_idx(struct em_perf_domain *em_pd, unsigned long freq)
{
	int i;

	for (i = 0; i < em_pd->nr_perf_states; i++) {
		if (em_pd->table[i].frequency == freq)
			return i;
	}

	return -EINVAL;
}

/*
 * Look up the OPP voltage (in mV) for @freq (in Hz).  Returns 0 on any
 * failure, which callers treat as an error.
 */
static unsigned long get_voltage(struct devfreq *df, unsigned long freq)
{
	struct device *dev = df->dev.parent;
	unsigned long voltage;
	struct dev_pm_opp *opp;

	opp = dev_pm_opp_find_freq_exact(dev, freq, true);
	if (PTR_ERR(opp) == -ERANGE)
		/* Retry with disabled OPPs included. */
		opp = dev_pm_opp_find_freq_exact(dev, freq, false);

	if (IS_ERR(opp)) {
		dev_err_ratelimited(dev,
				    "Failed to find OPP for frequency %lu: %ld\n",
				    freq, PTR_ERR(opp));
		return 0;
	}

	voltage = dev_pm_opp_get_voltage(opp) / 1000; /* mV */
	dev_pm_opp_put(opp);

	if (voltage == 0) {
		dev_err_ratelimited(dev,
				    "Failed to get voltage for frequency %lu\n",
				    freq);
	}

	return voltage;
}

/*
 * Normalize the devfreq load statistics so that busy_time becomes a
 * utilization fraction scaled to a fixed total_time of 1024 (i.e. a
 * 10-bit fixed-point load), clamped to a minimum of 1.
 */
static void _normalize_load(struct devfreq_dev_status *status)
{
	/* Shift down large counters first to avoid overflow below. */
	if (status->total_time > 0xfffff) {
		status->total_time >>= 10;
		status->busy_time >>= 10;
	}

	status->busy_time <<= 10;
	status->busy_time /= status->total_time ? : 1;

	status->busy_time = status->busy_time ? : 1;
	status->total_time = 1024;
}

/*
 * Cooling device callback: estimate the device's current power draw in
 * milli-Watts.  Uses the driver's get_real_power() when provided (and
 * updates res_util for power2state()), otherwise scales the EM power of
 * the current OPP by the measured utilization.
 */
static int devfreq_cooling_get_requested_power(struct thermal_cooling_device *cdev,
					       u32 *power)
{
	struct devfreq_cooling_device *dfc = cdev->devdata;
	struct devfreq *df = dfc->devfreq;
	struct devfreq_dev_status status;
	unsigned long state;
	unsigned long freq;
	unsigned long voltage;
	int res, perf_idx;

	mutex_lock(&df->lock);
	status = df->last_status;
	mutex_unlock(&df->lock);

	freq = status.current_frequency;

	if (dfc->power_ops && dfc->power_ops->get_real_power) {
		voltage = get_voltage(df, freq);
		if (voltage == 0) {
			res = -EINVAL;
			goto fail;
		}

		res = dfc->power_ops->get_real_power(df, power, freq, voltage);
		if (!res) {
			state = dfc->capped_state;

			/* Convert EM power into milli-Watts first */
			dfc->res_util = dfc->em_pd->table[state].power;
			dfc->res_util /= MICROWATT_PER_MILLIWATT;

			dfc->res_util *= SCALE_ERROR_MITIGATION;

			if (*power > 1)
				dfc->res_util /= *power;
		} else {
			goto fail;
		}
	} else {
		/* Energy Model frequencies are in kHz */
		perf_idx = get_perf_idx(dfc->em_pd, freq / 1000);
		if (perf_idx < 0) {
			res = -EAGAIN;
			goto fail;
		}

		_normalize_load(&status);

		/* Convert EM power into milli-Watts first */
		*power = dfc->em_pd->table[perf_idx].power;
		*power /= MICROWATT_PER_MILLIWATT;

		/* Scale power for utilization */
		*power *= status.busy_time;
		*power >>= 10;
	}

	trace_thermal_power_devfreq_get_power(cdev, &status, freq, *power);

	return 0;
fail:
	/* It is safe to set max in this case */
	dfc->res_util = SCALE_ERROR_MITIGATION;
	return res;
}

/*
 * Cooling device callback: convert a cooling @state into the maximum
 * power (mW) the device may draw in that state, taken from the EM table.
 */
static int devfreq_cooling_state2power(struct thermal_cooling_device *cdev,
				       unsigned long state, u32 *power)
{
	struct devfreq_cooling_device *dfc = cdev->devdata;
	int perf_idx;

	if (state > dfc->max_state)
		return -EINVAL;

	perf_idx = dfc->max_state - state;
	*power = dfc->em_pd->table[perf_idx].power;
	*power /= MICROWATT_PER_MILLIWATT;

	return 0;
}

/*
 * Cooling device callback: pick the shallowest cooling state whose EM
 * power fits within the @power budget (mW), accounting for current
 * utilization, and remember it in capped_state.
 */
static int devfreq_cooling_power2state(struct thermal_cooling_device *cdev,
				       u32 power, unsigned long *state)
{
	struct devfreq_cooling_device *dfc = cdev->devdata;
	struct devfreq *df = dfc->devfreq;
	struct devfreq_dev_status status;
	unsigned long freq, em_power_mw;
	s32 est_power;
	int i;

	mutex_lock(&df->lock);
	status = df->last_status;
	mutex_unlock(&df->lock);

	freq = status.current_frequency;

	if (dfc->power_ops && dfc->power_ops->get_real_power) {
		/* Scale for resource utilization */
		est_power = power * dfc->res_util;
		est_power /= SCALE_ERROR_MITIGATION;
	} else {
		/* Scale dynamic power for utilization */
		_normalize_load(&status);
		est_power = power << 10;
		est_power /= status.busy_time;
	}

	/*
	 * Find the first cooling state that is within the power
	 * budget. The EM power table is sorted ascending.
	 */
	for (i = dfc->max_state; i > 0; i--) {
		/* Convert EM power to milli-Watts to make safe comparison */
		em_power_mw = dfc->em_pd->table[i].power;
		em_power_mw /= MICROWATT_PER_MILLIWATT;
		if (est_power >= em_power_mw)
			break;
	}

	*state = dfc->max_state - i;
	dfc->capped_state = *state;

	trace_thermal_power_devfreq_limit(cdev, freq, *state, power);
	return 0;
}

/**
 * devfreq_cooling_gen_tables() - Generate frequency table.
 * @dfc:	Pointer to devfreq cooling device.
 * @num_opps:	Number of OPPs
 *
 * Generate frequency table which holds the frequencies in descending
 * order. That way its indexed by cooling device state. This is for
 * compatibility with drivers which do not register Energy Model.
 *
 * Return: 0 on success, negative error code on failure.
 */
static int devfreq_cooling_gen_tables(struct devfreq_cooling_device *dfc,
				      int num_opps)
{
	struct devfreq *df = dfc->devfreq;
	struct device *dev = df->dev.parent;
	unsigned long freq;
	int i;

	dfc->freq_table = kcalloc(num_opps, sizeof(*dfc->freq_table),
				  GFP_KERNEL);
	if (!dfc->freq_table)
		return -ENOMEM;

	/*
	 * Walk OPPs from the highest down: find_freq_floor() rounds @freq
	 * down to the nearest OPP, and the freq-- steps below it for the
	 * next iteration.
	 */
	for (i = 0, freq = ULONG_MAX; i < num_opps; i++, freq--) {
		struct dev_pm_opp *opp;

		opp = dev_pm_opp_find_freq_floor(dev, &freq);
		if (IS_ERR(opp)) {
			kfree(dfc->freq_table);
			return PTR_ERR(opp);
		}

		dev_pm_opp_put(opp);
		dfc->freq_table[i] = freq;
	}

	return 0;
}

/**
 * of_devfreq_cooling_register_power() - Register devfreq cooling device,
 *                                      with OF and power information.
 * @np:	Pointer to OF device_node.
 * @df:	Pointer to devfreq device.
 * @dfc_power:	Pointer to devfreq_cooling_power.
 *
 * Register a devfreq cooling device.  The available OPPs must be
 * registered on the device.
 *
 * If @dfc_power is provided, the cooling device is registered with the
 * power extensions.  For the power extensions to work correctly,
 * devfreq should use the simple_ondemand governor, other governors
 * are not currently supported.
 *
 * Return: the cooling device pointer on success, ERR_PTR() on failure.
 */
struct thermal_cooling_device *
of_devfreq_cooling_register_power(struct device_node *np, struct devfreq *df,
				  struct devfreq_cooling_power *dfc_power)
{
	struct thermal_cooling_device *cdev;
	struct device *dev = df->dev.parent;
	struct devfreq_cooling_device *dfc;
	struct em_perf_domain *em;
	struct thermal_cooling_device_ops *ops;
	char *name;
	int err, num_opps;

	dfc = kzalloc(sizeof(*dfc), GFP_KERNEL);
	if (!dfc)
		return ERR_PTR(-ENOMEM);

	dfc->devfreq = df;

	ops = &dfc->cooling_ops;
	ops->get_max_state = devfreq_cooling_get_max_state;
	ops->get_cur_state = devfreq_cooling_get_cur_state;
	ops->set_cur_state = devfreq_cooling_set_cur_state;

	em = em_pd_get(dev);
	if (em && !em_is_artificial(em)) {
		/* With a real EM, enable the IPA power extensions. */
		dfc->em_pd = em;
		ops->get_requested_power =
			devfreq_cooling_get_requested_power;
		ops->state2power = devfreq_cooling_state2power;
		ops->power2state = devfreq_cooling_power2state;

		dfc->power_ops = dfc_power;

		num_opps = em_pd_nr_perf_states(dfc->em_pd);
	} else {
		/* Backward compatibility for drivers which do not use IPA */
		dev_dbg(dev, "missing proper EM for cooling device\n");

		num_opps = dev_pm_opp_get_opp_count(dev);

		err = devfreq_cooling_gen_tables(dfc, num_opps);
		if (err)
			goto free_dfc;
	}

	if (num_opps <= 0) {
		err = -EINVAL;
		goto free_dfc;
	}

	/* max_state is an index, not a counter */
	dfc->max_state = num_opps - 1;

	err = dev_pm_qos_add_request(dev, &dfc->req_max_freq,
				     DEV_PM_QOS_MAX_FREQUENCY,
				     PM_QOS_MAX_FREQUENCY_DEFAULT_VALUE);
	if (err < 0)
		goto free_table;

	err = -ENOMEM;
	name = kasprintf(GFP_KERNEL, "devfreq-%s", dev_name(dev));
	if (!name)
		goto remove_qos_req;

	cdev = thermal_of_cooling_device_register(np, name, dfc, ops);
	kfree(name);

	if (IS_ERR(cdev)) {
		err = PTR_ERR(cdev);
		dev_err(dev,
			"Failed to register devfreq cooling device (%d)\n",
			err);
		goto remove_qos_req;
	}

	dfc->cdev = cdev;

	return cdev;

remove_qos_req:
	dev_pm_qos_remove_request(&dfc->req_max_freq);
free_table:
	kfree(dfc->freq_table);
free_dfc:
	kfree(dfc);

	return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(of_devfreq_cooling_register_power);

/**
 * of_devfreq_cooling_register() - Register devfreq cooling device,
 *                                with OF information.
 * @np:	Pointer to OF device_node.
 * @df:	Pointer to devfreq device.
 *
 * Convenience wrapper around of_devfreq_cooling_register_power()
 * without the power extensions.
 */
struct thermal_cooling_device *
of_devfreq_cooling_register(struct device_node *np, struct devfreq *df)
{
	return of_devfreq_cooling_register_power(np, df, NULL);
}
EXPORT_SYMBOL_GPL(of_devfreq_cooling_register);

/**
 * devfreq_cooling_register() - Register devfreq cooling device.
 * @df:	Pointer to devfreq device.
 *
 * Convenience wrapper: registers without an OF node and without the
 * power extensions.
 */
struct thermal_cooling_device *devfreq_cooling_register(struct devfreq *df)
{
	return of_devfreq_cooling_register(NULL, df);
}
EXPORT_SYMBOL_GPL(devfreq_cooling_register);

/**
 * devfreq_cooling_em_register() - Register devfreq cooling device with
 *		power information and automatically register Energy Model (EM)
 * @df:		Pointer to devfreq device.
 * @dfc_power:	Pointer to devfreq_cooling_power.
 *
 * Register a devfreq cooling device and automatically register EM.  The
 * available OPPs must be registered for the device.
 *
 * If @dfc_power is provided, the cooling device is registered with the
 * power extensions.  It is using the simple Energy Model which requires
 * "dynamic-power-coefficient" a devicetree property.  To not break drivers
 * which miss that DT property, the function won't bail out when the EM
 * registration failed.  The cooling device will be registered if everything
 * else is OK.
 */
struct thermal_cooling_device *
devfreq_cooling_em_register(struct devfreq *df,
			    struct devfreq_cooling_power *dfc_power)
{
	struct thermal_cooling_device *cdev;
	struct device *dev;
	int ret;

	if (IS_ERR_OR_NULL(df))
		return ERR_PTR(-EINVAL);

	dev = df->dev.parent;

	/* EM registration failure is tolerated (see kernel-doc above). */
	ret = dev_pm_opp_of_register_em(dev, NULL);
	if (ret)
		dev_dbg(dev, "Unable to register EM for devfreq cooling device (%d)\n",
			ret);

	cdev = of_devfreq_cooling_register_power(dev->of_node, df, dfc_power);

	/* Roll back the EM registration if cooling registration failed. */
	if (IS_ERR_OR_NULL(cdev))
		em_dev_unregister_perf_domain(dev);

	return cdev;
}
EXPORT_SYMBOL_GPL(devfreq_cooling_em_register);

/**
 * devfreq_cooling_unregister() - Unregister devfreq cooling device.
 * @cdev: Pointer to devfreq cooling device to unregister.
 *
 * Unregisters devfreq cooling device and related Energy Model if it was
 * present.
 */
void devfreq_cooling_unregister(struct thermal_cooling_device *cdev)
{
	struct devfreq_cooling_device *dfc;
	struct device *dev;

	if (IS_ERR_OR_NULL(cdev))
		return;

	dfc = cdev->devdata;
	dev = dfc->devfreq->dev.parent;

	/* Unregister the cooling device before tearing down its state. */
	thermal_cooling_device_unregister(dfc->cdev);
	dev_pm_qos_remove_request(&dfc->req_max_freq);
	em_dev_unregister_perf_domain(dev);

	kfree(dfc->freq_table);
	kfree(dfc);
}
EXPORT_SYMBOL_GPL(devfreq_cooling_unregister);
linux-master
drivers/thermal/devfreq_cooling.c
// SPDX-License-Identifier: GPL-2.0-only /* * fair_share.c - A simple weight based Thermal governor * * Copyright (C) 2012 Intel Corp * Copyright (C) 2012 Durgadoss R <[email protected]> * * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ * * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */ #include <linux/thermal.h> #include "thermal_trace.h" #include "thermal_core.h" /** * get_trip_level: - obtains the current trip level for a zone * @tz: thermal zone device */ static int get_trip_level(struct thermal_zone_device *tz) { struct thermal_trip trip; int count; for (count = 0; count < tz->num_trips; count++) { __thermal_zone_get_trip(tz, count, &trip); if (tz->temperature < trip.temperature) break; } /* * count > 0 only if temperature is greater than first trip * point, in which case, trip_point = count - 1 */ if (count > 0) trace_thermal_zone_trip(tz, count - 1, trip.type); return count; } static long get_target_state(struct thermal_zone_device *tz, struct thermal_cooling_device *cdev, int percentage, int level) { return (long)(percentage * level * cdev->max_state) / (100 * tz->num_trips); } /** * fair_share_throttle - throttles devices associated with the given zone * @tz: thermal_zone_device * @trip: trip point index * * Throttling Logic: This uses three parameters to calculate the new * throttle state of the cooling devices associated with the given zone. * * Parameters used for Throttling: * P1. max_state: Maximum throttle state exposed by the cooling device. * P2. percentage[i]/100: * How 'effective' the 'i'th device is, in cooling the given zone. * P3. cur_trip_level/max_no_of_trips: * This describes the extent to which the devices should be throttled. * We do not want to throttle too much when we trip a lower temperature, * whereas the throttling is at full swing if we trip critical levels. 
* (Heavily assumes the trip points are in ascending order) * new_state of cooling device = P3 * P2 * P1 */ static int fair_share_throttle(struct thermal_zone_device *tz, int trip) { struct thermal_instance *instance; int total_weight = 0; int total_instance = 0; int cur_trip_level = get_trip_level(tz); lockdep_assert_held(&tz->lock); list_for_each_entry(instance, &tz->thermal_instances, tz_node) { if (instance->trip != trip) continue; total_weight += instance->weight; total_instance++; } list_for_each_entry(instance, &tz->thermal_instances, tz_node) { int percentage; struct thermal_cooling_device *cdev = instance->cdev; if (instance->trip != trip) continue; if (!total_weight) percentage = 100 / total_instance; else percentage = (instance->weight * 100) / total_weight; instance->target = get_target_state(tz, cdev, percentage, cur_trip_level); mutex_lock(&cdev->lock); __thermal_cdev_update(cdev); mutex_unlock(&cdev->lock); } return 0; } static struct thermal_governor thermal_gov_fair_share = { .name = "fair_share", .throttle = fair_share_throttle, }; THERMAL_GOVERNOR_DECLARE(thermal_gov_fair_share);
linux-master
drivers/thermal/gov_fair_share.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * db8500_thermal.c - DB8500 Thermal Management Implementation
 *
 * Copyright (C) 2012 ST-Ericsson
 * Copyright (C) 2012-2019 Linaro Ltd.
 *
 * Authors: Hongbo Zhang, Linus Walleij
 */

#include <linux/cpu_cooling.h>
#include <linux/interrupt.h>
#include <linux/mfd/dbx500-prcmu.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/thermal.h>

#define PRCMU_DEFAULT_MEASURE_TIME	0xFFF
#define PRCMU_DEFAULT_LOW_TEMP		0

/*
 * db8500_thermal_points - the interpolation points that trigger
 * interrupts (in millicelsius, ascending)
 */
static const unsigned long db8500_thermal_points[] = {
	15000,
	20000,
	25000,
	30000,
	35000,
	40000,
	45000,
	50000,
	55000,
	60000,
	65000,
	70000,
	75000,
	80000,
	/*
	 * This is where things start to get really bad for the
	 * SoC and the thermal zones should be set up to trigger
	 * critical temperature at 85000 mC so we don't get above
	 * this point.
	 */
	85000,
	90000,
	95000,
	100000,
};

struct db8500_thermal_zone {
	struct thermal_zone_device *tz;	/* registered thermal zone */
	struct device *dev;		/* platform device, for logging */
	unsigned long interpolated_temp; /* midpoint of current window, mC */
	unsigned int cur_index;		/* index into db8500_thermal_points */
};

/* Callback to get current temperature */
static int db8500_thermal_get_temp(struct thermal_zone_device *tz, int *temp)
{
	struct db8500_thermal_zone *th = thermal_zone_device_priv(tz);

	/*
	 * TODO: There is no PRCMU interface to get temperature data currently,
	 * so a pseudo temperature is returned , it works for thermal framework
	 * and this will be fixed when the PRCMU interface is available.
	 */
	*temp = th->interpolated_temp;

	return 0;
}

static const struct thermal_zone_device_ops thdev_ops = {
	.get_temp = db8500_thermal_get_temp,
};

/*
 * Reprogram the PRCMU hotmon window to [next_low, next_high] (mC) and
 * restart temperature sensing; the reported temperature becomes the
 * window midpoint.
 */
static void db8500_thermal_update_config(struct db8500_thermal_zone *th,
					 unsigned int idx,
					 unsigned long next_low,
					 unsigned long next_high)
{
	prcmu_stop_temp_sense();

	th->cur_index = idx;
	th->interpolated_temp = (next_low + next_high)/2;

	/*
	 * The PRCMU accept absolute temperatures in celsius so divide
	 * down the millicelsius with 1000
	 */
	prcmu_config_hotmon((u8)(next_low/1000), (u8)(next_high/1000));
	prcmu_start_temp_sense(PRCMU_DEFAULT_MEASURE_TIME);
}

/*
 * "Low" hotmon IRQ: temperature fell below the window.  Step one
 * interpolation point down and rearm the window.
 */
static irqreturn_t prcmu_low_irq_handler(int irq, void *irq_data)
{
	struct db8500_thermal_zone *th = irq_data;
	unsigned int idx = th->cur_index;
	unsigned long next_low, next_high;

	if (idx == 0)
		/* Meaningless for thermal management, ignoring it */
		return IRQ_HANDLED;

	if (idx == 1) {
		next_high = db8500_thermal_points[0];
		next_low = PRCMU_DEFAULT_LOW_TEMP;
	} else {
		next_high = db8500_thermal_points[idx - 1];
		next_low = db8500_thermal_points[idx - 2];
	}
	idx -= 1;

	db8500_thermal_update_config(th, idx, next_low, next_high);
	dev_dbg(th->dev,
		"PRCMU set max %ld, min %ld\n", next_high, next_low);

	thermal_zone_device_update(th->tz, THERMAL_EVENT_UNSPECIFIED);

	return IRQ_HANDLED;
}

/*
 * "High" hotmon IRQ: temperature rose above the window.  Step one
 * interpolation point up, or roof out just above the last point.
 */
static irqreturn_t prcmu_high_irq_handler(int irq, void *irq_data)
{
	struct db8500_thermal_zone *th = irq_data;
	unsigned int idx = th->cur_index;
	unsigned long next_low, next_high;
	int num_points = ARRAY_SIZE(db8500_thermal_points);

	if (idx < num_points - 1) {
		next_high = db8500_thermal_points[idx+1];
		next_low = db8500_thermal_points[idx];
		idx += 1;

		db8500_thermal_update_config(th, idx, next_low, next_high);

		dev_dbg(th->dev,
			"PRCMU set max %ld, min %ld\n", next_high, next_low);
	} else if (idx == num_points - 1)
		/* So we roof out 1 degree over the max point */
		th->interpolated_temp = db8500_thermal_points[idx] + 1;

	thermal_zone_device_update(th->tz, THERMAL_EVENT_UNSPECIFIED);

	return IRQ_HANDLED;
}

/*
 * Probe: request both hotmon IRQs, register the OF thermal zone, and
 * start measuring from the lowest window.
 */
static int db8500_thermal_probe(struct platform_device *pdev)
{
	struct db8500_thermal_zone *th = NULL;
	struct device *dev = &pdev->dev;
	int low_irq, high_irq, ret = 0;

	th = devm_kzalloc(dev, sizeof(*th), GFP_KERNEL);
	if (!th)
		return -ENOMEM;

	th->dev = dev;

	low_irq = platform_get_irq_byname(pdev, "IRQ_HOTMON_LOW");
	if (low_irq < 0)
		return low_irq;

	ret = devm_request_threaded_irq(dev, low_irq, NULL,
		prcmu_low_irq_handler, IRQF_NO_SUSPEND | IRQF_ONESHOT,
		"dbx500_temp_low", th);
	if (ret < 0) {
		dev_err(dev, "failed to allocate temp low irq\n");
		return ret;
	}

	high_irq = platform_get_irq_byname(pdev, "IRQ_HOTMON_HIGH");
	if (high_irq < 0)
		return high_irq;

	ret = devm_request_threaded_irq(dev, high_irq, NULL,
		prcmu_high_irq_handler, IRQF_NO_SUSPEND | IRQF_ONESHOT,
		"dbx500_temp_high", th);
	if (ret < 0) {
		dev_err(dev, "failed to allocate temp high irq\n");
		return ret;
	}

	/* register of thermal sensor and get info from DT */
	th->tz = devm_thermal_of_zone_register(dev, 0, th, &thdev_ops);
	if (IS_ERR(th->tz)) {
		dev_err(dev, "register thermal zone sensor failed\n");
		return PTR_ERR(th->tz);
	}
	dev_info(dev, "thermal zone sensor registered\n");

	/* Start measuring at the lowest point */
	db8500_thermal_update_config(th, 0, PRCMU_DEFAULT_LOW_TEMP,
				     db8500_thermal_points[0]);

	platform_set_drvdata(pdev, th);

	return 0;
}

/* Legacy platform PM: stop sensing while suspended. */
static int db8500_thermal_suspend(struct platform_device *pdev,
				  pm_message_t state)
{
	prcmu_stop_temp_sense();

	return 0;
}

static int db8500_thermal_resume(struct platform_device *pdev)
{
	struct db8500_thermal_zone *th = platform_get_drvdata(pdev);

	/* Resume and start measuring at the lowest point */
	db8500_thermal_update_config(th, 0, PRCMU_DEFAULT_LOW_TEMP,
				     db8500_thermal_points[0]);

	return 0;
}

static const struct of_device_id db8500_thermal_match[] = {
	{ .compatible = "stericsson,db8500-thermal" },
	{},
};
MODULE_DEVICE_TABLE(of, db8500_thermal_match);

static struct platform_driver db8500_thermal_driver = {
	.driver = {
		.name = "db8500-thermal",
		.of_match_table = db8500_thermal_match,
	},
	.probe = db8500_thermal_probe,
	.suspend = db8500_thermal_suspend,
	.resume = db8500_thermal_resume,
};

module_platform_driver(db8500_thermal_driver);

MODULE_AUTHOR("Hongbo Zhang <[email protected]>");
MODULE_DESCRIPTION("DB8500 thermal driver");
MODULE_LICENSE("GPL");
linux-master
drivers/thermal/db8500_thermal.c
// SPDX-License-Identifier: GPL-2.0
/*
 * A power allocator to manage temperature
 *
 * Copyright (C) 2014 ARM Ltd.
 *
 */

#define pr_fmt(fmt) "Power allocator: " fmt

#include <linux/slab.h>
#include <linux/thermal.h>

#define CREATE_TRACE_POINTS
#include "thermal_trace_ipa.h"

#include "thermal_core.h"

/* Marker for "no such trip point" in the governor parameters below */
#define INVALID_TRIP -1

/* Fixed-point format used by the PID arithmetic: 10 fractional bits */
#define FRAC_BITS 10
#define int_to_frac(x) ((x) << FRAC_BITS)
#define frac_to_int(x) ((x) >> FRAC_BITS)

/**
 * mul_frac() - multiply two fixed-point numbers
 * @x:	first multiplicand
 * @y:	second multiplicand
 *
 * Return: the result of multiplying two fixed-point numbers. The
 * result is also a fixed-point number.
 */
static inline s64 mul_frac(s64 x, s64 y)
{
	return (x * y) >> FRAC_BITS;
}

/**
 * div_frac() - divide two fixed-point numbers
 * @x:	the dividend
 * @y:	the divisor
 *
 * Return: the result of dividing two fixed-point numbers. The
 * result is also a fixed-point number.
 */
static inline s64 div_frac(s64 x, s64 y)
{
	return div_s64(x << FRAC_BITS, y);
}

/**
 * struct power_allocator_params - parameters for the power allocator governor
 * @allocated_tzp:	whether we have allocated tzp for this thermal zone and
 *			it needs to be freed on unbind
 * @err_integral:	accumulated error in the PID controller.
 * @prev_err:	error in the previous iteration of the PID controller.
 *		Used to calculate the derivative term.
 * @trip_switch_on:	first passive trip point of the thermal zone. The
 *			governor switches on when this trip point is crossed.
 *			If the thermal zone only has one passive trip point,
 *			@trip_switch_on should be INVALID_TRIP.
 * @trip_max_desired_temperature:	last passive trip point of the thermal
 *					zone. The temperature we are
 *					controlling for.
 * @sustainable_power:	Sustainable power (heat) that this thermal zone can
 *			dissipate
 */
struct power_allocator_params {
	bool allocated_tzp;
	s64 err_integral;
	s32 prev_err;
	int trip_switch_on;
	int trip_max_desired_temperature;
	u32 sustainable_power;
};

/**
 * estimate_sustainable_power() - Estimate the sustainable power of a thermal zone
 * @tz: thermal zone we are operating in
 *
 * For thermal zones that don't provide a sustainable_power in their
 * thermal_zone_params, estimate one. Calculate it using the minimum
 * power of all the cooling devices as that gives a valid value that
 * can give some degree of functionality. For optimal performance of
 * this governor, provide a sustainable_power in the thermal zone's
 * thermal_zone_params.
 */
static u32 estimate_sustainable_power(struct thermal_zone_device *tz)
{
	u32 sustainable_power = 0;
	struct thermal_instance *instance;
	struct power_allocator_params *params = tz->governor_data;

	list_for_each_entry(instance, &tz->thermal_instances, tz_node) {
		struct thermal_cooling_device *cdev = instance->cdev;
		u32 min_power;

		/* Only actors bound to the control trip contribute */
		if (instance->trip != params->trip_max_desired_temperature)
			continue;

		if (!cdev_is_power_actor(cdev))
			continue;

		/* Power at the deepest (upper) state is the actor's minimum */
		if (cdev->ops->state2power(cdev, instance->upper, &min_power))
			continue;

		sustainable_power += min_power;
	}

	return sustainable_power;
}

/**
 * estimate_pid_constants() - Estimate the constants for the PID controller
 * @tz:		thermal zone for which to estimate the constants
 * @sustainable_power:	sustainable power for the thermal zone
 * @trip_switch_on:	trip point number for the switch on temperature
 * @control_temp:	target temperature for the power allocator governor
 *
 * This function is used to update the estimation of the PID
 * controller constants in struct thermal_zone_parameters.
*/ static void estimate_pid_constants(struct thermal_zone_device *tz, u32 sustainable_power, int trip_switch_on, int control_temp) { struct thermal_trip trip; u32 temperature_threshold = control_temp; int ret; s32 k_i; ret = __thermal_zone_get_trip(tz, trip_switch_on, &trip); if (!ret) temperature_threshold -= trip.temperature; /* * estimate_pid_constants() tries to find appropriate default * values for thermal zones that don't provide them. If a * system integrator has configured a thermal zone with two * passive trip points at the same temperature, that person * hasn't put any effort to set up the thermal zone properly * so just give up. */ if (!temperature_threshold) return; tz->tzp->k_po = int_to_frac(sustainable_power) / temperature_threshold; tz->tzp->k_pu = int_to_frac(2 * sustainable_power) / temperature_threshold; k_i = tz->tzp->k_pu / 10; tz->tzp->k_i = k_i > 0 ? k_i : 1; /* * The default for k_d and integral_cutoff is 0, so we can * leave them as they are. */ } /** * get_sustainable_power() - Get the right sustainable power * @tz: thermal zone for which to estimate the constants * @params: parameters for the power allocator governor * @control_temp: target temperature for the power allocator governor * * This function is used for getting the proper sustainable power value based * on variables which might be updated by the user sysfs interface. If that * happen the new value is going to be estimated and updated. It is also used * after thermal zone binding, where the initial values where set to 0. 
 */
static u32 get_sustainable_power(struct thermal_zone_device *tz,
				 struct power_allocator_params *params,
				 int control_temp)
{
	u32 sustainable_power;

	if (!tz->tzp->sustainable_power)
		sustainable_power = estimate_sustainable_power(tz);
	else
		sustainable_power = tz->tzp->sustainable_power;

	/* Check if it's init value 0 or there was update via sysfs */
	if (sustainable_power != params->sustainable_power) {
		estimate_pid_constants(tz, sustainable_power,
				       params->trip_switch_on, control_temp);

		/* Do the estimation only once and make available in sysfs */
		tz->tzp->sustainable_power = sustainable_power;
		params->sustainable_power = sustainable_power;
	}

	return sustainable_power;
}

/**
 * pid_controller() - PID controller
 * @tz:	thermal zone we are operating in
 * @control_temp:	the target temperature in millicelsius
 * @max_allocatable_power:	maximum allocatable power for this thermal zone
 *
 * This PID controller increases the available power budget so that the
 * temperature of the thermal zone gets as close as possible to
 * @control_temp and limits the power if it exceeds it. k_po is the
 * proportional term when we are overshooting, k_pu is the
 * proportional term when we are undershooting. integral_cutoff is a
 * threshold below which we stop accumulating the error. The
 * accumulated error is only valid if the requested power will make
 * the system warmer. If the system is mostly idle, there's no point
 * in accumulating positive error.
 *
 * Return: The power budget for the next period.
 */
static u32 pid_controller(struct thermal_zone_device *tz,
			  int control_temp,
			  u32 max_allocatable_power)
{
	s64 p, i, d, power_range;
	s32 err, max_power_frac;
	u32 sustainable_power;
	struct power_allocator_params *params = tz->governor_data;

	max_power_frac = int_to_frac(max_allocatable_power);

	sustainable_power = get_sustainable_power(tz, params, control_temp);

	err = control_temp - tz->temperature;
	err = int_to_frac(err);

	/* Calculate the proportional term */
	p = mul_frac(err < 0 ? tz->tzp->k_po : tz->tzp->k_pu, err);

	/*
	 * Calculate the integral term
	 *
	 * if the error is less than cut off allow integration (but
	 * the integral is limited to max power)
	 */
	i = mul_frac(tz->tzp->k_i, params->err_integral);

	if (err < int_to_frac(tz->tzp->integral_cutoff)) {
		s64 i_next = i + mul_frac(tz->tzp->k_i, err);

		if (abs(i_next) < max_power_frac) {
			i = i_next;
			params->err_integral += err;
		}
	}

	/*
	 * Calculate the derivative term
	 *
	 * We do err - prev_err, so with a positive k_d, a decreasing
	 * error (i.e. driving closer to the line) results in less
	 * power being applied, slowing down the controller)
	 */
	d = mul_frac(tz->tzp->k_d, err - params->prev_err);
	d = div_frac(d, jiffies_to_msecs(tz->passive_delay_jiffies));
	params->prev_err = err;

	power_range = p + i + d;

	/* feed-forward the known sustainable dissipatable power */
	power_range = sustainable_power + frac_to_int(power_range);

	power_range = clamp(power_range, (s64)0, (s64)max_allocatable_power);

	trace_thermal_power_allocator_pid(tz, frac_to_int(err),
					  frac_to_int(params->err_integral),
					  frac_to_int(p), frac_to_int(i),
					  frac_to_int(d), power_range);

	return power_range;
}

/**
 * power_actor_set_power() - limit the maximum power a cooling device consumes
 * @cdev:	pointer to &thermal_cooling_device
 * @instance:	thermal instance to update
 * @power:	the power in milliwatts
 *
 * Set the cooling device to consume at most @power milliwatts. The limit is
 * expected to be a cap at the maximum power consumption.
 *
 * Return: 0 on success, -EINVAL if the cooling device does not
 * implement the power actor API or -E* for other failures.
 */
static int power_actor_set_power(struct thermal_cooling_device *cdev,
				 struct thermal_instance *instance, u32 power)
{
	unsigned long state;
	int ret;

	/* Translate the power cap into a cooling state */
	ret = cdev->ops->power2state(cdev, power, &state);
	if (ret)
		return ret;

	instance->target = clamp_val(state, instance->lower, instance->upper);

	mutex_lock(&cdev->lock);
	__thermal_cdev_update(cdev);
	mutex_unlock(&cdev->lock);

	return 0;
}

/**
 * divvy_up_power() - divvy the allocated power between the actors
 * @req_power:	each actor's requested power
 * @max_power:	each actor's maximum available power
 * @num_actors:	size of the @req_power, @max_power and @granted_power's array
 * @total_req_power: sum of @req_power
 * @power_range:	total allocated power
 * @granted_power:	output array: each actor's granted power
 * @extra_actor_power:	an appropriately sized array to be used in the
 *			function as temporary storage of the extra power given
 *			to the actors
 *
 * This function divides the total allocated power (@power_range)
 * fairly between the actors. It first tries to give each actor a
 * share of the @power_range according to how much power it requested
 * compared to the rest of the actors. For example, if only one actor
 * requests power, then it receives all the @power_range. If
 * three actors each requests 1mW, each receives a third of the
 * @power_range.
 *
 * If any actor received more than their maximum power, then that
 * surplus is re-divvied among the actors based on how far they are
 * from their respective maximums.
 *
 * Granted power for each actor is written to @granted_power, which
 * should've been allocated by the calling function.
 */
static void divvy_up_power(u32 *req_power, u32 *max_power, int num_actors,
			   u32 total_req_power, u32 power_range,
			   u32 *granted_power, u32 *extra_actor_power)
{
	u32 extra_power, capped_extra_power;
	int i;

	/*
	 * Prevent division by 0 if none of the actors request power.
	 */
	if (!total_req_power)
		total_req_power = 1;

	capped_extra_power = 0;
	extra_power = 0;
	for (i = 0; i < num_actors; i++) {
		u64 req_range = (u64)req_power[i] * power_range;

		/* Proportional share, capped at the actor's maximum */
		granted_power[i] = DIV_ROUND_CLOSEST_ULL(req_range,
							 total_req_power);

		if (granted_power[i] > max_power[i]) {
			extra_power += granted_power[i] - max_power[i];
			granted_power[i] = max_power[i];
		}

		extra_actor_power[i] = max_power[i] - granted_power[i];
		capped_extra_power += extra_actor_power[i];
	}

	if (!extra_power)
		return;

	/*
	 * Re-divvy the reclaimed extra among actors based on
	 * how far they are from the max
	 */
	extra_power = min(extra_power, capped_extra_power);
	if (capped_extra_power > 0)
		for (i = 0; i < num_actors; i++) {
			u64 extra_range = (u64)extra_actor_power[i] * extra_power;
			granted_power[i] += DIV_ROUND_CLOSEST_ULL(extra_range,
							 capped_extra_power);
		}
}

/*
 * Run one allocation pass: poll requested/maximum power of every actor
 * bound to the control trip, run the PID controller and distribute the
 * resulting budget back to the actors.
 */
static int allocate_power(struct thermal_zone_device *tz, int control_temp)
{
	struct thermal_instance *instance;
	struct power_allocator_params *params = tz->governor_data;
	u32 *req_power, *max_power, *granted_power, *extra_actor_power;
	u32 *weighted_req_power;
	u32 total_req_power, max_allocatable_power, total_weighted_req_power;
	u32 total_granted_power, power_range;
	int i, num_actors, total_weight, ret = 0;
	int trip_max_desired_temperature = params->trip_max_desired_temperature;

	num_actors = 0;
	total_weight = 0;
	/* Count the power actors bound to the control trip */
	list_for_each_entry(instance, &tz->thermal_instances, tz_node) {
		if ((instance->trip == trip_max_desired_temperature) &&
		    cdev_is_power_actor(instance->cdev)) {
			num_actors++;
			total_weight += instance->weight;
		}
	}

	if (!num_actors)
		return -ENODEV;

	/*
	 * We need to allocate five arrays of the same size:
	 * req_power, max_power, granted_power, extra_actor_power and
	 * weighted_req_power. They are going to be needed until this
	 * function returns. Allocate them all in one go to simplify
	 * the allocation and deallocation logic.
	 */
	BUILD_BUG_ON(sizeof(*req_power) != sizeof(*max_power));
	BUILD_BUG_ON(sizeof(*req_power) != sizeof(*granted_power));
	BUILD_BUG_ON(sizeof(*req_power) != sizeof(*extra_actor_power));
	BUILD_BUG_ON(sizeof(*req_power) != sizeof(*weighted_req_power));
	req_power = kcalloc(num_actors * 5, sizeof(*req_power), GFP_KERNEL);
	if (!req_power)
		return -ENOMEM;

	/* The other four arrays are slices of the single allocation */
	max_power = &req_power[num_actors];
	granted_power = &req_power[2 * num_actors];
	extra_actor_power = &req_power[3 * num_actors];
	weighted_req_power = &req_power[4 * num_actors];

	i = 0;
	total_weighted_req_power = 0;
	total_req_power = 0;
	max_allocatable_power = 0;

	list_for_each_entry(instance, &tz->thermal_instances, tz_node) {
		int weight;
		struct thermal_cooling_device *cdev = instance->cdev;

		if (instance->trip != trip_max_desired_temperature)
			continue;

		if (!cdev_is_power_actor(cdev))
			continue;

		if (cdev->ops->get_requested_power(cdev, &req_power[i]))
			continue;

		/* With no configured weights every actor counts equally */
		if (!total_weight)
			weight = 1 << FRAC_BITS;
		else
			weight = instance->weight;

		weighted_req_power[i] = frac_to_int(weight * req_power[i]);

		if (cdev->ops->state2power(cdev, instance->lower,
					   &max_power[i]))
			continue;

		total_req_power += req_power[i];
		max_allocatable_power += max_power[i];
		total_weighted_req_power += weighted_req_power[i];

		i++;
	}

	power_range = pid_controller(tz, control_temp, max_allocatable_power);

	divvy_up_power(weighted_req_power, max_power, num_actors,
		       total_weighted_req_power, power_range, granted_power,
		       extra_actor_power);

	total_granted_power = 0;
	i = 0;
	list_for_each_entry(instance, &tz->thermal_instances, tz_node) {
		if (instance->trip != trip_max_desired_temperature)
			continue;

		if (!cdev_is_power_actor(instance->cdev))
			continue;

		power_actor_set_power(instance->cdev, instance,
				      granted_power[i]);
		total_granted_power += granted_power[i];

		i++;
	}

	trace_thermal_power_allocator(tz, req_power, total_req_power,
				      granted_power, total_granted_power,
				      num_actors, power_range,
				      max_allocatable_power, tz->temperature,
				      control_temp - tz->temperature);

	kfree(req_power);

	return ret;
}

/**
 * get_governor_trips() - get the number of the two trip points that are key for this governor
 * @tz:	thermal zone to operate on
 * @params:	pointer to private data for this governor
 *
 * The power allocator governor works optimally with two trips points:
 * a "switch on" trip point and a "maximum desired temperature". These
 * are defined as the first and last passive trip points.
 *
 * If there is only one trip point, then that's considered to be the
 * "maximum desired temperature" trip point and the governor is always
 * on. If there are no passive or active trip points, then the
 * governor won't do anything. In fact, its throttle function
 * won't be called at all.
 */
static void get_governor_trips(struct thermal_zone_device *tz,
			       struct power_allocator_params *params)
{
	int i, last_active, last_passive;
	bool found_first_passive;

	found_first_passive = false;
	last_active = INVALID_TRIP;
	last_passive = INVALID_TRIP;

	for (i = 0; i < tz->num_trips; i++) {
		struct thermal_trip trip;
		int ret;

		ret = __thermal_zone_get_trip(tz, i, &trip);
		if (ret) {
			dev_warn(&tz->device,
				 "Failed to get trip point %d type: %d\n", i,
				 ret);
			continue;
		}

		if (trip.type == THERMAL_TRIP_PASSIVE) {
			if (!found_first_passive) {
				params->trip_switch_on = i;
				found_first_passive = true;
			} else {
				last_passive = i;
			}
		} else if (trip.type == THERMAL_TRIP_ACTIVE) {
			last_active = i;
		} else {
			/* HOT/CRITICAL trips end the scan */
			break;
		}
	}

	if (last_passive != INVALID_TRIP) {
		params->trip_max_desired_temperature = last_passive;
	} else if (found_first_passive) {
		/* Only one passive trip: control on it, no switch-on trip */
		params->trip_max_desired_temperature = params->trip_switch_on;
		params->trip_switch_on = INVALID_TRIP;
	} else {
		params->trip_switch_on = INVALID_TRIP;
		params->trip_max_desired_temperature = last_active;
	}
}

/* Forget accumulated PID state so the next pass starts fresh */
static void reset_pid_controller(struct power_allocator_params *params)
{
	params->err_integral = 0;
	params->prev_err = 0;
}

/*
 * Lift all power caps: set every actor bound to the control trip to its
 * least-throttled state (target 0), optionally pushing the update out.
 */
static void allow_maximum_power(struct thermal_zone_device *tz, bool update)
{
	struct thermal_instance *instance;
	struct power_allocator_params *params =
tz->governor_data; u32 req_power; list_for_each_entry(instance, &tz->thermal_instances, tz_node) { struct thermal_cooling_device *cdev = instance->cdev; if ((instance->trip != params->trip_max_desired_temperature) || (!cdev_is_power_actor(instance->cdev))) continue; instance->target = 0; mutex_lock(&instance->cdev->lock); /* * Call for updating the cooling devices local stats and avoid * periods of dozen of seconds when those have not been * maintained. */ cdev->ops->get_requested_power(cdev, &req_power); if (update) __thermal_cdev_update(instance->cdev); mutex_unlock(&instance->cdev->lock); } } /** * check_power_actors() - Check all cooling devices and warn when they are * not power actors * @tz: thermal zone to operate on * * Check all cooling devices in the @tz and warn every time they are missing * power actor API. The warning should help to investigate the issue, which * could be e.g. lack of Energy Model for a given device. * * Return: 0 on success, -EINVAL if any cooling device does not implement * the power actor API. */ static int check_power_actors(struct thermal_zone_device *tz) { struct thermal_instance *instance; int ret = 0; list_for_each_entry(instance, &tz->thermal_instances, tz_node) { if (!cdev_is_power_actor(instance->cdev)) { dev_warn(&tz->device, "power_allocator: %s is not a power actor\n", instance->cdev->type); ret = -EINVAL; } } return ret; } /** * power_allocator_bind() - bind the power_allocator governor to a thermal zone * @tz: thermal zone to bind it to * * Initialize the PID controller parameters and bind it to the thermal * zone. * * Return: 0 on success, or -ENOMEM if we ran out of memory, or -EINVAL * when there are unsupported cooling devices in the @tz. 
 */
static int power_allocator_bind(struct thermal_zone_device *tz)
{
	int ret;
	struct power_allocator_params *params;
	struct thermal_trip trip;

	/* Refuse to bind if any cooling device lacks the power actor API */
	ret = check_power_actors(tz);
	if (ret)
		return ret;

	params = kzalloc(sizeof(*params), GFP_KERNEL);
	if (!params)
		return -ENOMEM;

	if (!tz->tzp) {
		tz->tzp = kzalloc(sizeof(*tz->tzp), GFP_KERNEL);
		if (!tz->tzp) {
			ret = -ENOMEM;
			goto free_params;
		}

		/* Remember we own tzp so unbind can free it */
		params->allocated_tzp = true;
	}

	if (!tz->tzp->sustainable_power)
		dev_warn(&tz->device, "power_allocator: sustainable_power will be estimated\n");

	get_governor_trips(tz, params);

	if (tz->num_trips > 0) {
		ret = __thermal_zone_get_trip(tz,
				params->trip_max_desired_temperature,
				&trip);
		if (!ret)
			estimate_pid_constants(tz, tz->tzp->sustainable_power,
					       params->trip_switch_on,
					       trip.temperature);
	}

	reset_pid_controller(params);

	tz->governor_data = params;

	return 0;

free_params:
	kfree(params);

	return ret;
}

static void power_allocator_unbind(struct thermal_zone_device *tz)
{
	struct power_allocator_params *params = tz->governor_data;

	dev_dbg(&tz->device, "Unbinding from thermal zone %d\n", tz->id);

	/* Free tzp only if this governor allocated it at bind time */
	if (params->allocated_tzp) {
		kfree(tz->tzp);
		tz->tzp = NULL;
	}

	kfree(tz->governor_data);
	tz->governor_data = NULL;
}

static int power_allocator_throttle(struct thermal_zone_device *tz, int trip_id)
{
	struct power_allocator_params *params = tz->governor_data;
	struct thermal_trip trip;
	int ret;
	bool update;

	lockdep_assert_held(&tz->lock);

	/*
	 * We get called for every trip point but we only need to do
	 * our calculations once
	 */
	if (trip_id != params->trip_max_desired_temperature)
		return 0;

	/* Below the switch-on trip: reset the PID and uncap all actors */
	ret = __thermal_zone_get_trip(tz, params->trip_switch_on, &trip);
	if (!ret && (tz->temperature < trip.temperature)) {
		update = (tz->last_temperature >= trip.temperature);
		tz->passive = 0;
		reset_pid_controller(params);
		allow_maximum_power(tz, update);
		return 0;
	}

	tz->passive = 1;

	ret = __thermal_zone_get_trip(tz, params->trip_max_desired_temperature,
				      &trip);
	if (ret) {
		dev_warn(&tz->device, "Failed to get the maximum desired temperature: %d\n",
			 ret);
		return ret;
	}

	return allocate_power(tz, trip.temperature);
}

static struct thermal_governor thermal_gov_power_allocator = {
	.name		= "power_allocator",
	.bind_to_tz	= power_allocator_bind,
	.unbind_from_tz	= power_allocator_unbind,
	.throttle	= power_allocator_throttle,
};
THERMAL_GOVERNOR_DECLARE(thermal_gov_power_allocator);
linux-master
drivers/thermal/gov_power_allocator.c
// SPDX-License-Identifier: GPL-2.0
/*
 * thermal.c - sysfs interface of thermal devices
 *
 * Copyright (C) 2016 Eduardo Valentin <[email protected]>
 *
 * Highly based on original thermal_core.c
 * Copyright (C) 2008 Intel Corp
 * Copyright (C) 2008 Zhang Rui <[email protected]>
 * Copyright (C) 2008 Sujith Thomas <[email protected]>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/sysfs.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/jiffies.h>

#include "thermal_core.h"

/* sys I/F for thermal zone */

/* Report the thermal zone type string */
static ssize_t
type_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct thermal_zone_device *tz = to_thermal_zone(dev);

	return sprintf(buf, "%s\n", tz->type);
}

/* Report the current zone temperature */
static ssize_t
temp_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct thermal_zone_device *tz = to_thermal_zone(dev);
	int temperature, ret;

	ret = thermal_zone_get_temp(tz, &temperature);

	if (ret)
		return ret;

	return sprintf(buf, "%d\n", temperature);
}

static ssize_t
mode_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct thermal_zone_device *tz = to_thermal_zone(dev);
	int enabled;

	/* Sample the enabled state under tz->lock */
	mutex_lock(&tz->lock);
	enabled = thermal_zone_device_is_enabled(tz);
	mutex_unlock(&tz->lock);

	return sprintf(buf, "%s\n", enabled ? "enabled" : "disabled");
}

static ssize_t
mode_store(struct device *dev, struct device_attribute *attr,
	   const char *buf, size_t count)
{
	struct thermal_zone_device *tz = to_thermal_zone(dev);
	int result;

	if (!strncmp(buf, "enabled", sizeof("enabled") - 1))
		result = thermal_zone_device_enable(tz);
	else if (!strncmp(buf, "disabled", sizeof("disabled") - 1))
		result = thermal_zone_device_disable(tz);
	else
		result = -EINVAL;

	if (result)
		return result;

	return count;
}

static ssize_t
trip_point_type_show(struct device *dev, struct device_attribute *attr,
		     char *buf)
{
	struct thermal_zone_device *tz = to_thermal_zone(dev);
	struct thermal_trip trip;
	int trip_id, result;

	/* The trip index is encoded in the attribute name */
	if (sscanf(attr->attr.name, "trip_point_%d_type", &trip_id) != 1)
		return -EINVAL;

	mutex_lock(&tz->lock);

	if (device_is_registered(dev))
		result = __thermal_zone_get_trip(tz, trip_id, &trip);
	else
		result = -ENODEV;

	mutex_unlock(&tz->lock);

	if (result)
		return result;

	switch (trip.type) {
	case THERMAL_TRIP_CRITICAL:
		return sprintf(buf, "critical\n");
	case THERMAL_TRIP_HOT:
		return sprintf(buf, "hot\n");
	case THERMAL_TRIP_PASSIVE:
		return sprintf(buf, "passive\n");
	case THERMAL_TRIP_ACTIVE:
		return sprintf(buf, "active\n");
	default:
		return sprintf(buf, "unknown\n");
	}
}

static ssize_t
trip_point_temp_store(struct device *dev, struct device_attribute *attr,
		      const char *buf, size_t count)
{
	struct thermal_zone_device *tz = to_thermal_zone(dev);
	struct thermal_trip trip;
	int trip_id, ret;

	if (sscanf(attr->attr.name, "trip_point_%d_temp", &trip_id) != 1)
		return -EINVAL;

	mutex_lock(&tz->lock);

	if (!device_is_registered(dev)) {
		ret = -ENODEV;
		goto unlock;
	}

	/* Read-modify-write: keep the other trip fields unchanged */
	ret = __thermal_zone_get_trip(tz, trip_id, &trip);
	if (ret)
		goto unlock;

	ret = kstrtoint(buf, 10, &trip.temperature);
	if (ret)
		goto unlock;

	ret = thermal_zone_set_trip(tz, trip_id, &trip);
unlock:
	mutex_unlock(&tz->lock);

	return ret ? ret : count;
}

static ssize_t
trip_point_temp_show(struct device *dev, struct device_attribute *attr,
		     char *buf)
{
	struct thermal_zone_device *tz = to_thermal_zone(dev);
	struct thermal_trip trip;
	int trip_id, ret;

	if (sscanf(attr->attr.name, "trip_point_%d_temp", &trip_id) != 1)
		return -EINVAL;

	mutex_lock(&tz->lock);

	if (device_is_registered(dev))
		ret = __thermal_zone_get_trip(tz, trip_id, &trip);
	else
		ret = -ENODEV;

	mutex_unlock(&tz->lock);

	if (ret)
		return ret;

	return sprintf(buf, "%d\n", trip.temperature);
}

static ssize_t
trip_point_hyst_store(struct device *dev, struct device_attribute *attr,
		      const char *buf, size_t count)
{
	struct thermal_zone_device *tz = to_thermal_zone(dev);
	struct thermal_trip trip;
	int trip_id, ret;

	if (sscanf(attr->attr.name, "trip_point_%d_hyst", &trip_id) != 1)
		return -EINVAL;

	mutex_lock(&tz->lock);

	if (!device_is_registered(dev)) {
		ret = -ENODEV;
		goto unlock;
	}

	ret = __thermal_zone_get_trip(tz, trip_id, &trip);
	if (ret)
		goto unlock;

	ret = kstrtoint(buf, 10, &trip.hysteresis);
	if (ret)
		goto unlock;

	ret = thermal_zone_set_trip(tz, trip_id, &trip);
unlock:
	mutex_unlock(&tz->lock);

	return ret ? ret : count;
}

static ssize_t
trip_point_hyst_show(struct device *dev, struct device_attribute *attr,
		     char *buf)
{
	struct thermal_zone_device *tz = to_thermal_zone(dev);
	struct thermal_trip trip;
	int trip_id, ret;

	if (sscanf(attr->attr.name, "trip_point_%d_hyst", &trip_id) != 1)
		return -EINVAL;

	mutex_lock(&tz->lock);

	if (device_is_registered(dev))
		ret = __thermal_zone_get_trip(tz, trip_id, &trip);
	else
		ret = -ENODEV;

	mutex_unlock(&tz->lock);

	return ret ? ret : sprintf(buf, "%d\n", trip.hysteresis);
}

static ssize_t
policy_store(struct device *dev, struct device_attribute *attr,
	     const char *buf, size_t count)
{
	struct thermal_zone_device *tz = to_thermal_zone(dev);
	char name[THERMAL_NAME_LENGTH];
	int ret;

	/* snprintf() bounds the governor name to THERMAL_NAME_LENGTH */
	snprintf(name, sizeof(name), "%s", buf);

	ret = thermal_zone_device_set_policy(tz, name);
	if (!ret)
		ret = count;

	return ret;
}

static ssize_t
policy_show(struct device *dev, struct device_attribute *devattr, char *buf)
{
	struct thermal_zone_device *tz = to_thermal_zone(dev);

	return sprintf(buf, "%s\n", tz->governor->name);
}

static ssize_t
available_policies_show(struct device *dev, struct device_attribute *devattr,
			char *buf)
{
	return thermal_build_list_of_policies(buf);
}

#if (IS_ENABLED(CONFIG_THERMAL_EMULATION))
static ssize_t
emul_temp_store(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count)
{
	struct thermal_zone_device *tz = to_thermal_zone(dev);
	int ret = 0;
	int temperature;

	if (kstrtoint(buf, 10, &temperature))
		return -EINVAL;

	mutex_lock(&tz->lock);

	if (!device_is_registered(dev)) {
		ret = -ENODEV;
		goto unlock;
	}

	/* Either let the driver emulate, or store the value locally */
	if (!tz->ops->set_emul_temp)
		tz->emul_temperature = temperature;
	else
		ret = tz->ops->set_emul_temp(tz, temperature);

	if (!ret)
		__thermal_zone_device_update(tz, THERMAL_EVENT_UNSPECIFIED);

unlock:
	mutex_unlock(&tz->lock);

	return ret ? ret : count;
}
static DEVICE_ATTR_WO(emul_temp);
#endif

static ssize_t
sustainable_power_show(struct device *dev, struct device_attribute *devattr,
		       char *buf)
{
	struct thermal_zone_device *tz = to_thermal_zone(dev);

	if (tz->tzp)
		return sprintf(buf, "%u\n", tz->tzp->sustainable_power);
	else
		return -EIO;
}

static ssize_t
sustainable_power_store(struct device *dev, struct device_attribute *devattr,
			const char *buf, size_t count)
{
	struct thermal_zone_device *tz = to_thermal_zone(dev);
	u32 sustainable_power;

	if (!tz->tzp)
		return -EIO;

	if (kstrtou32(buf, 10, &sustainable_power))
		return -EINVAL;

	tz->tzp->sustainable_power = sustainable_power;

	return count;
}

/* Generate show/store handlers for the s32 tzp tunables listed below */
#define create_s32_tzp_attr(name)					\
	static ssize_t							\
	name##_show(struct device *dev, struct device_attribute *devattr, \
		    char *buf)						\
	{								\
	struct thermal_zone_device *tz = to_thermal_zone(dev);		\
									\
	if (tz->tzp)							\
		return sprintf(buf, "%d\n", tz->tzp->name);		\
	else								\
		return -EIO;						\
	}								\
									\
	static ssize_t							\
	name##_store(struct device *dev, struct device_attribute *devattr, \
		     const char *buf, size_t count)			\
	{								\
		struct thermal_zone_device *tz = to_thermal_zone(dev);	\
		s32 value;						\
									\
		if (!tz->tzp)						\
			return -EIO;					\
									\
		if (kstrtos32(buf, 10, &value))				\
			return -EINVAL;					\
									\
		tz->tzp->name = value;					\
									\
		return count;						\
	}								\
	static DEVICE_ATTR_RW(name)

create_s32_tzp_attr(k_po);
create_s32_tzp_attr(k_pu);
create_s32_tzp_attr(k_i);
create_s32_tzp_attr(k_d);
create_s32_tzp_attr(integral_cutoff);
create_s32_tzp_attr(slope);
create_s32_tzp_attr(offset);
#undef create_s32_tzp_attr

/*
 * These are thermal zone device attributes that will always be present.
 * All the attributes created for tzp (create_s32_tzp_attr) also are always
 * present on the sysfs interface.
 */
static DEVICE_ATTR_RO(type);
static DEVICE_ATTR_RO(temp);
static DEVICE_ATTR_RW(policy);
static DEVICE_ATTR_RO(available_policies);
static DEVICE_ATTR_RW(sustainable_power);

/* These thermal zone device attributes are created based on conditions */
static DEVICE_ATTR_RW(mode);

/* These attributes are unconditionally added to a thermal zone */
static struct attribute *thermal_zone_dev_attrs[] = {
	&dev_attr_type.attr,
	&dev_attr_temp.attr,
#if (IS_ENABLED(CONFIG_THERMAL_EMULATION))
	&dev_attr_emul_temp.attr,
#endif
	&dev_attr_policy.attr,
	&dev_attr_available_policies.attr,
	&dev_attr_sustainable_power.attr,
	&dev_attr_k_po.attr,
	&dev_attr_k_pu.attr,
	&dev_attr_k_i.attr,
	&dev_attr_k_d.attr,
	&dev_attr_integral_cutoff.attr,
	&dev_attr_slope.attr,
	&dev_attr_offset.attr,
	NULL,
};

static const struct attribute_group thermal_zone_attribute_group = {
	.attrs = thermal_zone_dev_attrs,
};

static struct attribute *thermal_zone_mode_attrs[] = {
	&dev_attr_mode.attr,
	NULL,
};

static const struct attribute_group thermal_zone_mode_attribute_group = {
	.attrs = thermal_zone_mode_attrs,
};

static const struct attribute_group *thermal_zone_attribute_groups[] = {
	&thermal_zone_attribute_group,
	&thermal_zone_mode_attribute_group,
	/* This is not NULL terminated as we create the group dynamically */
};

/**
 * create_trip_attrs() - create attributes for trip points
 * @tz:		the thermal zone device
 * @mask:	Writeable trip point bitmap.
 *
 * helper function to instantiate sysfs entries for every trip
 * point and its properties of a struct thermal_zone_device.
 *
 * Return: 0 on success, the proper error value otherwise.
 */
static int create_trip_attrs(struct thermal_zone_device *tz, int mask)
{
	struct attribute **attrs;
	int indx;

	/* This function works only for zones with at least one trip */
	if (tz->num_trips <= 0)
		return -EINVAL;

	tz->trip_type_attrs = kcalloc(tz->num_trips, sizeof(*tz->trip_type_attrs),
				      GFP_KERNEL);
	if (!tz->trip_type_attrs)
		return -ENOMEM;

	tz->trip_temp_attrs = kcalloc(tz->num_trips, sizeof(*tz->trip_temp_attrs),
				      GFP_KERNEL);
	if (!tz->trip_temp_attrs) {
		kfree(tz->trip_type_attrs);
		return -ENOMEM;
	}

	tz->trip_hyst_attrs = kcalloc(tz->num_trips,
				      sizeof(*tz->trip_hyst_attrs),
				      GFP_KERNEL);
	if (!tz->trip_hyst_attrs) {
		kfree(tz->trip_type_attrs);
		kfree(tz->trip_temp_attrs);
		return -ENOMEM;
	}

	/* Three attrs per trip (type/temp/hyst) plus the NULL terminator */
	attrs = kcalloc(tz->num_trips * 3 + 1, sizeof(*attrs), GFP_KERNEL);
	if (!attrs) {
		kfree(tz->trip_type_attrs);
		kfree(tz->trip_temp_attrs);
		kfree(tz->trip_hyst_attrs);
		return -ENOMEM;
	}

	for (indx = 0; indx < tz->num_trips; indx++) {
		/* create trip type attribute */
		snprintf(tz->trip_type_attrs[indx].name, THERMAL_NAME_LENGTH,
			 "trip_point_%d_type", indx);

		sysfs_attr_init(&tz->trip_type_attrs[indx].attr.attr);
		tz->trip_type_attrs[indx].attr.attr.name =
						tz->trip_type_attrs[indx].name;
		tz->trip_type_attrs[indx].attr.attr.mode = S_IRUGO;
		tz->trip_type_attrs[indx].attr.show = trip_point_type_show;
		attrs[indx] = &tz->trip_type_attrs[indx].attr.attr;

		/* create trip temp attribute */
		snprintf(tz->trip_temp_attrs[indx].name, THERMAL_NAME_LENGTH,
			 "trip_point_%d_temp", indx);

		sysfs_attr_init(&tz->trip_temp_attrs[indx].attr.attr);
		tz->trip_temp_attrs[indx].attr.attr.name =
						tz->trip_temp_attrs[indx].name;
		tz->trip_temp_attrs[indx].attr.attr.mode = S_IRUGO;
		tz->trip_temp_attrs[indx].attr.show = trip_point_temp_show;
		if (IS_ENABLED(CONFIG_THERMAL_WRITABLE_TRIPS) &&
		    mask & (1 << indx)) {
			tz->trip_temp_attrs[indx].attr.attr.mode |= S_IWUSR;
			tz->trip_temp_attrs[indx].attr.store =
							trip_point_temp_store;
		}
		attrs[indx + tz->num_trips] = &tz->trip_temp_attrs[indx].attr.attr;

		/* create trip hysteresis attribute */
		snprintf(tz->trip_hyst_attrs[indx].name, THERMAL_NAME_LENGTH,
			 "trip_point_%d_hyst", indx);

		sysfs_attr_init(&tz->trip_hyst_attrs[indx].attr.attr);
		tz->trip_hyst_attrs[indx].attr.attr.name =
					tz->trip_hyst_attrs[indx].name;
		tz->trip_hyst_attrs[indx].attr.attr.mode = S_IRUGO;
		tz->trip_hyst_attrs[indx].attr.show = trip_point_hyst_show;
		if (tz->ops->set_trip_hyst) {
			tz->trip_hyst_attrs[indx].attr.attr.mode |= S_IWUSR;
			tz->trip_hyst_attrs[indx].attr.store =
					trip_point_hyst_store;
		}
		attrs[indx + tz->num_trips * 2] =
					&tz->trip_hyst_attrs[indx].attr.attr;
	}
	attrs[tz->num_trips * 3] = NULL;

	tz->trips_attribute_group.attrs = attrs;

	return 0;
}

/**
 * destroy_trip_attrs() - destroy attributes for trip points
 * @tz:	the thermal zone device
 *
 * helper function to free resources allocated by create_trip_attrs()
 */
static void destroy_trip_attrs(struct thermal_zone_device *tz)
{
	if (!tz)
		return;

	kfree(tz->trip_type_attrs);
	kfree(tz->trip_temp_attrs);
	kfree(tz->trip_hyst_attrs);
	kfree(tz->trips_attribute_group.attrs);
}

/* Build the full device.groups array for a thermal zone, trips included */
int thermal_zone_create_device_groups(struct thermal_zone_device *tz,
				      int mask)
{
	const struct attribute_group **groups;
	int i, size, result;

	/* we need one extra for trips and the NULL to terminate the array */
	size = ARRAY_SIZE(thermal_zone_attribute_groups) + 2;

	/* This also takes care of API requirement to be NULL terminated */
	groups = kcalloc(size, sizeof(*groups), GFP_KERNEL);
	if (!groups)
		return -ENOMEM;

	for (i = 0; i < size - 2; i++)
		groups[i] = thermal_zone_attribute_groups[i];

	if (tz->num_trips) {
		result = create_trip_attrs(tz, mask);
		if (result) {
			kfree(groups);

			return result;
		}

		groups[size - 2] = &tz->trips_attribute_group;
	}

	tz->device.groups = groups;

	return 0;
}

void thermal_zone_destroy_device_groups(struct thermal_zone_device *tz)
{
	if (!tz)
		return;

	if (tz->num_trips)
		destroy_trip_attrs(tz);

	kfree(tz->device.groups);
}

/* sys I/F for cooling device */
static ssize_t
cdev_type_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct
thermal_cooling_device *cdev = to_cooling_device(dev); return sprintf(buf, "%s\n", cdev->type); } static ssize_t max_state_show(struct device *dev, struct device_attribute *attr, char *buf) { struct thermal_cooling_device *cdev = to_cooling_device(dev); return sprintf(buf, "%ld\n", cdev->max_state); } static ssize_t cur_state_show(struct device *dev, struct device_attribute *attr, char *buf) { struct thermal_cooling_device *cdev = to_cooling_device(dev); unsigned long state; int ret; ret = cdev->ops->get_cur_state(cdev, &state); if (ret) return ret; return sprintf(buf, "%ld\n", state); } static ssize_t cur_state_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct thermal_cooling_device *cdev = to_cooling_device(dev); unsigned long state; int result; if (sscanf(buf, "%ld\n", &state) != 1) return -EINVAL; if ((long)state < 0) return -EINVAL; /* Requested state should be less than max_state + 1 */ if (state > cdev->max_state) return -EINVAL; mutex_lock(&cdev->lock); result = cdev->ops->set_cur_state(cdev, state); if (!result) thermal_cooling_device_stats_update(cdev, state); mutex_unlock(&cdev->lock); return result ? 
result : count; } static struct device_attribute dev_attr_cdev_type = __ATTR(type, 0444, cdev_type_show, NULL); static DEVICE_ATTR_RO(max_state); static DEVICE_ATTR_RW(cur_state); static struct attribute *cooling_device_attrs[] = { &dev_attr_cdev_type.attr, &dev_attr_max_state.attr, &dev_attr_cur_state.attr, NULL, }; static const struct attribute_group cooling_device_attr_group = { .attrs = cooling_device_attrs, }; static const struct attribute_group *cooling_device_attr_groups[] = { &cooling_device_attr_group, NULL, /* Space allocated for cooling_device_stats_attr_group */ NULL, }; #ifdef CONFIG_THERMAL_STATISTICS struct cooling_dev_stats { spinlock_t lock; unsigned int total_trans; unsigned long state; ktime_t last_time; ktime_t *time_in_state; unsigned int *trans_table; }; static void update_time_in_state(struct cooling_dev_stats *stats) { ktime_t now = ktime_get(), delta; delta = ktime_sub(now, stats->last_time); stats->time_in_state[stats->state] = ktime_add(stats->time_in_state[stats->state], delta); stats->last_time = now; } void thermal_cooling_device_stats_update(struct thermal_cooling_device *cdev, unsigned long new_state) { struct cooling_dev_stats *stats = cdev->stats; lockdep_assert_held(&cdev->lock); if (!stats) return; spin_lock(&stats->lock); if (stats->state == new_state) goto unlock; update_time_in_state(stats); stats->trans_table[stats->state * (cdev->max_state + 1) + new_state]++; stats->state = new_state; stats->total_trans++; unlock: spin_unlock(&stats->lock); } static ssize_t total_trans_show(struct device *dev, struct device_attribute *attr, char *buf) { struct thermal_cooling_device *cdev = to_cooling_device(dev); struct cooling_dev_stats *stats; int ret = 0; mutex_lock(&cdev->lock); stats = cdev->stats; if (!stats) goto unlock; spin_lock(&stats->lock); ret = sprintf(buf, "%u\n", stats->total_trans); spin_unlock(&stats->lock); unlock: mutex_unlock(&cdev->lock); return ret; } static ssize_t time_in_state_ms_show(struct device *dev, struct 
device_attribute *attr, char *buf) { struct thermal_cooling_device *cdev = to_cooling_device(dev); struct cooling_dev_stats *stats; ssize_t len = 0; int i; mutex_lock(&cdev->lock); stats = cdev->stats; if (!stats) goto unlock; spin_lock(&stats->lock); update_time_in_state(stats); for (i = 0; i <= cdev->max_state; i++) { len += sprintf(buf + len, "state%u\t%llu\n", i, ktime_to_ms(stats->time_in_state[i])); } spin_unlock(&stats->lock); unlock: mutex_unlock(&cdev->lock); return len; } static ssize_t reset_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct thermal_cooling_device *cdev = to_cooling_device(dev); struct cooling_dev_stats *stats; int i, states; mutex_lock(&cdev->lock); stats = cdev->stats; if (!stats) goto unlock; states = cdev->max_state + 1; spin_lock(&stats->lock); stats->total_trans = 0; stats->last_time = ktime_get(); memset(stats->trans_table, 0, states * states * sizeof(*stats->trans_table)); for (i = 0; i < states; i++) stats->time_in_state[i] = ktime_set(0, 0); spin_unlock(&stats->lock); unlock: mutex_unlock(&cdev->lock); return count; } static ssize_t trans_table_show(struct device *dev, struct device_attribute *attr, char *buf) { struct thermal_cooling_device *cdev = to_cooling_device(dev); struct cooling_dev_stats *stats; ssize_t len = 0; int i, j; mutex_lock(&cdev->lock); stats = cdev->stats; if (!stats) { len = -ENODATA; goto unlock; } len += snprintf(buf + len, PAGE_SIZE - len, " From : To\n"); len += snprintf(buf + len, PAGE_SIZE - len, " : "); for (i = 0; i <= cdev->max_state; i++) { if (len >= PAGE_SIZE) break; len += snprintf(buf + len, PAGE_SIZE - len, "state%2u ", i); } if (len >= PAGE_SIZE) { len = PAGE_SIZE; goto unlock; } len += snprintf(buf + len, PAGE_SIZE - len, "\n"); for (i = 0; i <= cdev->max_state; i++) { if (len >= PAGE_SIZE) break; len += snprintf(buf + len, PAGE_SIZE - len, "state%2u:", i); for (j = 0; j <= cdev->max_state; j++) { if (len >= PAGE_SIZE) break; len += 
snprintf(buf + len, PAGE_SIZE - len, "%8u ", stats->trans_table[i * (cdev->max_state + 1) + j]); } if (len >= PAGE_SIZE) break; len += snprintf(buf + len, PAGE_SIZE - len, "\n"); } if (len >= PAGE_SIZE) { pr_warn_once("Thermal transition table exceeds PAGE_SIZE. Disabling\n"); len = -EFBIG; } unlock: mutex_unlock(&cdev->lock); return len; } static DEVICE_ATTR_RO(total_trans); static DEVICE_ATTR_RO(time_in_state_ms); static DEVICE_ATTR_WO(reset); static DEVICE_ATTR_RO(trans_table); static struct attribute *cooling_device_stats_attrs[] = { &dev_attr_total_trans.attr, &dev_attr_time_in_state_ms.attr, &dev_attr_reset.attr, &dev_attr_trans_table.attr, NULL }; static const struct attribute_group cooling_device_stats_attr_group = { .attrs = cooling_device_stats_attrs, .name = "stats" }; static void cooling_device_stats_setup(struct thermal_cooling_device *cdev) { const struct attribute_group *stats_attr_group = NULL; struct cooling_dev_stats *stats; /* Total number of states is highest state + 1 */ unsigned long states = cdev->max_state + 1; int var; var = sizeof(*stats); var += sizeof(*stats->time_in_state) * states; var += sizeof(*stats->trans_table) * states * states; stats = kzalloc(var, GFP_KERNEL); if (!stats) goto out; stats->time_in_state = (ktime_t *)(stats + 1); stats->trans_table = (unsigned int *)(stats->time_in_state + states); cdev->stats = stats; stats->last_time = ktime_get(); spin_lock_init(&stats->lock); stats_attr_group = &cooling_device_stats_attr_group; out: /* Fill the empty slot left in cooling_device_attr_groups */ var = ARRAY_SIZE(cooling_device_attr_groups) - 2; cooling_device_attr_groups[var] = stats_attr_group; } static void cooling_device_stats_destroy(struct thermal_cooling_device *cdev) { kfree(cdev->stats); cdev->stats = NULL; } #else static inline void cooling_device_stats_setup(struct thermal_cooling_device *cdev) {} static inline void cooling_device_stats_destroy(struct thermal_cooling_device *cdev) {} #endif /* CONFIG_THERMAL_STATISTICS 
*/ void thermal_cooling_device_setup_sysfs(struct thermal_cooling_device *cdev) { cooling_device_stats_setup(cdev); cdev->device.groups = cooling_device_attr_groups; } void thermal_cooling_device_destroy_sysfs(struct thermal_cooling_device *cdev) { cooling_device_stats_destroy(cdev); } void thermal_cooling_device_stats_reinit(struct thermal_cooling_device *cdev) { lockdep_assert_held(&cdev->lock); cooling_device_stats_destroy(cdev); cooling_device_stats_setup(cdev); } /* these helper will be used only at the time of bindig */ ssize_t trip_point_show(struct device *dev, struct device_attribute *attr, char *buf) { struct thermal_instance *instance; instance = container_of(attr, struct thermal_instance, attr); return sprintf(buf, "%d\n", instance->trip); } ssize_t weight_show(struct device *dev, struct device_attribute *attr, char *buf) { struct thermal_instance *instance; instance = container_of(attr, struct thermal_instance, weight_attr); return sprintf(buf, "%d\n", instance->weight); } ssize_t weight_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct thermal_instance *instance; int ret, weight; ret = kstrtoint(buf, 0, &weight); if (ret) return ret; instance = container_of(attr, struct thermal_instance, weight_attr); instance->weight = weight; return count; }
linux-master
drivers/thermal/thermal_sysfs.c
// SPDX-License-Identifier: GPL-2.0-only /* * gov_bang_bang.c - A simple thermal throttling governor using hysteresis * * Copyright (C) 2014 Peter Kaestle <[email protected]> * * Based on step_wise.c with following Copyrights: * Copyright (C) 2012 Intel Corp * Copyright (C) 2012 Durgadoss R <[email protected]> */ #include <linux/thermal.h> #include "thermal_core.h" static int thermal_zone_trip_update(struct thermal_zone_device *tz, int trip_id) { struct thermal_trip trip; struct thermal_instance *instance; int ret; ret = __thermal_zone_get_trip(tz, trip_id, &trip); if (ret) { pr_warn_once("Failed to retrieve trip point %d\n", trip_id); return ret; } if (!trip.hysteresis) dev_info_once(&tz->device, "Zero hysteresis value for thermal zone %s\n", tz->type); dev_dbg(&tz->device, "Trip%d[temp=%d]:temp=%d:hyst=%d\n", trip_id, trip.temperature, tz->temperature, trip.hysteresis); list_for_each_entry(instance, &tz->thermal_instances, tz_node) { if (instance->trip != trip_id) continue; /* in case fan is in initial state, switch the fan off */ if (instance->target == THERMAL_NO_TARGET) instance->target = 0; /* in case fan is neither on nor off set the fan to active */ if (instance->target != 0 && instance->target != 1) { pr_warn("Thermal instance %s controlled by bang-bang has unexpected state: %ld\n", instance->name, instance->target); instance->target = 1; } /* * enable fan when temperature exceeds trip_temp and disable * the fan in case it falls below trip_temp minus hysteresis */ if (instance->target == 0 && tz->temperature >= trip.temperature) instance->target = 1; else if (instance->target == 1 && tz->temperature <= trip.temperature - trip.hysteresis) instance->target = 0; dev_dbg(&instance->cdev->device, "target=%d\n", (int)instance->target); mutex_lock(&instance->cdev->lock); instance->cdev->updated = false; /* cdev needs update */ mutex_unlock(&instance->cdev->lock); } return 0; } /** * bang_bang_control - controls devices associated with the given zone * @tz: 
thermal_zone_device * @trip: the trip point * * Regulation Logic: a two point regulation, deliver cooling state depending * on the previous state shown in this diagram: * * Fan: OFF ON * * | * | * trip_temp: +---->+ * | | ^ * | | | * | | Temperature * (trip_temp - hyst): +<----+ * | * | * | * * * If the fan is not running and temperature exceeds trip_temp, the fan * gets turned on. * * In case the fan is running, temperature must fall below * (trip_temp - hyst) so that the fan gets turned off again. * */ static int bang_bang_control(struct thermal_zone_device *tz, int trip) { struct thermal_instance *instance; int ret; lockdep_assert_held(&tz->lock); ret = thermal_zone_trip_update(tz, trip); if (ret) return ret; list_for_each_entry(instance, &tz->thermal_instances, tz_node) thermal_cdev_update(instance->cdev); return 0; } static struct thermal_governor thermal_gov_bang_bang = { .name = "bang_bang", .throttle = bang_bang_control, }; THERMAL_GOVERNOR_DECLARE(thermal_gov_bang_bang);
linux-master
drivers/thermal/gov_bang_bang.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Thermal sensor driver for Allwinner SOC
 * Copyright (C) 2019 Yangtao Li
 *
 * Based on the work of Icenowy Zheng <[email protected]>
 * Based on the work of Ondrej Jirman <[email protected]>
 * Based on the work of Josef Gajdusek <[email protected]>
 */

#include <linux/bitmap.h>
#include <linux/clk.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/nvmem-consumer.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/reset.h>
#include <linux/slab.h>
#include <linux/thermal.h>

#include "thermal_hwmon.h"

#define MAX_SENSOR_NUM			4

/* 12-bit fields: factory-test temperature and per-sensor calibration data */
#define FT_TEMP_MASK			GENMASK(11, 0)
#define TEMP_CALIB_MASK			GENMASK(11, 0)
#define CALIBRATE_DEFAULT		0x800

/* sun8i (H3 generation) register map */
#define SUN8I_THS_CTRL0			0x00
#define SUN8I_THS_CTRL2			0x40
#define SUN8I_THS_IC			0x44
#define SUN8I_THS_IS			0x48
#define SUN8I_THS_MFC			0x70
#define SUN8I_THS_TEMP_CALIB		0x74
#define SUN8I_THS_TEMP_DATA		0x80

/* sun50i (H6 generation) register map */
#define SUN50I_THS_CTRL0		0x00
#define SUN50I_H6_THS_ENABLE		0x04
#define SUN50I_H6_THS_PC		0x08
#define SUN50I_H6_THS_DIC		0x10
#define SUN50I_H6_THS_DIS		0x20
#define SUN50I_H6_THS_MFC		0x30
#define SUN50I_H6_THS_TEMP_CALIB	0xa0
#define SUN50I_H6_THS_TEMP_DATA		0xc0

#define SUN8I_THS_CTRL0_T_ACQ0(x)	(GENMASK(15, 0) & (x))
#define SUN8I_THS_CTRL2_T_ACQ1(x)	((GENMASK(15, 0) & (x)) << 16)
/* on H3 the per-sensor data IRQ status bits start at bit 8 */
#define SUN8I_THS_DATA_IRQ_STS(x)	BIT(x + 8)

#define SUN50I_THS_CTRL0_T_ACQ(x)	((GENMASK(15, 0) & (x)) << 16)
#define SUN50I_THS_FILTER_EN		BIT(2)
#define SUN50I_THS_FILTER_TYPE(x)	(GENMASK(1, 0) & (x))
#define SUN50I_H6_THS_PC_TEMP_PERIOD(x)	((GENMASK(19, 0) & (x)) << 12)
#define SUN50I_H6_THS_DATA_IRQ_STS(x)	BIT(x)

/* One registered thermal zone, backed by one hardware sensor channel. */
struct tsensor {
	struct ths_device		*tmdev;	/* owning device */
	struct thermal_zone_device	*tzd;	/* zone registered for this channel */
	int				id;	/* hardware channel index */
};

/*
 * Per-SoC description: clock/reset topology, temperature transfer
 * function (offset/scale, in the units used by calc_temp), and the
 * generation-specific register operations.
 */
struct ths_thermal_chip {
	bool            has_mod_clk;
	bool            has_bus_clk_reset;
	int		sensor_num;
	int		offset;
	int		scale;
	int		ft_deviation;	/* fixed post-calc offset, millicelsius */
	int		temp_data_base;	/* base offset of the TEMP_DATA registers */
	int		(*calibrate)(struct ths_device *tmdev,
				     u16 *caldata, int callen);
	int		(*init)(struct ths_device *tmdev);
	/* ack pending data IRQs; returns bitmap of sensors with new data */
	unsigned long	(*irq_ack)(struct ths_device *tmdev);
	int		(*calc_temp)(struct ths_device *tmdev,
				     int id, int reg);
};

struct ths_device {
	const struct ths_thermal_chip		*chip;
	struct device				*dev;
	struct regmap				*regmap;
	struct reset_control			*reset;
	struct clk				*bus_clk;
	struct clk                              *mod_clk;
	struct tsensor				sensor[MAX_SENSOR_NUM];
};

/* Temp Unit: millidegree Celsius */

/* Linear transfer function: temp = offset - reg * scale / 10. */
static int sun8i_ths_calc_temp(struct ths_device *tmdev,
			       int id, int reg)
{
	return tmdev->chip->offset - (reg * tmdev->chip->scale / 10);
}

/*
 * H5 uses a piecewise-linear transfer function: one slope for high
 * readings, and sensor-specific slopes below 0x500 (sensor 0 differs
 * from the others).
 */
static int sun50i_h5_calc_temp(struct ths_device *tmdev,
			       int id, int reg)
{
	if (reg >= 0x500)
		return -1191 * reg / 10 + 223000;
	else if (!id)
		return -1452 * reg / 10 + 259000;
	else
		return -1590 * reg / 10 + 276000;
}

/* thermal_zone .get_temp: read the raw register and convert to mC. */
static int sun8i_ths_get_temp(struct thermal_zone_device *tz, int *temp)
{
	struct tsensor *s = thermal_zone_device_priv(tz);
	struct ths_device *tmdev = s->tmdev;
	int val = 0;

	regmap_read(tmdev->regmap, tmdev->chip->temp_data_base +
		    0x4 * s->id, &val);

	/* ths have no data yet */
	if (!val)
		return -EAGAIN;

	*temp = tmdev->chip->calc_temp(tmdev, s->id, val);
	/*
	 * According to the original sdk, there are some platforms(rarely)
	 * that add a fixed offset value after calculating the temperature
	 * value. We can't simply put it on the formula for calculating the
	 * temperature above, because the formula for calculating the
	 * temperature above is also used when the sensor is calibrated. If
	 * do this, the correct calibration formula is hard to know.
	 */
	*temp += tmdev->chip->ft_deviation;

	return 0;
}

static const struct thermal_zone_device_ops ths_ops = {
	.get_temp = sun8i_ths_get_temp,
};

static const struct regmap_config config = {
	.reg_bits = 32,
	.val_bits = 32,
	.reg_stride = 4,
	.fast_io = true,
	.max_register = 0xfc,
};

/* Ack H3-style data IRQs; returns a bitmap of sensors with fresh data. */
static unsigned long sun8i_h3_irq_ack(struct ths_device *tmdev)
{
	unsigned long irq_bitmap = 0;
	int i, state;

	regmap_read(tmdev->regmap, SUN8I_THS_IS, &state);

	for (i = 0; i < tmdev->chip->sensor_num; i++) {
		if (state & SUN8I_THS_DATA_IRQ_STS(i)) {
			/* write-1-to-clear the status bit */
			regmap_write(tmdev->regmap, SUN8I_THS_IS,
				     SUN8I_THS_DATA_IRQ_STS(i));
			bitmap_set(&irq_bitmap, i, 1);
		}
	}

	return irq_bitmap;
}

/* Ack H6-style data IRQs; returns a bitmap of sensors with fresh data. */
static unsigned long sun50i_h6_irq_ack(struct ths_device *tmdev)
{
	unsigned long irq_bitmap = 0;
	int i, state;

	regmap_read(tmdev->regmap, SUN50I_H6_THS_DIS, &state);

	for (i = 0; i < tmdev->chip->sensor_num; i++) {
		if (state & SUN50I_H6_THS_DATA_IRQ_STS(i)) {
			/* write-1-to-clear the status bit */
			regmap_write(tmdev->regmap, SUN50I_H6_THS_DIS,
				     SUN50I_H6_THS_DATA_IRQ_STS(i));
			bitmap_set(&irq_bitmap, i, 1);
		}
	}

	return irq_bitmap;
}

/* Threaded IRQ handler: notify every zone that has new data available. */
static irqreturn_t sun8i_irq_thread(int irq, void *data)
{
	struct ths_device *tmdev = data;
	unsigned long irq_bitmap = tmdev->chip->irq_ack(tmdev);
	int i;

	for_each_set_bit(i, &irq_bitmap, tmdev->chip->sensor_num) {
		thermal_zone_device_update(tmdev->sensor[i].tzd,
					   THERMAL_EVENT_UNSPECIFIED);
	}

	return IRQ_HANDLED;
}

/*
 * H3-style calibration: the efuse carries one 12-bit value per sensor,
 * packed two per 32-bit calibration register.
 */
static int sun8i_h3_ths_calibrate(struct ths_device *tmdev,
				  u16 *caldata, int callen)
{
	int i;

	if (!caldata[0] || callen < 2 * tmdev->chip->sensor_num)
		return -EINVAL;

	for (i = 0; i < tmdev->chip->sensor_num; i++) {
		/* odd sensors occupy the upper 16 bits of the register */
		int offset = (i % 2) << 4;

		regmap_update_bits(tmdev->regmap,
				   SUN8I_THS_TEMP_CALIB + (4 * (i >> 1)),
				   TEMP_CALIB_MASK << offset,
				   caldata[i] << offset);
	}

	return 0;
}

/*
 * H6-style calibration: derive a per-sensor offset from the factory-test
 * ambient temperature and the raw readings recorded at that temperature.
 */
static int sun50i_h6_ths_calibrate(struct ths_device *tmdev,
				   u16 *caldata, int callen)
{
	struct device *dev = tmdev->dev;
	int i, ft_temp;

	if (!caldata[0] || callen < 2 + 2 * tmdev->chip->sensor_num)
		return -EINVAL;

	/*
	 * efuse layout:
	 *
	 *	0      11     16       32
	 *	+-------+-------+-------+
	 *	|temp|  |sensor0|sensor1|
	 *	+-------+-------+-------+
	 *
	 * The calibration data on the H6 is the ambient temperature and
	 * sensor values that are filled during the factory test stage.
	 *
	 * The unit of stored FT temperature is 0.1 degree celsius.
	 *
	 * We need to calculate a delta between measured and caluclated
	 * register values and this will become a calibration offset.
	 */
	ft_temp = (caldata[0] & FT_TEMP_MASK) * 100;

	for (i = 0; i < tmdev->chip->sensor_num; i++) {
		int sensor_reg = caldata[i + 1] & TEMP_CALIB_MASK;
		int cdata, offset;
		int sensor_temp = tmdev->chip->calc_temp(tmdev, i, sensor_reg);

		/*
		 * Calibration data is CALIBRATE_DEFAULT - (calculated
		 * temperature from sensor reading at factory temperature
		 * minus actual factory temperature) * 14.88 (scale from
		 * temperature to register values)
		 */
		cdata = CALIBRATE_DEFAULT -
			((sensor_temp - ft_temp) * 10 / tmdev->chip->scale);
		if (cdata & ~TEMP_CALIB_MASK) {
			/*
			 * Calibration value more than 12-bit, but calibration
			 * register is 12-bit. In this case, ths hardware can
			 * still work without calibration, although the data
			 * won't be so accurate.
			 */
			dev_warn(dev, "sensor%d is not calibrated.\n", i);
			continue;
		}

		offset = (i % 2) * 16;
		regmap_update_bits(tmdev->regmap,
				   SUN50I_H6_THS_TEMP_CALIB + (i / 2 * 4),
				   TEMP_CALIB_MASK << offset,
				   cdata << offset);
	}

	return 0;
}

/*
 * Read the calibration cell from nvmem and hand it to the chip-specific
 * calibration routine. Missing calibration data is not fatal (see below);
 * only -EPROBE_DEFER from nvmem is propagated as a hard error.
 */
static int sun8i_ths_calibrate(struct ths_device *tmdev)
{
	struct nvmem_cell *calcell;
	struct device *dev = tmdev->dev;
	u16 *caldata;
	size_t callen;
	int ret = 0;

	calcell = nvmem_cell_get(dev, "calibration");
	if (IS_ERR(calcell)) {
		if (PTR_ERR(calcell) == -EPROBE_DEFER)
			return -EPROBE_DEFER;
		/*
		 * Even if the external calibration data stored in sid is
		 * not accessible, the THS hardware can still work, although
		 * the data won't be so accurate.
		 *
		 * The default value of calibration register is 0x800 for
		 * every sensor, and the calibration value is usually 0x7xx
		 * or 0x8xx, so they won't be away from the default value
		 * for a lot.
		 *
		 * So here we do not return error if the calibration data is
		 * not available, except the probe needs deferring.
		 */
		goto out;
	}

	caldata = nvmem_cell_read(calcell, &callen);
	if (IS_ERR(caldata)) {
		ret = PTR_ERR(caldata);
		goto out;
	}

	tmdev->chip->calibrate(tmdev, caldata, callen);

	kfree(caldata);
out:
	if (!IS_ERR(calcell))
		nvmem_cell_put(calcell);

	return ret;
}

/* devm action: re-assert the reset line on driver teardown. */
static void sun8i_ths_reset_control_assert(void *data)
{
	reset_control_assert(data);
}

/*
 * Map registers, bring clocks/reset up (where the chip has them) and run
 * calibration. All resources are devm-managed.
 */
static int sun8i_ths_resource_init(struct ths_device *tmdev)
{
	struct device *dev = tmdev->dev;
	struct platform_device *pdev = to_platform_device(dev);
	void __iomem *base;
	int ret;

	base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(base))
		return PTR_ERR(base);

	tmdev->regmap = devm_regmap_init_mmio(dev, base, &config);
	if (IS_ERR(tmdev->regmap))
		return PTR_ERR(tmdev->regmap);

	if (tmdev->chip->has_bus_clk_reset) {
		tmdev->reset = devm_reset_control_get(dev, NULL);
		if (IS_ERR(tmdev->reset))
			return PTR_ERR(tmdev->reset);

		ret = reset_control_deassert(tmdev->reset);
		if (ret)
			return ret;

		ret = devm_add_action_or_reset(dev,
					       sun8i_ths_reset_control_assert,
					       tmdev->reset);
		if (ret)
			return ret;

		tmdev->bus_clk = devm_clk_get_enabled(&pdev->dev, "bus");
		if (IS_ERR(tmdev->bus_clk))
			return PTR_ERR(tmdev->bus_clk);
	}

	if (tmdev->chip->has_mod_clk) {
		tmdev->mod_clk = devm_clk_get_enabled(&pdev->dev, "mod");
		if (IS_ERR(tmdev->mod_clk))
			return PTR_ERR(tmdev->mod_clk);
	}

	/*
	 * NOTE(review): called even when has_mod_clk is false, relying on
	 * clk_set_rate(NULL, ...) being a successful no-op in the CCF —
	 * presumably intentional; confirm against the clk API contract.
	 */
	ret = clk_set_rate(tmdev->mod_clk, 24000000);
	if (ret)
		return ret;

	ret = sun8i_ths_calibrate(tmdev);
	if (ret)
		return ret;

	return 0;
}

/* H3-generation controller setup: filter, sample period, acquire time. */
static int sun8i_h3_thermal_init(struct ths_device *tmdev)
{
	int val;

	/* average over 4 samples */
	regmap_write(tmdev->regmap, SUN8I_THS_MFC,
		     SUN50I_THS_FILTER_EN |
		     SUN50I_THS_FILTER_TYPE(1));
	/*
	 * clkin = 24MHz
	 * filter_samples = 4
	 * period = 0.25s
	 *
	 * x = period * clkin / 4096 / filter_samples - 1
	 *   = 365
	 *
	 * NOTE(review): the H6 PC_TEMP_PERIOD macro is reused for the H3
	 * IC register here — looks like the field layouts match; confirm
	 * against the H3 user manual.
	 */
	val = GENMASK(7 + tmdev->chip->sensor_num, 8);
	regmap_write(tmdev->regmap, SUN8I_THS_IC,
		     SUN50I_H6_THS_PC_TEMP_PERIOD(365) | val);
	/*
	 * T_acq = 20us
	 * clkin = 24MHz
	 *
	 * x = T_acq * clkin - 1
	 *   = 479
	 */
	regmap_write(tmdev->regmap, SUN8I_THS_CTRL0,
		     SUN8I_THS_CTRL0_T_ACQ0(479));
	/* enable all sensor channels */
	val = GENMASK(tmdev->chip->sensor_num - 1, 0);
	regmap_write(tmdev->regmap, SUN8I_THS_CTRL2,
		     SUN8I_THS_CTRL2_T_ACQ1(479) | val);

	return 0;
}

/*
 * Without this undocumented value, the returned temperatures would
 * be higher than real ones by about 20C.
 */
#define SUN50I_H6_CTRL0_UNK 0x0000002f

/* H6-generation controller setup: acquire time, filter, period, enables. */
static int sun50i_h6_thermal_init(struct ths_device *tmdev)
{
	int val;

	/*
	 * T_acq = 20us
	 * clkin = 24MHz
	 *
	 * x = T_acq * clkin - 1
	 *   = 479
	 */
	regmap_write(tmdev->regmap, SUN50I_THS_CTRL0,
		     SUN50I_H6_CTRL0_UNK | SUN50I_THS_CTRL0_T_ACQ(479));
	/* average over 4 samples */
	regmap_write(tmdev->regmap, SUN50I_H6_THS_MFC,
		     SUN50I_THS_FILTER_EN |
		     SUN50I_THS_FILTER_TYPE(1));
	/*
	 * clkin = 24MHz
	 * filter_samples = 4
	 * period = 0.25s
	 *
	 * x = period * clkin / 4096 / filter_samples - 1
	 *   = 365
	 */
	regmap_write(tmdev->regmap, SUN50I_H6_THS_PC,
		     SUN50I_H6_THS_PC_TEMP_PERIOD(365));
	/* enable sensor */
	val = GENMASK(tmdev->chip->sensor_num - 1, 0);
	regmap_write(tmdev->regmap, SUN50I_H6_THS_ENABLE, val);
	/* thermal data interrupt enable */
	val = GENMASK(tmdev->chip->sensor_num - 1, 0);
	regmap_write(tmdev->regmap, SUN50I_H6_THS_DIC, val);

	return 0;
}

/* Register one devicetree thermal zone (plus hwmon) per sensor channel. */
static int sun8i_ths_register(struct ths_device *tmdev)
{
	int i;

	for (i = 0; i < tmdev->chip->sensor_num; i++) {
		tmdev->sensor[i].tmdev = tmdev;
		tmdev->sensor[i].id = i;
		tmdev->sensor[i].tzd =
			devm_thermal_of_zone_register(tmdev->dev,
						      i,
						      &tmdev->sensor[i],
						      &ths_ops);
		if (IS_ERR(tmdev->sensor[i].tzd))
			return PTR_ERR(tmdev->sensor[i].tzd);

		devm_thermal_add_hwmon_sysfs(tmdev->dev, tmdev->sensor[i].tzd);
	}

	return 0;
}

/* Probe: resources -> hardware init -> zones -> IRQ (last, see comment). */
static int sun8i_ths_probe(struct platform_device *pdev)
{
	struct ths_device *tmdev;
	struct device *dev = &pdev->dev;
	int ret, irq;

	tmdev = devm_kzalloc(dev, sizeof(*tmdev), GFP_KERNEL);
	if (!tmdev)
		return -ENOMEM;

	tmdev->dev = dev;
	tmdev->chip = of_device_get_match_data(&pdev->dev);
	if (!tmdev->chip)
		return -EINVAL;

	ret = sun8i_ths_resource_init(tmdev);
	if (ret)
		return ret;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	ret = tmdev->chip->init(tmdev);
	if (ret)
		return ret;

	ret = sun8i_ths_register(tmdev);
	if (ret)
		return ret;

	/*
	 * Avoid entering the interrupt handler, the thermal device is not
	 * registered yet, we deffer the registration of the interrupt to
	 * the end.
	 */
	ret = devm_request_threaded_irq(dev, irq, NULL,
					sun8i_irq_thread,
					IRQF_ONESHOT, "ths", tmdev);
	if (ret)
		return ret;

	return 0;
}

static const struct ths_thermal_chip sun8i_a83t_ths = {
	.sensor_num = 3,
	.scale = 705,
	.offset = 191668,
	.temp_data_base = SUN8I_THS_TEMP_DATA,
	.calibrate = sun8i_h3_ths_calibrate,
	.init = sun8i_h3_thermal_init,
	.irq_ack = sun8i_h3_irq_ack,
	.calc_temp = sun8i_ths_calc_temp,
};

static const struct ths_thermal_chip sun8i_h3_ths = {
	.sensor_num = 1,
	.scale = 1211,
	.offset = 217000,
	.has_mod_clk = true,
	.has_bus_clk_reset = true,
	.temp_data_base = SUN8I_THS_TEMP_DATA,
	.calibrate = sun8i_h3_ths_calibrate,
	.init = sun8i_h3_thermal_init,
	.irq_ack = sun8i_h3_irq_ack,
	.calc_temp = sun8i_ths_calc_temp,
};

static const struct ths_thermal_chip sun8i_r40_ths = {
	.sensor_num = 2,
	.offset = 251086,
	.scale = 1130,
	.has_mod_clk = true,
	.has_bus_clk_reset = true,
	.temp_data_base = SUN8I_THS_TEMP_DATA,
	.calibrate = sun8i_h3_ths_calibrate,
	.init = sun8i_h3_thermal_init,
	.irq_ack = sun8i_h3_irq_ack,
	.calc_temp = sun8i_ths_calc_temp,
};

static const struct ths_thermal_chip sun50i_a64_ths = {
	.sensor_num = 3,
	.offset = 260890,
	.scale = 1170,
	.has_mod_clk = true,
	.has_bus_clk_reset = true,
	.temp_data_base = SUN8I_THS_TEMP_DATA,
	.calibrate = sun8i_h3_ths_calibrate,
	.init = sun8i_h3_thermal_init,
	.irq_ack = sun8i_h3_irq_ack,
	.calc_temp = sun8i_ths_calc_temp,
};

static const struct ths_thermal_chip sun50i_a100_ths = {
	.sensor_num = 3,
	.has_bus_clk_reset = true,
	.ft_deviation = 8000,
	.offset = 187744,
	.scale = 672,
	.temp_data_base = SUN50I_H6_THS_TEMP_DATA,
	.calibrate = sun50i_h6_ths_calibrate,
	.init = sun50i_h6_thermal_init,
	.irq_ack = sun50i_h6_irq_ack,
	.calc_temp = sun8i_ths_calc_temp,
};

static const struct ths_thermal_chip sun50i_h5_ths = {
	.sensor_num = 2,
	.has_mod_clk = true,
	.has_bus_clk_reset = true,
	.temp_data_base = SUN8I_THS_TEMP_DATA,
	.calibrate = sun8i_h3_ths_calibrate,
	.init = sun8i_h3_thermal_init,
	.irq_ack = sun8i_h3_irq_ack,
	.calc_temp = sun50i_h5_calc_temp,
};

static const struct ths_thermal_chip sun50i_h6_ths = {
	.sensor_num = 2,
	.has_bus_clk_reset = true,
	.ft_deviation = 7000,
	.offset = 187744,
	.scale = 672,
	.temp_data_base = SUN50I_H6_THS_TEMP_DATA,
	.calibrate = sun50i_h6_ths_calibrate,
	.init = sun50i_h6_thermal_init,
	.irq_ack = sun50i_h6_irq_ack,
	.calc_temp = sun8i_ths_calc_temp,
};

static const struct of_device_id of_ths_match[] = {
	{ .compatible = "allwinner,sun8i-a83t-ths", .data = &sun8i_a83t_ths },
	{ .compatible = "allwinner,sun8i-h3-ths", .data = &sun8i_h3_ths },
	{ .compatible = "allwinner,sun8i-r40-ths", .data = &sun8i_r40_ths },
	{ .compatible = "allwinner,sun50i-a64-ths", .data = &sun50i_a64_ths },
	{ .compatible = "allwinner,sun50i-a100-ths", .data = &sun50i_a100_ths },
	{ .compatible = "allwinner,sun50i-h5-ths", .data = &sun50i_h5_ths },
	{ .compatible = "allwinner,sun50i-h6-ths", .data = &sun50i_h6_ths },
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, of_ths_match);

static struct platform_driver ths_driver = {
	.probe = sun8i_ths_probe,
	.driver = {
		.name = "sun8i-thermal",
		.of_match_table = of_ths_match,
	},
};
module_platform_driver(ths_driver);

MODULE_DESCRIPTION("Thermal sensor driver for Allwinner SOC");
MODULE_LICENSE("GPL v2");
linux-master
drivers/thermal/sun8i_thermal.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2008 Intel Corp
 * Copyright (C) 2008 Zhang Rui <[email protected]>
 * Copyright (C) 2008 Sujith Thomas <[email protected]>
 * Copyright 2022 Linaro Limited
 *
 * Thermal trips handling
 */
#include "thermal_core.h"

/*
 * Invoke @cb for each trip of @tz, stopping early on the first non-zero
 * return, which is then propagated to the caller. Caller must hold
 * tz->lock. Returns -ENODATA if the zone has no trip table.
 */
int for_each_thermal_trip(struct thermal_zone_device *tz,
			  int (*cb)(struct thermal_trip *, void *),
			  void *data)
{
	int i, ret;

	lockdep_assert_held(&tz->lock);

	if (!tz->trips)
		return -ENODATA;

	for (i = 0; i < tz->num_trips; i++) {
		ret = cb(&tz->trips[i], data);
		if (ret)
			return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(for_each_thermal_trip);

int thermal_zone_get_num_trips(struct thermal_zone_device *tz)
{
	return tz->num_trips;
}
EXPORT_SYMBOL_GPL(thermal_zone_get_num_trips);

/**
 * __thermal_zone_set_trips - Computes the next trip points for the driver
 * @tz: a pointer to a thermal zone device structure
 *
 * The function computes the next temperature boundaries by browsing
 * the trip points. The result is the closer low and high trip points
 * to the current temperature. These values are passed to the backend
 * driver to let it set its own notification mechanism (usually an
 * interrupt).
 *
 * This function must be called with tz->lock held. Both tz and tz->ops
 * must be valid pointers.
 *
 * It does not return a value
 */
void __thermal_zone_set_trips(struct thermal_zone_device *tz)
{
	struct thermal_trip trip;
	/* -INT_MAX (not INT_MIN) so the window can be safely negated/compared */
	int low = -INT_MAX, high = INT_MAX;
	int i, ret;

	lockdep_assert_held(&tz->lock);

	if (!tz->ops->set_trips)
		return;

	for (i = 0; i < tz->num_trips; i++) {
		int trip_low;

		ret = __thermal_zone_get_trip(tz, i, &trip);
		if (ret)
			return;

		/* the low boundary accounts for the trip's hysteresis */
		trip_low = trip.temperature - trip.hysteresis;

		if (trip_low < tz->temperature && trip_low > low)
			low = trip_low;

		if (trip.temperature > tz->temperature &&
		    trip.temperature < high)
			high = trip.temperature;
	}

	/* No need to change trip points */
	if (tz->prev_low_trip == low && tz->prev_high_trip == high)
		return;

	tz->prev_low_trip = low;
	tz->prev_high_trip = high;

	dev_dbg(&tz->device,
		"new temperature boundaries: %d < x < %d\n", low, high);

	/*
	 * Set a temperature window. When this window is left the driver
	 * must inform the thermal core via thermal_zone_device_update.
	 */
	ret = tz->ops->set_trips(tz, low, high);
	if (ret)
		dev_err(&tz->device, "Failed to set trips: %d\n", ret);
}

/*
 * Copy trip @trip_id of @tz into *@trip. Lockless variant: caller is
 * expected to hold tz->lock (or be on a path where it is safe not to).
 * Returns -EINVAL on any invalid argument or out-of-range trip id.
 */
int __thermal_zone_get_trip(struct thermal_zone_device *tz, int trip_id,
			    struct thermal_trip *trip)
{
	if (!tz || !tz->trips || trip_id < 0 || trip_id >= tz->num_trips || !trip)
		return -EINVAL;

	*trip = tz->trips[trip_id];
	return 0;
}
EXPORT_SYMBOL_GPL(__thermal_zone_get_trip);

/* Locked wrapper around __thermal_zone_get_trip(). */
int thermal_zone_get_trip(struct thermal_zone_device *tz, int trip_id,
			  struct thermal_trip *trip)
{
	int ret;

	mutex_lock(&tz->lock);
	ret = __thermal_zone_get_trip(tz, trip_id, trip);
	mutex_unlock(&tz->lock);

	return ret;
}
EXPORT_SYMBOL_GPL(thermal_zone_get_trip);

/*
 * Update trip @trip_id of @tz from *@trip: push changed temperature and
 * hysteresis to the zone ops (when provided), mirror the change into the
 * in-memory trip table, emit a netlink trip-change notification and
 * trigger a zone update. The trip type must match the existing one.
 *
 * NOTE(review): the notification and zone update are issued even when
 * neither value changed — presumably intentional; confirm with callers.
 */
int thermal_zone_set_trip(struct thermal_zone_device *tz, int trip_id,
			  const struct thermal_trip *trip)
{
	struct thermal_trip t;
	int ret;

	if (!tz->ops->set_trip_temp && !tz->ops->set_trip_hyst && !tz->trips)
		return -EINVAL;

	ret = __thermal_zone_get_trip(tz, trip_id, &t);
	if (ret)
		return ret;

	if (t.type != trip->type)
		return -EINVAL;

	if (t.temperature != trip->temperature && tz->ops->set_trip_temp) {
		ret = tz->ops->set_trip_temp(tz, trip_id, trip->temperature);
		if (ret)
			return ret;
	}

	if (t.hysteresis != trip->hysteresis && tz->ops->set_trip_hyst) {
		ret = tz->ops->set_trip_hyst(tz, trip_id, trip->hysteresis);
		if (ret)
			return ret;
	}

	if (tz->trips && (t.temperature != trip->temperature ||
			  t.hysteresis != trip->hysteresis))
		tz->trips[trip_id] = *trip;

	thermal_notify_tz_trip_change(tz->id, trip_id, trip->type,
				      trip->temperature, trip->hysteresis);

	__thermal_zone_device_update(tz, THERMAL_TRIP_CHANGED);

	return 0;
}
linux-master
drivers/thermal/thermal_trip.c
// SPDX-License-Identifier: GPL-2.0-only /* * step_wise.c - A step-by-step Thermal throttling governor * * Copyright (C) 2012 Intel Corp * Copyright (C) 2012 Durgadoss R <[email protected]> * * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ * * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */ #include <linux/thermal.h> #include <linux/minmax.h> #include "thermal_trace.h" #include "thermal_core.h" /* * If the temperature is higher than a trip point, * a. if the trend is THERMAL_TREND_RAISING, use higher cooling * state for this trip point * b. if the trend is THERMAL_TREND_DROPPING, do nothing * If the temperature is lower than a trip point, * a. if the trend is THERMAL_TREND_RAISING, do nothing * b. if the trend is THERMAL_TREND_DROPPING, use lower cooling * state for this trip point, if the cooling state already * equals lower limit, deactivate the thermal instance */ static unsigned long get_target_state(struct thermal_instance *instance, enum thermal_trend trend, bool throttle) { struct thermal_cooling_device *cdev = instance->cdev; unsigned long cur_state; unsigned long next_target; /* * We keep this instance the way it is by default. * Otherwise, we use the current state of the * cdev in use to determine the next_target. 
*/ cdev->ops->get_cur_state(cdev, &cur_state); next_target = instance->target; dev_dbg(&cdev->device, "cur_state=%ld\n", cur_state); if (!instance->initialized) { if (throttle) { next_target = clamp((cur_state + 1), instance->lower, instance->upper); } else { next_target = THERMAL_NO_TARGET; } return next_target; } if (throttle) { if (trend == THERMAL_TREND_RAISING) next_target = clamp((cur_state + 1), instance->lower, instance->upper); } else { if (trend == THERMAL_TREND_DROPPING) { if (cur_state <= instance->lower) next_target = THERMAL_NO_TARGET; else next_target = clamp((cur_state - 1), instance->lower, instance->upper); } } return next_target; } static void update_passive_instance(struct thermal_zone_device *tz, enum thermal_trip_type type, int value) { /* * If value is +1, activate a passive instance. * If value is -1, deactivate a passive instance. */ if (type == THERMAL_TRIP_PASSIVE) tz->passive += value; } static void thermal_zone_trip_update(struct thermal_zone_device *tz, int trip_id) { enum thermal_trend trend; struct thermal_instance *instance; struct thermal_trip trip; bool throttle = false; int old_target; __thermal_zone_get_trip(tz, trip_id, &trip); trend = get_tz_trend(tz, trip_id); if (tz->temperature >= trip.temperature) { throttle = true; trace_thermal_zone_trip(tz, trip_id, trip.type); } dev_dbg(&tz->device, "Trip%d[type=%d,temp=%d]:trend=%d,throttle=%d\n", trip_id, trip.type, trip.temperature, trend, throttle); list_for_each_entry(instance, &tz->thermal_instances, tz_node) { if (instance->trip != trip_id) continue; old_target = instance->target; instance->target = get_target_state(instance, trend, throttle); dev_dbg(&instance->cdev->device, "old_target=%d, target=%d\n", old_target, (int)instance->target); if (instance->initialized && old_target == instance->target) continue; /* Activate a passive thermal instance */ if (old_target == THERMAL_NO_TARGET && instance->target != THERMAL_NO_TARGET) update_passive_instance(tz, trip.type, 1); /* 
Deactivate a passive thermal instance */ else if (old_target != THERMAL_NO_TARGET && instance->target == THERMAL_NO_TARGET) update_passive_instance(tz, trip.type, -1); instance->initialized = true; mutex_lock(&instance->cdev->lock); instance->cdev->updated = false; /* cdev needs update */ mutex_unlock(&instance->cdev->lock); } } /** * step_wise_throttle - throttles devices associated with the given zone * @tz: thermal_zone_device * @trip: trip point index * * Throttling Logic: This uses the trend of the thermal zone to throttle. * If the thermal zone is 'heating up' this throttles all the cooling * devices associated with the zone and its particular trip point, by one * step. If the zone is 'cooling down' it brings back the performance of * the devices by one step. */ static int step_wise_throttle(struct thermal_zone_device *tz, int trip) { struct thermal_instance *instance; lockdep_assert_held(&tz->lock); thermal_zone_trip_update(tz, trip); list_for_each_entry(instance, &tz->thermal_instances, tz_node) thermal_cdev_update(instance->cdev); return 0; } static struct thermal_governor thermal_gov_step_wise = { .name = "step_wise", .throttle = step_wise_throttle, }; THERMAL_GOVERNOR_DECLARE(thermal_gov_step_wise);
linux-master
drivers/thermal/gov_step_wise.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright 2020 Linaro Limited
 *
 * Author: Daniel Lezcano <[email protected]>
 *
 * Generic netlink for thermal management framework
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <net/genetlink.h>
#include <uapi/linux/thermal.h>

#include "thermal_core.h"

/* Two multicast groups: periodic temperature samples and discrete events. */
static const struct genl_multicast_group thermal_genl_mcgrps[] = {
	{ .name = THERMAL_GENL_SAMPLING_GROUP_NAME, },
	{ .name = THERMAL_GENL_EVENT_GROUP_NAME, },
};

/* Attribute validation policy for all thermal genetlink messages. */
static const struct nla_policy thermal_genl_policy[THERMAL_GENL_ATTR_MAX + 1] = {
	/* Thermal zone */
	[THERMAL_GENL_ATTR_TZ]			= { .type = NLA_NESTED },
	[THERMAL_GENL_ATTR_TZ_ID]		= { .type = NLA_U32 },
	[THERMAL_GENL_ATTR_TZ_TEMP]		= { .type = NLA_U32 },
	[THERMAL_GENL_ATTR_TZ_TRIP]		= { .type = NLA_NESTED },
	[THERMAL_GENL_ATTR_TZ_TRIP_ID]		= { .type = NLA_U32 },
	[THERMAL_GENL_ATTR_TZ_TRIP_TEMP]	= { .type = NLA_U32 },
	[THERMAL_GENL_ATTR_TZ_TRIP_TYPE]	= { .type = NLA_U32 },
	[THERMAL_GENL_ATTR_TZ_TRIP_HYST]	= { .type = NLA_U32 },
	[THERMAL_GENL_ATTR_TZ_MODE]		= { .type = NLA_U32 },
	[THERMAL_GENL_ATTR_TZ_CDEV_WEIGHT]	= { .type = NLA_U32 },
	[THERMAL_GENL_ATTR_TZ_NAME]		= { .type = NLA_STRING,
						    .len = THERMAL_NAME_LENGTH },
	/* Governor(s) */
	[THERMAL_GENL_ATTR_TZ_GOV]		= { .type = NLA_NESTED },
	[THERMAL_GENL_ATTR_TZ_GOV_NAME]		= { .type = NLA_STRING,
						    .len = THERMAL_NAME_LENGTH },
	/* Cooling devices */
	[THERMAL_GENL_ATTR_CDEV]		= { .type = NLA_NESTED },
	[THERMAL_GENL_ATTR_CDEV_ID]		= { .type = NLA_U32 },
	[THERMAL_GENL_ATTR_CDEV_CUR_STATE]	= { .type = NLA_U32 },
	[THERMAL_GENL_ATTR_CDEV_MAX_STATE]	= { .type = NLA_U32 },
	[THERMAL_GENL_ATTR_CDEV_NAME]		= { .type = NLA_STRING,
						    .len = THERMAL_NAME_LENGTH },
	/* CPU capabilities */
	[THERMAL_GENL_ATTR_CPU_CAPABILITY]	= { .type = NLA_NESTED },
	[THERMAL_GENL_ATTR_CPU_CAPABILITY_ID]	= { .type = NLA_U32 },
	[THERMAL_GENL_ATTR_CPU_CAPABILITY_PERFORMANCE]	= { .type = NLA_U32 },
	[THERMAL_GENL_ATTR_CPU_CAPABILITY_EFFICIENCY]	= { .type = NLA_U32 },
};

/*
 * Scratch parameter bundle handed to the per-event / per-command encoding
 * callbacks below; only the fields relevant to a given callback are set.
 */
struct param {
	struct nlattr **attrs;
	struct sk_buff *msg;
	const char *name;
	int tz_id;
	int cdev_id;
	int trip_id;
	int trip_temp;
	int trip_type;
	int trip_hyst;
	int temp;
	int cdev_state;
	int cdev_max_state;
	struct thermal_genl_cpu_caps *cpu_capabilities;
	int cpu_capabilities_count;
};

/* Common signature of all encoding callbacks. */
typedef int (*cb_t)(struct param *);

static struct genl_family thermal_gnl_family;

/************************** Sampling encoding *******************************/

/*
 * Broadcast one temperature sample for zone @id on the sampling multicast
 * group (group index 0).
 */
int thermal_genl_sampling_temp(int id, int temp)
{
	struct sk_buff *skb;
	void *hdr;

	skb = genlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	hdr = genlmsg_put(skb, 0, 0, &thermal_gnl_family, 0,
			  THERMAL_GENL_SAMPLING_TEMP);
	if (!hdr)
		goto out_free;

	if (nla_put_u32(skb, THERMAL_GENL_ATTR_TZ_ID, id))
		goto out_cancel;

	if (nla_put_u32(skb, THERMAL_GENL_ATTR_TZ_TEMP, temp))
		goto out_cancel;

	genlmsg_end(skb, hdr);

	genlmsg_multicast(&thermal_gnl_family, skb, 0, 0, GFP_KERNEL);

	return 0;
out_cancel:
	genlmsg_cancel(skb, hdr);
out_free:
	nlmsg_free(skb);

	return -EMSGSIZE;
}

/**************************** Event encoding *********************************/

static int thermal_genl_event_tz_create(struct param *p)
{
	if (nla_put_u32(p->msg, THERMAL_GENL_ATTR_TZ_ID, p->tz_id) ||
	    nla_put_string(p->msg, THERMAL_GENL_ATTR_TZ_NAME, p->name))
		return -EMSGSIZE;

	return 0;
}

static int thermal_genl_event_tz(struct param *p)
{
	if (nla_put_u32(p->msg, THERMAL_GENL_ATTR_TZ_ID, p->tz_id))
		return -EMSGSIZE;

	return 0;
}

static int thermal_genl_event_tz_trip_up(struct param *p)
{
	if (nla_put_u32(p->msg, THERMAL_GENL_ATTR_TZ_ID, p->tz_id) ||
	    nla_put_u32(p->msg, THERMAL_GENL_ATTR_TZ_TRIP_ID, p->trip_id) ||
	    nla_put_u32(p->msg, THERMAL_GENL_ATTR_TZ_TEMP, p->temp))
		return -EMSGSIZE;

	return 0;
}

static int thermal_genl_event_tz_trip_add(struct param *p)
{
	if (nla_put_u32(p->msg, THERMAL_GENL_ATTR_TZ_ID, p->tz_id) ||
	    nla_put_u32(p->msg, THERMAL_GENL_ATTR_TZ_TRIP_ID, p->trip_id) ||
	    nla_put_u32(p->msg, THERMAL_GENL_ATTR_TZ_TRIP_TYPE, p->trip_type) ||
	    nla_put_u32(p->msg, THERMAL_GENL_ATTR_TZ_TRIP_TEMP, p->trip_temp) ||
	    nla_put_u32(p->msg, THERMAL_GENL_ATTR_TZ_TRIP_HYST, p->trip_hyst))
		return -EMSGSIZE;

	return 0;
}

static int thermal_genl_event_tz_trip_delete(struct param *p)
{
	if (nla_put_u32(p->msg, THERMAL_GENL_ATTR_TZ_ID, p->tz_id) ||
	    nla_put_u32(p->msg, THERMAL_GENL_ATTR_TZ_TRIP_ID, p->trip_id))
		return -EMSGSIZE;

	return 0;
}

static int thermal_genl_event_cdev_add(struct param *p)
{
	if (nla_put_string(p->msg, THERMAL_GENL_ATTR_CDEV_NAME,
			   p->name) ||
	    nla_put_u32(p->msg, THERMAL_GENL_ATTR_CDEV_ID,
			p->cdev_id) ||
	    nla_put_u32(p->msg, THERMAL_GENL_ATTR_CDEV_MAX_STATE,
			p->cdev_max_state))
		return -EMSGSIZE;

	return 0;
}

static int thermal_genl_event_cdev_delete(struct param *p)
{
	if (nla_put_u32(p->msg, THERMAL_GENL_ATTR_CDEV_ID, p->cdev_id))
		return -EMSGSIZE;

	return 0;
}

static int thermal_genl_event_cdev_state_update(struct param *p)
{
	if (nla_put_u32(p->msg, THERMAL_GENL_ATTR_CDEV_ID,
			p->cdev_id) ||
	    nla_put_u32(p->msg, THERMAL_GENL_ATTR_CDEV_CUR_STATE,
			p->cdev_state))
		return -EMSGSIZE;

	return 0;
}

static int thermal_genl_event_gov_change(struct param *p)
{
	if (nla_put_u32(p->msg, THERMAL_GENL_ATTR_TZ_ID, p->tz_id) ||
	    nla_put_string(p->msg, THERMAL_GENL_ATTR_GOV_NAME, p->name))
		return -EMSGSIZE;

	return 0;
}

/* Encode the per-CPU capability array as one nested attribute block. */
static int thermal_genl_event_cpu_capability_change(struct param *p)
{
	struct thermal_genl_cpu_caps *cpu_cap = p->cpu_capabilities;
	struct sk_buff *msg = p->msg;
	struct nlattr *start_cap;
	int i;

	start_cap = nla_nest_start(msg, THERMAL_GENL_ATTR_CPU_CAPABILITY);
	if (!start_cap)
		return -EMSGSIZE;

	for (i = 0; i < p->cpu_capabilities_count; ++i) {
		if (nla_put_u32(msg, THERMAL_GENL_ATTR_CPU_CAPABILITY_ID,
				cpu_cap->cpu))
			goto out_cancel_nest;

		if (nla_put_u32(msg, THERMAL_GENL_ATTR_CPU_CAPABILITY_PERFORMANCE,
				cpu_cap->performance))
			goto out_cancel_nest;

		if (nla_put_u32(msg, THERMAL_GENL_ATTR_CPU_CAPABILITY_EFFICIENCY,
				cpu_cap->efficiency))
			goto out_cancel_nest;

		++cpu_cap;
	}

	nla_nest_end(msg, start_cap);

	return 0;

out_cancel_nest:
	nla_nest_cancel(msg, start_cap);

	return -EMSGSIZE;
}

/* Events sharing an identical payload reuse one encoder via symbol aliases. */
int thermal_genl_event_tz_delete(struct param *p)
	__attribute__((alias("thermal_genl_event_tz")));

int thermal_genl_event_tz_enable(struct param *p)
	__attribute__((alias("thermal_genl_event_tz")));

int thermal_genl_event_tz_disable(struct param *p)
	__attribute__((alias("thermal_genl_event_tz")));

int thermal_genl_event_tz_trip_down(struct param *p)
	__attribute__((alias("thermal_genl_event_tz_trip_up")));

int thermal_genl_event_tz_trip_change(struct param *p)
	__attribute__((alias("thermal_genl_event_tz_trip_add")));

/* Dispatch table indexed by enum thermal_genl_event. */
static cb_t event_cb[] = {
	[THERMAL_GENL_EVENT_TZ_CREATE] = thermal_genl_event_tz_create,
	[THERMAL_GENL_EVENT_TZ_DELETE] = thermal_genl_event_tz_delete,
	[THERMAL_GENL_EVENT_TZ_ENABLE] = thermal_genl_event_tz_enable,
	[THERMAL_GENL_EVENT_TZ_DISABLE] = thermal_genl_event_tz_disable,
	[THERMAL_GENL_EVENT_TZ_TRIP_UP] = thermal_genl_event_tz_trip_up,
	[THERMAL_GENL_EVENT_TZ_TRIP_DOWN] = thermal_genl_event_tz_trip_down,
	[THERMAL_GENL_EVENT_TZ_TRIP_CHANGE] = thermal_genl_event_tz_trip_change,
	[THERMAL_GENL_EVENT_TZ_TRIP_ADD] = thermal_genl_event_tz_trip_add,
	[THERMAL_GENL_EVENT_TZ_TRIP_DELETE] = thermal_genl_event_tz_trip_delete,
	[THERMAL_GENL_EVENT_CDEV_ADD] = thermal_genl_event_cdev_add,
	[THERMAL_GENL_EVENT_CDEV_DELETE] = thermal_genl_event_cdev_delete,
	[THERMAL_GENL_EVENT_CDEV_STATE_UPDATE] = thermal_genl_event_cdev_state_update,
	[THERMAL_GENL_EVENT_TZ_GOV_CHANGE] = thermal_genl_event_gov_change,
	[THERMAL_GENL_EVENT_CPU_CAPABILITY_CHANGE] = thermal_genl_event_cpu_capability_change,
};

/*
 * Generic netlink event encoding
 */
static int thermal_genl_send_event(enum thermal_genl_event event,
				   struct param *p)
{
	struct sk_buff *msg;
	int ret = -EMSGSIZE;
	void *hdr;

	msg = genlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!msg)
		return -ENOMEM;
	p->msg = msg;

	hdr = genlmsg_put(msg, 0, 0, &thermal_gnl_family, 0, event);
	if (!hdr)
		goto out_free_msg;

	ret = event_cb[event](p);
	if (ret)
		goto out_cancel_msg;

	genlmsg_end(msg, hdr);

	/* Broadcast on the event multicast group (index 1). */
	genlmsg_multicast(&thermal_gnl_family, msg, 0, 1, GFP_KERNEL);

	return 0;

out_cancel_msg:
	genlmsg_cancel(msg, hdr);
out_free_msg:
	nlmsg_free(msg);

	return ret;
}

/* Thin wrappers building a struct param and sending the matching event. */

int thermal_notify_tz_create(int tz_id, const char *name)
{
	struct param p = { .tz_id = tz_id, .name = name };

	return thermal_genl_send_event(THERMAL_GENL_EVENT_TZ_CREATE, &p);
}

int thermal_notify_tz_delete(int tz_id)
{
	struct param p = { .tz_id = tz_id };

	return thermal_genl_send_event(THERMAL_GENL_EVENT_TZ_DELETE, &p);
}

int thermal_notify_tz_enable(int tz_id)
{
	struct param p = { .tz_id = tz_id };

	return thermal_genl_send_event(THERMAL_GENL_EVENT_TZ_ENABLE, &p);
}

int thermal_notify_tz_disable(int tz_id)
{
	struct param p = { .tz_id = tz_id };

	return thermal_genl_send_event(THERMAL_GENL_EVENT_TZ_DISABLE, &p);
}

int thermal_notify_tz_trip_down(int tz_id, int trip_id, int temp)
{
	struct param p = { .tz_id = tz_id, .trip_id = trip_id, .temp = temp };

	return thermal_genl_send_event(THERMAL_GENL_EVENT_TZ_TRIP_DOWN, &p);
}

int thermal_notify_tz_trip_up(int tz_id, int trip_id, int temp)
{
	struct param p = { .tz_id = tz_id, .trip_id = trip_id, .temp = temp };

	return thermal_genl_send_event(THERMAL_GENL_EVENT_TZ_TRIP_UP, &p);
}

int thermal_notify_tz_trip_add(int tz_id, int trip_id, int trip_type,
			       int trip_temp, int trip_hyst)
{
	struct param p = { .tz_id = tz_id, .trip_id = trip_id,
			   .trip_type = trip_type, .trip_temp = trip_temp,
			   .trip_hyst = trip_hyst };

	return thermal_genl_send_event(THERMAL_GENL_EVENT_TZ_TRIP_ADD, &p);
}

int thermal_notify_tz_trip_delete(int tz_id, int trip_id)
{
	struct param p = { .tz_id = tz_id, .trip_id = trip_id };

	return thermal_genl_send_event(THERMAL_GENL_EVENT_TZ_TRIP_DELETE, &p);
}

int thermal_notify_tz_trip_change(int tz_id, int trip_id, int trip_type,
				  int trip_temp, int trip_hyst)
{
	struct param p = { .tz_id = tz_id, .trip_id = trip_id,
			   .trip_type = trip_type, .trip_temp = trip_temp,
			   .trip_hyst = trip_hyst };

	return thermal_genl_send_event(THERMAL_GENL_EVENT_TZ_TRIP_CHANGE, &p);
}

int thermal_notify_cdev_state_update(int cdev_id, int cdev_state)
{
	struct param p = { .cdev_id = cdev_id, .cdev_state = cdev_state };

	return thermal_genl_send_event(THERMAL_GENL_EVENT_CDEV_STATE_UPDATE, &p);
}

int thermal_notify_cdev_add(int cdev_id, const char *name, int cdev_max_state)
{
	struct param p = { .cdev_id = cdev_id, .name = name,
			   .cdev_max_state = cdev_max_state };

	return thermal_genl_send_event(THERMAL_GENL_EVENT_CDEV_ADD, &p);
}

int thermal_notify_cdev_delete(int cdev_id)
{
	struct param p = { .cdev_id = cdev_id };

	return thermal_genl_send_event(THERMAL_GENL_EVENT_CDEV_DELETE, &p);
}

int thermal_notify_tz_gov_change(int tz_id, const char *name)
{
	struct param p = { .tz_id = tz_id, .name = name };

	return thermal_genl_send_event(THERMAL_GENL_EVENT_TZ_GOV_CHANGE, &p);
}

int thermal_genl_cpu_capability_event(int count,
				      struct thermal_genl_cpu_caps *caps)
{
	struct param p = { .cpu_capabilities_count = count,
			   .cpu_capabilities = caps };

	return thermal_genl_send_event(THERMAL_GENL_EVENT_CPU_CAPABILITY_CHANGE, &p);
}
EXPORT_SYMBOL_GPL(thermal_genl_cpu_capability_event);

/*************************** Command encoding ********************************/

static int __thermal_genl_cmd_tz_get_id(struct thermal_zone_device *tz,
					void *data)
{
	struct sk_buff *msg = data;

	if (nla_put_u32(msg, THERMAL_GENL_ATTR_TZ_ID, tz->id) ||
	    nla_put_string(msg, THERMAL_GENL_ATTR_TZ_NAME, tz->type))
		return -EMSGSIZE;

	return 0;
}

static int thermal_genl_cmd_tz_get_id(struct param *p)
{
	struct sk_buff *msg = p->msg;
	struct nlattr *start_tz;
	int ret;

	start_tz = nla_nest_start(msg, THERMAL_GENL_ATTR_TZ);
	if (!start_tz)
		return -EMSGSIZE;

	ret = for_each_thermal_zone(__thermal_genl_cmd_tz_get_id, msg);
	if (ret)
		goto out_cancel_nest;

	nla_nest_end(msg, start_tz);

	return 0;

out_cancel_nest:
	nla_nest_cancel(msg, start_tz);

	return ret;
}

static int thermal_genl_cmd_tz_get_trip(struct param *p)
{
	struct sk_buff *msg = p->msg;
	struct thermal_zone_device *tz;
	struct nlattr *start_trip;
	struct thermal_trip trip;
	int ret, i, id;

	if (!p->attrs[THERMAL_GENL_ATTR_TZ_ID])
		return -EINVAL;

	id = nla_get_u32(p->attrs[THERMAL_GENL_ATTR_TZ_ID]);

	tz = thermal_zone_get_by_id(id);
	if (!tz)
		return -EINVAL;

	start_trip = nla_nest_start(msg, THERMAL_GENL_ATTR_TZ_TRIP);
	if (!start_trip)
		return -EMSGSIZE;

	mutex_lock(&tz->lock);

	for (i = 0; i < tz->num_trips; i++) {

		ret = __thermal_zone_get_trip(tz, i, &trip);
		if (ret)
			goto out_cancel_nest;

		if (nla_put_u32(msg, THERMAL_GENL_ATTR_TZ_TRIP_ID, i) ||
		    nla_put_u32(msg, THERMAL_GENL_ATTR_TZ_TRIP_TYPE, trip.type) ||
		    nla_put_u32(msg, THERMAL_GENL_ATTR_TZ_TRIP_TEMP, trip.temperature) ||
		    nla_put_u32(msg, THERMAL_GENL_ATTR_TZ_TRIP_HYST, trip.hysteresis))
			goto out_cancel_nest;
	}

	mutex_unlock(&tz->lock);

	nla_nest_end(msg, start_trip);

	return 0;

out_cancel_nest:
	/*
	 * NOTE(review): this error path unlocks but does not call
	 * nla_nest_cancel(msg, start_trip) — the partially-built nest is
	 * left in the message; the caller cancels the whole genl message,
	 * so this looks benign, but worth confirming.
	 */
	mutex_unlock(&tz->lock);

	return -EMSGSIZE;
}

static int thermal_genl_cmd_tz_get_temp(struct param *p)
{
	struct sk_buff *msg = p->msg;
	struct thermal_zone_device *tz;
	int temp, ret, id;

	if (!p->attrs[THERMAL_GENL_ATTR_TZ_ID])
		return -EINVAL;

	id = nla_get_u32(p->attrs[THERMAL_GENL_ATTR_TZ_ID]);

	tz = thermal_zone_get_by_id(id);
	if (!tz)
		return -EINVAL;

	ret = thermal_zone_get_temp(tz, &temp);
	if (ret)
		return ret;

	if (nla_put_u32(msg, THERMAL_GENL_ATTR_TZ_ID, id) ||
	    nla_put_u32(msg, THERMAL_GENL_ATTR_TZ_TEMP, temp))
		return -EMSGSIZE;

	return 0;
}

static int thermal_genl_cmd_tz_get_gov(struct param *p)
{
	struct sk_buff *msg = p->msg;
	struct thermal_zone_device *tz;
	int id, ret = 0;

	if (!p->attrs[THERMAL_GENL_ATTR_TZ_ID])
		return -EINVAL;

	id = nla_get_u32(p->attrs[THERMAL_GENL_ATTR_TZ_ID]);

	tz = thermal_zone_get_by_id(id);
	if (!tz)
		return -EINVAL;

	mutex_lock(&tz->lock);

	if (nla_put_u32(msg, THERMAL_GENL_ATTR_TZ_ID, id) ||
	    nla_put_string(msg, THERMAL_GENL_ATTR_TZ_GOV_NAME,
			   tz->governor->name))
		ret = -EMSGSIZE;

	mutex_unlock(&tz->lock);

	return ret;
}

static int __thermal_genl_cmd_cdev_get(struct thermal_cooling_device *cdev,
				       void *data)
{
	struct sk_buff *msg = data;

	if (nla_put_u32(msg, THERMAL_GENL_ATTR_CDEV_ID, cdev->id))
		return -EMSGSIZE;

	if (nla_put_string(msg, THERMAL_GENL_ATTR_CDEV_NAME, cdev->type))
		return -EMSGSIZE;

	return 0;
}

static int thermal_genl_cmd_cdev_get(struct param *p)
{
	struct sk_buff *msg = p->msg;
	struct nlattr *start_cdev;
	int ret;

	start_cdev = nla_nest_start(msg, THERMAL_GENL_ATTR_CDEV);
	if (!start_cdev)
		return -EMSGSIZE;

	ret = for_each_thermal_cooling_device(__thermal_genl_cmd_cdev_get, msg);
	if (ret)
		goto out_cancel_nest;

	nla_nest_end(msg, start_cdev);

	return 0;

out_cancel_nest:
	nla_nest_cancel(msg, start_cdev);

	return ret;
}

/* Dispatch table indexed by command number. */
static cb_t cmd_cb[] = {
	[THERMAL_GENL_CMD_TZ_GET_ID]	= thermal_genl_cmd_tz_get_id,
	[THERMAL_GENL_CMD_TZ_GET_TRIP]	= thermal_genl_cmd_tz_get_trip,
	[THERMAL_GENL_CMD_TZ_GET_TEMP]	= thermal_genl_cmd_tz_get_temp,
	[THERMAL_GENL_CMD_TZ_GET_GOV]	= thermal_genl_cmd_tz_get_gov,
	[THERMAL_GENL_CMD_CDEV_GET]	= thermal_genl_cmd_cdev_get,
};

static int thermal_genl_cmd_dumpit(struct sk_buff *skb,
				   struct netlink_callback *cb)
{
	struct param p = { .msg = skb };
	const struct genl_dumpit_info *info = genl_dumpit_info(cb);
	int cmd = info->op.cmd;
	int ret;
	void *hdr;

	hdr = genlmsg_put(skb, 0, 0, &thermal_gnl_family, 0, cmd);
	if (!hdr)
		return -EMSGSIZE;

	ret = cmd_cb[cmd](&p);
	if (ret)
		goto out_cancel_msg;

	genlmsg_end(skb, hdr);

	return 0;

out_cancel_msg:
	genlmsg_cancel(skb, hdr);

	return ret;
}

static int thermal_genl_cmd_doit(struct sk_buff *skb,
				 struct genl_info *info)
{
	struct param p = { .attrs = info->attrs };
	struct sk_buff *msg;
	void *hdr;
	int cmd = info->genlhdr->cmd;
	int ret = -EMSGSIZE;

	msg = genlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!msg)
		return -ENOMEM;
	p.msg = msg;

	hdr = genlmsg_put_reply(msg, info, &thermal_gnl_family, 0, cmd);
	if (!hdr)
		goto out_free_msg;

	ret = cmd_cb[cmd](&p);
	if (ret)
		goto out_cancel_msg;

	genlmsg_end(msg, hdr);

	return genlmsg_reply(msg, info);

out_cancel_msg:
	genlmsg_cancel(msg, hdr);
out_free_msg:
	nlmsg_free(msg);

	return ret;
}

static const struct genl_small_ops thermal_genl_ops[] = {
	{
		.cmd = THERMAL_GENL_CMD_TZ_GET_ID,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.dumpit = thermal_genl_cmd_dumpit,
	},
	{
		.cmd = THERMAL_GENL_CMD_TZ_GET_TRIP,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit = thermal_genl_cmd_doit,
	},
	{
		.cmd = THERMAL_GENL_CMD_TZ_GET_TEMP,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit = thermal_genl_cmd_doit,
	},
	{
		.cmd = THERMAL_GENL_CMD_TZ_GET_GOV,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit = thermal_genl_cmd_doit,
	},
	{
		.cmd = THERMAL_GENL_CMD_CDEV_GET,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.dumpit = thermal_genl_cmd_dumpit,
	},
};

static struct genl_family thermal_gnl_family __ro_after_init = {
	.hdrsize	= 0,
	.name		= THERMAL_GENL_FAMILY_NAME,
	.version	= THERMAL_GENL_VERSION,
	.maxattr	= THERMAL_GENL_ATTR_MAX,
	.policy		= thermal_genl_policy,
	.small_ops	= thermal_genl_ops,
	.n_small_ops	= ARRAY_SIZE(thermal_genl_ops),
	.resv_start_op	= THERMAL_GENL_CMD_CDEV_GET + 1,
	.mcgrps		= thermal_genl_mcgrps,
	.n_mcgrps	= ARRAY_SIZE(thermal_genl_mcgrps),
};

int __init thermal_netlink_init(void)
{
	return genl_register_family(&thermal_gnl_family);
}

/*
 * NOTE(review): marked __init, not __exit — presumably only called from the
 * thermal core's init error path; confirm against the caller.
 */
void __init thermal_netlink_exit(void)
{
	genl_unregister_family(&thermal_gnl_family);
}
linux-master
drivers/thermal/thermal_netlink.c
// SPDX-License-Identifier: GPL-2.0 // // Copyright 2016 Freescale Semiconductor, Inc. #include <linux/clk.h> #include <linux/err.h> #include <linux/io.h> #include <linux/module.h> #include <linux/of.h> #include <linux/platform_device.h> #include <linux/regmap.h> #include <linux/sizes.h> #include <linux/thermal.h> #include <linux/units.h> #include "thermal_hwmon.h" #define SITES_MAX 16 #define TMR_DISABLE 0x0 #define TMR_ME 0x80000000 #define TMR_ALPF 0x0c000000 #define TMR_ALPF_V2 0x03000000 #define TMTMIR_DEFAULT 0x0000000f #define TIER_DISABLE 0x0 #define TEUMR0_V2 0x51009c00 #define TMSARA_V2 0xe #define TMU_VER1 0x1 #define TMU_VER2 0x2 #define REGS_TMR 0x000 /* Mode Register */ #define TMR_DISABLE 0x0 #define TMR_ME 0x80000000 #define TMR_ALPF 0x0c000000 #define REGS_TMTMIR 0x008 /* Temperature measurement interval Register */ #define TMTMIR_DEFAULT 0x0000000f #define REGS_V2_TMSR 0x008 /* monitor site register */ #define REGS_V2_TMTMIR 0x00c /* Temperature measurement interval Register */ #define REGS_TIER 0x020 /* Interrupt Enable Register */ #define TIER_DISABLE 0x0 #define REGS_TTCFGR 0x080 /* Temperature Configuration Register */ #define REGS_TSCFGR 0x084 /* Sensor Configuration Register */ #define REGS_TRITSR(n) (0x100 + 16 * (n)) /* Immediate Temperature * Site Register */ #define TRITSR_V BIT(31) #define TRITSR_TP5 BIT(9) #define REGS_V2_TMSAR(n) (0x304 + 16 * (n)) /* TMU monitoring * site adjustment register */ #define REGS_TTRnCR(n) (0xf10 + 4 * (n)) /* Temperature Range n * Control Register */ #define REGS_IPBRR(n) (0xbf8 + 4 * (n)) /* IP Block Revision * Register n */ #define REGS_V2_TEUMR(n) (0xf00 + 4 * (n)) /* * Thermal zone data */ struct qoriq_sensor { int id; }; struct qoriq_tmu_data { int ver; struct regmap *regmap; struct clk *clk; struct qoriq_sensor sensor[SITES_MAX]; }; static struct qoriq_tmu_data *qoriq_sensor_to_data(struct qoriq_sensor *s) { return container_of(s, struct qoriq_tmu_data, sensor[s->id]); } static int 
tmu_get_temp(struct thermal_zone_device *tz, int *temp) { struct qoriq_sensor *qsensor = thermal_zone_device_priv(tz); struct qoriq_tmu_data *qdata = qoriq_sensor_to_data(qsensor); u32 val; /* * REGS_TRITSR(id) has the following layout: * * For TMU Rev1: * 31 ... 7 6 5 4 3 2 1 0 * V TEMP * * Where V bit signifies if the measurement is ready and is * within sensor range. TEMP is an 8 bit value representing * temperature in Celsius. * For TMU Rev2: * 31 ... 8 7 6 5 4 3 2 1 0 * V TEMP * * Where V bit signifies if the measurement is ready and is * within sensor range. TEMP is an 9 bit value representing * temperature in KelVin. */ regmap_read(qdata->regmap, REGS_TMR, &val); if (!(val & TMR_ME)) return -EAGAIN; if (regmap_read_poll_timeout(qdata->regmap, REGS_TRITSR(qsensor->id), val, val & TRITSR_V, USEC_PER_MSEC, 10 * USEC_PER_MSEC)) return -ENODATA; if (qdata->ver == TMU_VER1) { *temp = (val & GENMASK(7, 0)) * MILLIDEGREE_PER_DEGREE; } else { if (val & TRITSR_TP5) *temp = milli_kelvin_to_millicelsius((val & GENMASK(8, 0)) * MILLIDEGREE_PER_DEGREE + 500); else *temp = kelvin_to_millicelsius(val & GENMASK(8, 0)); } return 0; } static const struct thermal_zone_device_ops tmu_tz_ops = { .get_temp = tmu_get_temp, }; static int qoriq_tmu_register_tmu_zone(struct device *dev, struct qoriq_tmu_data *qdata) { int id, sites = 0; for (id = 0; id < SITES_MAX; id++) { struct thermal_zone_device *tzd; struct qoriq_sensor *sensor = &qdata->sensor[id]; int ret; sensor->id = id; tzd = devm_thermal_of_zone_register(dev, id, sensor, &tmu_tz_ops); ret = PTR_ERR_OR_ZERO(tzd); if (ret) { if (ret == -ENODEV) continue; return ret; } if (qdata->ver == TMU_VER1) sites |= 0x1 << (15 - id); else sites |= 0x1 << id; devm_thermal_add_hwmon_sysfs(dev, tzd); } if (sites) { if (qdata->ver == TMU_VER1) { regmap_write(qdata->regmap, REGS_TMR, TMR_ME | TMR_ALPF | sites); } else { regmap_write(qdata->regmap, REGS_V2_TMSR, sites); regmap_write(qdata->regmap, REGS_TMR, TMR_ME | TMR_ALPF_V2); } } return 0; 
} static int qoriq_tmu_calibration(struct device *dev, struct qoriq_tmu_data *data) { int i, val, len; u32 range[4]; const u32 *calibration; struct device_node *np = dev->of_node; len = of_property_count_u32_elems(np, "fsl,tmu-range"); if (len < 0 || len > 4) { dev_err(dev, "invalid range data.\n"); return len; } val = of_property_read_u32_array(np, "fsl,tmu-range", range, len); if (val != 0) { dev_err(dev, "failed to read range data.\n"); return val; } /* Init temperature range registers */ for (i = 0; i < len; i++) regmap_write(data->regmap, REGS_TTRnCR(i), range[i]); calibration = of_get_property(np, "fsl,tmu-calibration", &len); if (calibration == NULL || len % 8) { dev_err(dev, "invalid calibration data.\n"); return -ENODEV; } for (i = 0; i < len; i += 8, calibration += 2) { val = of_read_number(calibration, 1); regmap_write(data->regmap, REGS_TTCFGR, val); val = of_read_number(calibration + 1, 1); regmap_write(data->regmap, REGS_TSCFGR, val); } return 0; } static void qoriq_tmu_init_device(struct qoriq_tmu_data *data) { /* Disable interrupt, using polling instead */ regmap_write(data->regmap, REGS_TIER, TIER_DISABLE); /* Set update_interval */ if (data->ver == TMU_VER1) { regmap_write(data->regmap, REGS_TMTMIR, TMTMIR_DEFAULT); } else { regmap_write(data->regmap, REGS_V2_TMTMIR, TMTMIR_DEFAULT); regmap_write(data->regmap, REGS_V2_TEUMR(0), TEUMR0_V2); } /* Disable monitoring */ regmap_write(data->regmap, REGS_TMR, TMR_DISABLE); } static const struct regmap_range qoriq_yes_ranges[] = { regmap_reg_range(REGS_TMR, REGS_TSCFGR), regmap_reg_range(REGS_TTRnCR(0), REGS_TTRnCR(15)), regmap_reg_range(REGS_V2_TEUMR(0), REGS_V2_TEUMR(2)), regmap_reg_range(REGS_V2_TMSAR(0), REGS_V2_TMSAR(15)), regmap_reg_range(REGS_IPBRR(0), REGS_IPBRR(1)), /* Read only registers below */ regmap_reg_range(REGS_TRITSR(0), REGS_TRITSR(15)), }; static const struct regmap_access_table qoriq_wr_table = { .yes_ranges = qoriq_yes_ranges, .n_yes_ranges = ARRAY_SIZE(qoriq_yes_ranges) - 1, }; 
static const struct regmap_access_table qoriq_rd_table = { .yes_ranges = qoriq_yes_ranges, .n_yes_ranges = ARRAY_SIZE(qoriq_yes_ranges), }; static void qoriq_tmu_action(void *p) { struct qoriq_tmu_data *data = p; regmap_write(data->regmap, REGS_TMR, TMR_DISABLE); clk_disable_unprepare(data->clk); } static int qoriq_tmu_probe(struct platform_device *pdev) { int ret; u32 ver; struct qoriq_tmu_data *data; struct device_node *np = pdev->dev.of_node; struct device *dev = &pdev->dev; const bool little_endian = of_property_read_bool(np, "little-endian"); const enum regmap_endian format_endian = little_endian ? REGMAP_ENDIAN_LITTLE : REGMAP_ENDIAN_BIG; const struct regmap_config regmap_config = { .reg_bits = 32, .val_bits = 32, .reg_stride = 4, .rd_table = &qoriq_rd_table, .wr_table = &qoriq_wr_table, .val_format_endian = format_endian, .max_register = SZ_4K, }; void __iomem *base; data = devm_kzalloc(dev, sizeof(struct qoriq_tmu_data), GFP_KERNEL); if (!data) return -ENOMEM; base = devm_platform_ioremap_resource(pdev, 0); ret = PTR_ERR_OR_ZERO(base); if (ret) { dev_err(dev, "Failed to get memory region\n"); return ret; } data->regmap = devm_regmap_init_mmio(dev, base, &regmap_config); ret = PTR_ERR_OR_ZERO(data->regmap); if (ret) { dev_err(dev, "Failed to init regmap (%d)\n", ret); return ret; } data->clk = devm_clk_get_optional(dev, NULL); if (IS_ERR(data->clk)) return PTR_ERR(data->clk); ret = clk_prepare_enable(data->clk); if (ret) { dev_err(dev, "Failed to enable clock\n"); return ret; } ret = devm_add_action_or_reset(dev, qoriq_tmu_action, data); if (ret) return ret; /* version register offset at: 0xbf8 on both v1 and v2 */ ret = regmap_read(data->regmap, REGS_IPBRR(0), &ver); if (ret) { dev_err(&pdev->dev, "Failed to read IP block version\n"); return ret; } data->ver = (ver >> 8) & 0xff; qoriq_tmu_init_device(data); /* TMU initialization */ ret = qoriq_tmu_calibration(dev, data); /* TMU calibration */ if (ret < 0) return ret; ret = qoriq_tmu_register_tmu_zone(dev, 
data); if (ret < 0) { dev_err(dev, "Failed to register sensors\n"); return ret; } platform_set_drvdata(pdev, data); return 0; } static int __maybe_unused qoriq_tmu_suspend(struct device *dev) { struct qoriq_tmu_data *data = dev_get_drvdata(dev); int ret; ret = regmap_update_bits(data->regmap, REGS_TMR, TMR_ME, 0); if (ret) return ret; clk_disable_unprepare(data->clk); return 0; } static int __maybe_unused qoriq_tmu_resume(struct device *dev) { int ret; struct qoriq_tmu_data *data = dev_get_drvdata(dev); ret = clk_prepare_enable(data->clk); if (ret) return ret; /* Enable monitoring */ return regmap_update_bits(data->regmap, REGS_TMR, TMR_ME, TMR_ME); } static SIMPLE_DEV_PM_OPS(qoriq_tmu_pm_ops, qoriq_tmu_suspend, qoriq_tmu_resume); static const struct of_device_id qoriq_tmu_match[] = { { .compatible = "fsl,qoriq-tmu", }, { .compatible = "fsl,imx8mq-tmu", }, {}, }; MODULE_DEVICE_TABLE(of, qoriq_tmu_match); static struct platform_driver qoriq_tmu = { .driver = { .name = "qoriq_thermal", .pm = &qoriq_tmu_pm_ops, .of_match_table = qoriq_tmu_match, }, .probe = qoriq_tmu_probe, }; module_platform_driver(qoriq_tmu); MODULE_AUTHOR("Jia Hongtao <[email protected]>"); MODULE_DESCRIPTION("QorIQ Thermal Monitoring Unit driver"); MODULE_LICENSE("GPL v2");
linux-master
drivers/thermal/qoriq_thermal.c
// SPDX-License-Identifier: GPL-2.0 /* * Copyright 2020 NXP. * * Author: Anson Huang <[email protected]> */ #include <linux/bitfield.h> #include <linux/clk.h> #include <linux/err.h> #include <linux/io.h> #include <linux/module.h> #include <linux/nvmem-consumer.h> #include <linux/of.h> #include <linux/platform_device.h> #include <linux/slab.h> #include <linux/thermal.h> #include "thermal_hwmon.h" #define TER 0x0 /* TMU enable */ #define TPS 0x4 #define TRITSR 0x20 /* TMU immediate temp */ /* TMU calibration data registers */ #define TASR 0x28 #define TASR_BUF_SLOPE_MASK GENMASK(19, 16) #define TASR_BUF_VREF_MASK GENMASK(4, 0) /* TMU_V1 */ #define TASR_BUF_VERF_SEL_MASK GENMASK(1, 0) /* TMU_V2 */ #define TCALIV(n) (0x30 + ((n) * 4)) #define TCALIV_EN BIT(31) #define TCALIV_HR_MASK GENMASK(23, 16) /* TMU_V1 */ #define TCALIV_RT_MASK GENMASK(7, 0) /* TMU_V1 */ #define TCALIV_SNSR105C_MASK GENMASK(27, 16) /* TMU_V2 */ #define TCALIV_SNSR25C_MASK GENMASK(11, 0) /* TMU_V2 */ #define TRIM 0x3c #define TRIM_BJT_CUR_MASK GENMASK(23, 20) #define TRIM_BGR_MASK GENMASK(31, 28) #define TRIM_VLSB_MASK GENMASK(15, 12) #define TRIM_EN_CH BIT(7) #define TER_ADC_PD BIT(30) #define TER_EN BIT(31) #define TRITSR_TEMP0_VAL_MASK GENMASK(7, 0) #define TRITSR_TEMP1_VAL_MASK GENMASK(23, 16) #define PROBE_SEL_ALL GENMASK(31, 30) #define probe_status_offset(x) (30 + x) #define SIGN_BIT BIT(7) #define TEMP_VAL_MASK GENMASK(6, 0) /* TMU OCOTP calibration data bitfields */ #define ANA0_EN BIT(25) #define ANA0_BUF_VREF_MASK GENMASK(24, 20) #define ANA0_BUF_SLOPE_MASK GENMASK(19, 16) #define ANA0_HR_MASK GENMASK(15, 8) #define ANA0_RT_MASK GENMASK(7, 0) #define TRIM2_VLSB_MASK GENMASK(23, 20) #define TRIM2_BGR_MASK GENMASK(19, 16) #define TRIM2_BJT_CUR_MASK GENMASK(15, 12) #define TRIM2_BUF_SLOP_SEL_MASK GENMASK(11, 8) #define TRIM2_BUF_VERF_SEL_MASK GENMASK(7, 6) #define TRIM3_TCA25_0_LSB_MASK GENMASK(31, 28) #define TRIM3_TCA40_0_MASK GENMASK(27, 16) #define TRIM4_TCA40_1_MASK GENMASK(31, 20) 
#define TRIM4_TCA105_0_MASK GENMASK(19, 8) #define TRIM4_TCA25_0_MSB_MASK GENMASK(7, 0) #define TRIM5_TCA105_1_MASK GENMASK(23, 12) #define TRIM5_TCA25_1_MASK GENMASK(11, 0) #define VER1_TEMP_LOW_LIMIT 10000 #define VER2_TEMP_LOW_LIMIT -40000 #define VER2_TEMP_HIGH_LIMIT 125000 #define TMU_VER1 0x1 #define TMU_VER2 0x2 struct thermal_soc_data { u32 num_sensors; u32 version; int (*get_temp)(void *, int *); }; struct tmu_sensor { struct imx8mm_tmu *priv; u32 hw_id; struct thermal_zone_device *tzd; }; struct imx8mm_tmu { void __iomem *base; struct clk *clk; const struct thermal_soc_data *socdata; struct tmu_sensor sensors[]; }; static int imx8mm_tmu_get_temp(void *data, int *temp) { struct tmu_sensor *sensor = data; struct imx8mm_tmu *tmu = sensor->priv; u32 val; val = readl_relaxed(tmu->base + TRITSR) & TRITSR_TEMP0_VAL_MASK; /* * Do not validate against the V bit (bit 31) due to errata * ERR051272: TMU: Bit 31 of registers TMU_TSCR/TMU_TRITSR/TMU_TRATSR invalid */ *temp = val * 1000; if (*temp < VER1_TEMP_LOW_LIMIT || *temp > VER2_TEMP_HIGH_LIMIT) return -EAGAIN; return 0; } static int imx8mp_tmu_get_temp(void *data, int *temp) { struct tmu_sensor *sensor = data; struct imx8mm_tmu *tmu = sensor->priv; unsigned long val; bool ready; val = readl_relaxed(tmu->base + TRITSR); ready = test_bit(probe_status_offset(sensor->hw_id), &val); if (!ready) return -EAGAIN; val = sensor->hw_id ? 
FIELD_GET(TRITSR_TEMP1_VAL_MASK, val) : FIELD_GET(TRITSR_TEMP0_VAL_MASK, val); if (val & SIGN_BIT) /* negative */ val = (~(val & TEMP_VAL_MASK) + 1); *temp = val * 1000; if (*temp < VER2_TEMP_LOW_LIMIT || *temp > VER2_TEMP_HIGH_LIMIT) return -EAGAIN; return 0; } static int tmu_get_temp(struct thermal_zone_device *tz, int *temp) { struct tmu_sensor *sensor = thermal_zone_device_priv(tz); struct imx8mm_tmu *tmu = sensor->priv; return tmu->socdata->get_temp(sensor, temp); } static const struct thermal_zone_device_ops tmu_tz_ops = { .get_temp = tmu_get_temp, }; static void imx8mm_tmu_enable(struct imx8mm_tmu *tmu, bool enable) { u32 val; val = readl_relaxed(tmu->base + TER); val = enable ? (val | TER_EN) : (val & ~TER_EN); if (tmu->socdata->version == TMU_VER2) val = enable ? (val & ~TER_ADC_PD) : (val | TER_ADC_PD); writel_relaxed(val, tmu->base + TER); } static void imx8mm_tmu_probe_sel_all(struct imx8mm_tmu *tmu) { u32 val; val = readl_relaxed(tmu->base + TPS); val |= PROBE_SEL_ALL; writel_relaxed(val, tmu->base + TPS); } static int imx8mm_tmu_probe_set_calib_v1(struct platform_device *pdev, struct imx8mm_tmu *tmu) { struct device *dev = &pdev->dev; u32 ana0; int ret; ret = nvmem_cell_read_u32(&pdev->dev, "calib", &ana0); if (ret) return dev_err_probe(dev, ret, "Failed to read OCOTP nvmem cell\n"); writel(FIELD_PREP(TASR_BUF_VREF_MASK, FIELD_GET(ANA0_BUF_VREF_MASK, ana0)) | FIELD_PREP(TASR_BUF_SLOPE_MASK, FIELD_GET(ANA0_BUF_SLOPE_MASK, ana0)), tmu->base + TASR); writel(FIELD_PREP(TCALIV_RT_MASK, FIELD_GET(ANA0_RT_MASK, ana0)) | FIELD_PREP(TCALIV_HR_MASK, FIELD_GET(ANA0_HR_MASK, ana0)) | ((ana0 & ANA0_EN) ? 
TCALIV_EN : 0), tmu->base + TCALIV(0)); return 0; } static int imx8mm_tmu_probe_set_calib_v2(struct platform_device *pdev, struct imx8mm_tmu *tmu) { struct device *dev = &pdev->dev; struct nvmem_cell *cell; u32 trim[4] = { 0 }; size_t len; void *buf; cell = nvmem_cell_get(dev, "calib"); if (IS_ERR(cell)) return PTR_ERR(cell); buf = nvmem_cell_read(cell, &len); nvmem_cell_put(cell); if (IS_ERR(buf)) return PTR_ERR(buf); memcpy(trim, buf, min(len, sizeof(trim))); kfree(buf); if (len != 16) { dev_err(dev, "OCOTP nvmem cell length is %zu, must be 16.\n", len); return -EINVAL; } /* Blank sample hardware */ if (!trim[0] && !trim[1] && !trim[2] && !trim[3]) { /* Use a default 25C binary codes */ writel(FIELD_PREP(TCALIV_SNSR25C_MASK, 0x63c), tmu->base + TCALIV(0)); writel(FIELD_PREP(TCALIV_SNSR25C_MASK, 0x63c), tmu->base + TCALIV(1)); return 0; } writel(FIELD_PREP(TASR_BUF_VERF_SEL_MASK, FIELD_GET(TRIM2_BUF_VERF_SEL_MASK, trim[0])) | FIELD_PREP(TASR_BUF_SLOPE_MASK, FIELD_GET(TRIM2_BUF_SLOP_SEL_MASK, trim[0])), tmu->base + TASR); writel(FIELD_PREP(TRIM_BJT_CUR_MASK, FIELD_GET(TRIM2_BJT_CUR_MASK, trim[0])) | FIELD_PREP(TRIM_BGR_MASK, FIELD_GET(TRIM2_BGR_MASK, trim[0])) | FIELD_PREP(TRIM_VLSB_MASK, FIELD_GET(TRIM2_VLSB_MASK, trim[0])) | TRIM_EN_CH, tmu->base + TRIM); writel(FIELD_PREP(TCALIV_SNSR25C_MASK, FIELD_GET(TRIM3_TCA25_0_LSB_MASK, trim[1]) | (FIELD_GET(TRIM4_TCA25_0_MSB_MASK, trim[2]) << 4)) | FIELD_PREP(TCALIV_SNSR105C_MASK, FIELD_GET(TRIM4_TCA105_0_MASK, trim[2])), tmu->base + TCALIV(0)); writel(FIELD_PREP(TCALIV_SNSR25C_MASK, FIELD_GET(TRIM5_TCA25_1_MASK, trim[3])) | FIELD_PREP(TCALIV_SNSR105C_MASK, FIELD_GET(TRIM5_TCA105_1_MASK, trim[3])), tmu->base + TCALIV(1)); writel(FIELD_PREP(TCALIV_SNSR25C_MASK, FIELD_GET(TRIM3_TCA40_0_MASK, trim[1])) | FIELD_PREP(TCALIV_SNSR105C_MASK, FIELD_GET(TRIM4_TCA40_1_MASK, trim[2])), tmu->base + TCALIV(2)); return 0; } static int imx8mm_tmu_probe_set_calib(struct platform_device *pdev, struct imx8mm_tmu *tmu) { struct device *dev = 
&pdev->dev; /* * Lack of calibration data OCOTP reference is not considered * fatal to retain compatibility with old DTs. It is however * strongly recommended to update such old DTs to get correct * temperature compensation values for each SoC. */ if (!of_property_present(pdev->dev.of_node, "nvmem-cells")) { dev_warn(dev, "No OCOTP nvmem reference found, SoC-specific calibration not loaded. Please update your DT.\n"); return 0; } if (tmu->socdata->version == TMU_VER1) return imx8mm_tmu_probe_set_calib_v1(pdev, tmu); return imx8mm_tmu_probe_set_calib_v2(pdev, tmu); } static int imx8mm_tmu_probe(struct platform_device *pdev) { const struct thermal_soc_data *data; struct imx8mm_tmu *tmu; int ret; int i; data = of_device_get_match_data(&pdev->dev); tmu = devm_kzalloc(&pdev->dev, struct_size(tmu, sensors, data->num_sensors), GFP_KERNEL); if (!tmu) return -ENOMEM; tmu->socdata = data; tmu->base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(tmu->base)) return PTR_ERR(tmu->base); tmu->clk = devm_clk_get(&pdev->dev, NULL); if (IS_ERR(tmu->clk)) return dev_err_probe(&pdev->dev, PTR_ERR(tmu->clk), "failed to get tmu clock\n"); ret = clk_prepare_enable(tmu->clk); if (ret) { dev_err(&pdev->dev, "failed to enable tmu clock: %d\n", ret); return ret; } /* disable the monitor during initialization */ imx8mm_tmu_enable(tmu, false); for (i = 0; i < data->num_sensors; i++) { tmu->sensors[i].priv = tmu; tmu->sensors[i].tzd = devm_thermal_of_zone_register(&pdev->dev, i, &tmu->sensors[i], &tmu_tz_ops); if (IS_ERR(tmu->sensors[i].tzd)) { ret = PTR_ERR(tmu->sensors[i].tzd); dev_err(&pdev->dev, "failed to register thermal zone sensor[%d]: %d\n", i, ret); goto disable_clk; } tmu->sensors[i].hw_id = i; devm_thermal_add_hwmon_sysfs(&pdev->dev, tmu->sensors[i].tzd); } platform_set_drvdata(pdev, tmu); ret = imx8mm_tmu_probe_set_calib(pdev, tmu); if (ret) goto disable_clk; /* enable all the probes for V2 TMU */ if (tmu->socdata->version == TMU_VER2) imx8mm_tmu_probe_sel_all(tmu); /* 
enable the monitor */ imx8mm_tmu_enable(tmu, true); return 0; disable_clk: clk_disable_unprepare(tmu->clk); return ret; } static int imx8mm_tmu_remove(struct platform_device *pdev) { struct imx8mm_tmu *tmu = platform_get_drvdata(pdev); /* disable TMU */ imx8mm_tmu_enable(tmu, false); clk_disable_unprepare(tmu->clk); platform_set_drvdata(pdev, NULL); return 0; } static struct thermal_soc_data imx8mm_tmu_data = { .num_sensors = 1, .version = TMU_VER1, .get_temp = imx8mm_tmu_get_temp, }; static struct thermal_soc_data imx8mp_tmu_data = { .num_sensors = 2, .version = TMU_VER2, .get_temp = imx8mp_tmu_get_temp, }; static const struct of_device_id imx8mm_tmu_table[] = { { .compatible = "fsl,imx8mm-tmu", .data = &imx8mm_tmu_data, }, { .compatible = "fsl,imx8mp-tmu", .data = &imx8mp_tmu_data, }, { }, }; MODULE_DEVICE_TABLE(of, imx8mm_tmu_table); static struct platform_driver imx8mm_tmu = { .driver = { .name = "i.mx8mm_thermal", .of_match_table = imx8mm_tmu_table, }, .probe = imx8mm_tmu_probe, .remove = imx8mm_tmu_remove, }; module_platform_driver(imx8mm_tmu); MODULE_AUTHOR("Anson Huang <[email protected]>"); MODULE_DESCRIPTION("i.MX8MM Thermal Monitor Unit driver"); MODULE_LICENSE("GPL v2");
linux-master
drivers/thermal/imx8mm_thermal.c
// SPDX-License-Identifier: GPL-2.0 /* * TI Bandgap temperature sensor driver for K3 SoC Family * * Copyright (C) 2020 Texas Instruments Incorporated - http://www.ti.com/ */ #include <linux/err.h> #include <linux/init.h> #include <linux/io.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/of.h> #include <linux/platform_device.h> #include <linux/pm_runtime.h> #include <linux/thermal.h> #include <linux/types.h> #include "thermal_hwmon.h" #define K3_VTM_DEVINFO_PWR0_OFFSET 0x4 #define K3_VTM_DEVINFO_PWR0_TEMPSENS_CT_MASK 0xf0 #define K3_VTM_TMPSENS0_CTRL_OFFSET 0x80 #define K3_VTM_REGS_PER_TS 0x10 #define K3_VTM_TS_STAT_DTEMP_MASK 0x3ff #define K3_VTM_TMPSENS_CTRL_CBIASSEL BIT(0) #define K3_VTM_TMPSENS_CTRL_SOC BIT(5) #define K3_VTM_TMPSENS_CTRL_CLRZ BIT(6) #define K3_VTM_TMPSENS_CTRL_CLKON_REQ BIT(7) #define K3_VTM_ADC_BEGIN_VAL 540 #define K3_VTM_ADC_END_VAL 944 static const int k3_adc_to_temp[] = { -40000, -40000, -40000, -40000, -39800, -39400, -39000, -38600, -38200, -37800, -37400, -37000, -36600, -36200, -35800, -35300, -34700, -34200, -33800, -33400, -33000, -32600, -32200, -31800, -31400, -31000, -30600, -30200, -29800, -29400, -29000, -28600, -28200, -27700, -27100, -26600, -26200, -25800, -25400, -25000, -24600, -24200, -23800, -23400, -23000, -22600, -22200, -21800, -21400, -21000, -20500, -19900, -19400, -19000, -18600, -18200, -17800, -17400, -17000, -16600, -16200, -15800, -15400, -15000, -14600, -14200, -13800, -13400, -13000, -12500, -11900, -11400, -11000, -10600, -10200, -9800, -9400, -9000, -8600, -8200, -7800, -7400, -7000, -6600, -6200, -5800, -5400, -5000, -4500, -3900, -3400, -3000, -2600, -2200, -1800, -1400, -1000, -600, -200, 200, 600, 1000, 1400, 1800, 2200, 2600, 3000, 3400, 3900, 4500, 5000, 5400, 5800, 6200, 6600, 7000, 7400, 7800, 8200, 8600, 9000, 9400, 9800, 10200, 10600, 11000, 11400, 11800, 12200, 12700, 13300, 13800, 14200, 14600, 15000, 15400, 15800, 16200, 16600, 17000, 17400, 17800, 18200, 18600, 19000, 
19400, 19800, 20200, 20600, 21000, 21400, 21900, 22500, 23000, 23400, 23800, 24200, 24600, 25000, 25400, 25800, 26200, 26600, 27000, 27400, 27800, 28200, 28600, 29000, 29400, 29800, 30200, 30600, 31000, 31400, 31900, 32500, 33000, 33400, 33800, 34200, 34600, 35000, 35400, 35800, 36200, 36600, 37000, 37400, 37800, 38200, 38600, 39000, 39400, 39800, 40200, 40600, 41000, 41400, 41800, 42200, 42600, 43100, 43700, 44200, 44600, 45000, 45400, 45800, 46200, 46600, 47000, 47400, 47800, 48200, 48600, 49000, 49400, 49800, 50200, 50600, 51000, 51400, 51800, 52200, 52600, 53000, 53400, 53800, 54200, 54600, 55000, 55400, 55900, 56500, 57000, 57400, 57800, 58200, 58600, 59000, 59400, 59800, 60200, 60600, 61000, 61400, 61800, 62200, 62600, 63000, 63400, 63800, 64200, 64600, 65000, 65400, 65800, 66200, 66600, 67000, 67400, 67800, 68200, 68600, 69000, 69400, 69800, 70200, 70600, 71000, 71500, 72100, 72600, 73000, 73400, 73800, 74200, 74600, 75000, 75400, 75800, 76200, 76600, 77000, 77400, 77800, 78200, 78600, 79000, 79400, 79800, 80200, 80600, 81000, 81400, 81800, 82200, 82600, 83000, 83400, 83800, 84200, 84600, 85000, 85400, 85800, 86200, 86600, 87000, 87400, 87800, 88200, 88600, 89000, 89400, 89800, 90200, 90600, 91000, 91400, 91800, 92200, 92600, 93000, 93400, 93800, 94200, 94600, 95000, 95400, 95800, 96200, 96600, 97000, 97500, 98100, 98600, 99000, 99400, 99800, 100200, 100600, 101000, 101400, 101800, 102200, 102600, 103000, 103400, 103800, 104200, 104600, 105000, 105400, 105800, 106200, 106600, 107000, 107400, 107800, 108200, 108600, 109000, 109400, 109800, 110200, 110600, 111000, 111400, 111800, 112200, 112600, 113000, 113400, 113800, 114200, 114600, 115000, 115400, 115800, 116200, 116600, 117000, 117400, 117800, 118200, 118600, 119000, 119400, 119800, 120200, 120600, 121000, 121400, 121800, 122200, 122600, 123000, 123400, 123800, 124200, 124600, 124900, 125000, }; struct k3_bandgap { void __iomem *base; const struct k3_bandgap_data *conf; }; /* common data structures */ 
struct k3_thermal_data { struct thermal_zone_device *tzd; struct k3_bandgap *bgp; int sensor_id; u32 ctrl_offset; u32 stat_offset; }; static unsigned int vtm_get_best_value(unsigned int s0, unsigned int s1, unsigned int s2) { int d01 = abs(s0 - s1); int d02 = abs(s0 - s2); int d12 = abs(s1 - s2); if (d01 <= d02 && d01 <= d12) return (s0 + s1) / 2; if (d02 <= d01 && d02 <= d12) return (s0 + s2) / 2; return (s1 + s2) / 2; } static int k3_bgp_read_temp(struct k3_thermal_data *devdata, int *temp) { struct k3_bandgap *bgp; unsigned int dtemp, s0, s1, s2; bgp = devdata->bgp; /* * Errata is applicable for am654 pg 1.0 silicon. There * is a variation of the order for 8-10 degree centigrade. * Work around that by getting the average of two closest * readings out of three readings everytime we want to * report temperatures. * * Errata workaround. */ s0 = readl(bgp->base + devdata->stat_offset) & K3_VTM_TS_STAT_DTEMP_MASK; s1 = readl(bgp->base + devdata->stat_offset) & K3_VTM_TS_STAT_DTEMP_MASK; s2 = readl(bgp->base + devdata->stat_offset) & K3_VTM_TS_STAT_DTEMP_MASK; dtemp = vtm_get_best_value(s0, s1, s2); if (dtemp < K3_VTM_ADC_BEGIN_VAL || dtemp > K3_VTM_ADC_END_VAL) return -EINVAL; *temp = k3_adc_to_temp[dtemp - K3_VTM_ADC_BEGIN_VAL]; return 0; } static int k3_thermal_get_temp(struct thermal_zone_device *tz, int *temp) { struct k3_thermal_data *data = thermal_zone_device_priv(tz); int ret = 0; ret = k3_bgp_read_temp(data, temp); if (ret) return ret; return ret; } static const struct thermal_zone_device_ops k3_of_thermal_ops = { .get_temp = k3_thermal_get_temp, }; static const struct of_device_id of_k3_bandgap_match[]; static int k3_bandgap_probe(struct platform_device *pdev) { int ret = 0, cnt, val, id; struct resource *res; struct device *dev = &pdev->dev; struct k3_bandgap *bgp; struct k3_thermal_data *data; if (ARRAY_SIZE(k3_adc_to_temp) != (K3_VTM_ADC_END_VAL + 1 - K3_VTM_ADC_BEGIN_VAL)) return -EINVAL; bgp = devm_kzalloc(&pdev->dev, sizeof(*bgp), GFP_KERNEL); if 
(!bgp) return -ENOMEM; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); bgp->base = devm_ioremap_resource(dev, res); if (IS_ERR(bgp->base)) return PTR_ERR(bgp->base); pm_runtime_enable(dev); ret = pm_runtime_get_sync(dev); if (ret < 0) { pm_runtime_put_noidle(dev); pm_runtime_disable(dev); return ret; } /* Get the sensor count in the VTM */ val = readl(bgp->base + K3_VTM_DEVINFO_PWR0_OFFSET); cnt = val & K3_VTM_DEVINFO_PWR0_TEMPSENS_CT_MASK; cnt >>= __ffs(K3_VTM_DEVINFO_PWR0_TEMPSENS_CT_MASK); data = devm_kcalloc(dev, cnt, sizeof(*data), GFP_KERNEL); if (!data) { ret = -ENOMEM; goto err_alloc; } /* Register the thermal sensors */ for (id = 0; id < cnt; id++) { data[id].sensor_id = id; data[id].bgp = bgp; data[id].ctrl_offset = K3_VTM_TMPSENS0_CTRL_OFFSET + id * K3_VTM_REGS_PER_TS; data[id].stat_offset = data[id].ctrl_offset + 0x8; val = readl(data[id].bgp->base + data[id].ctrl_offset); val |= (K3_VTM_TMPSENS_CTRL_SOC | K3_VTM_TMPSENS_CTRL_CLRZ | K3_VTM_TMPSENS_CTRL_CLKON_REQ); val &= ~K3_VTM_TMPSENS_CTRL_CBIASSEL; writel(val, data[id].bgp->base + data[id].ctrl_offset); data[id].tzd = devm_thermal_of_zone_register(dev, id, &data[id], &k3_of_thermal_ops); if (IS_ERR(data[id].tzd)) { dev_err(dev, "thermal zone device is NULL\n"); ret = PTR_ERR(data[id].tzd); goto err_alloc; } devm_thermal_add_hwmon_sysfs(dev, data[id].tzd); } return 0; err_alloc: pm_runtime_put_sync(dev); pm_runtime_disable(dev); return ret; } static int k3_bandgap_remove(struct platform_device *pdev) { pm_runtime_put_sync(&pdev->dev); pm_runtime_disable(&pdev->dev); return 0; } static const struct of_device_id of_k3_bandgap_match[] = { { .compatible = "ti,am654-vtm", }, { /* sentinel */ }, }; MODULE_DEVICE_TABLE(of, of_k3_bandgap_match); static struct platform_driver k3_bandgap_sensor_driver = { .probe = k3_bandgap_probe, .remove = k3_bandgap_remove, .driver = { .name = "k3-soc-thermal", .of_match_table = of_k3_bandgap_match, }, }; module_platform_driver(k3_bandgap_sensor_driver); 
MODULE_DESCRIPTION("K3 bandgap temperature sensor driver"); MODULE_LICENSE("GPL v2"); MODULE_AUTHOR("J Keerthy <[email protected]>");
linux-master
drivers/thermal/k3_bandgap.c
// SPDX-License-Identifier: GPL-2.0 /* * Renesas RZ/G2L TSU Thermal Sensor Driver * * Copyright (C) 2021 Renesas Electronics Corporation */ #include <linux/delay.h> #include <linux/err.h> #include <linux/io.h> #include <linux/iopoll.h> #include <linux/math.h> #include <linux/mod_devicetable.h> #include <linux/module.h> #include <linux/platform_device.h> #include <linux/pm_runtime.h> #include <linux/reset.h> #include <linux/thermal.h> #include <linux/units.h> #include "thermal_hwmon.h" #define CTEMP_MASK 0xFFF /* default calibration values, if FUSE values are missing */ #define SW_CALIB0_VAL 3148 #define SW_CALIB1_VAL 503 /* Register offsets */ #define TSU_SM 0x00 #define TSU_ST 0x04 #define TSU_SAD 0x0C #define TSU_SS 0x10 #define OTPTSUTRIM_REG(n) (0x18 + ((n) * 0x4)) #define OTPTSUTRIM_EN_MASK BIT(31) #define OTPTSUTRIM_MASK GENMASK(11, 0) /* Sensor Mode Register(TSU_SM) */ #define TSU_SM_EN_TS BIT(0) #define TSU_SM_ADC_EN_TS BIT(1) #define TSU_SM_NORMAL_MODE (TSU_SM_EN_TS | TSU_SM_ADC_EN_TS) /* TSU_ST bits */ #define TSU_ST_START BIT(0) #define TSU_SS_CONV_RUNNING BIT(0) #define TS_CODE_AVE_SCALE(x) ((x) * 1000000) #define MCELSIUS(temp) ((temp) * MILLIDEGREE_PER_DEGREE) #define TS_CODE_CAP_TIMES 8 /* Total number of ADC data samples */ #define RZG2L_THERMAL_GRAN 500 /* milli Celsius */ #define RZG2L_TSU_SS_TIMEOUT_US 1000 #define CURVATURE_CORRECTION_CONST 13 struct rzg2l_thermal_priv { struct device *dev; void __iomem *base; struct thermal_zone_device *zone; struct reset_control *rstc; u32 calib0, calib1; }; static inline u32 rzg2l_thermal_read(struct rzg2l_thermal_priv *priv, u32 reg) { return ioread32(priv->base + reg); } static inline void rzg2l_thermal_write(struct rzg2l_thermal_priv *priv, u32 reg, u32 data) { iowrite32(data, priv->base + reg); } static int rzg2l_thermal_get_temp(struct thermal_zone_device *tz, int *temp) { struct rzg2l_thermal_priv *priv = thermal_zone_device_priv(tz); u32 result = 0, dsensor, ts_code_ave; int val, i; for (i = 0; i < 
TS_CODE_CAP_TIMES ; i++) { /* * TSU repeats measurement at 20 microseconds intervals and * automatically updates the results of measurement. As per * the HW manual for measuring temperature we need to read 8 * values consecutively and then take the average. * ts_code_ave = (ts_code[0] + ⋯ + ts_code[7]) / 8 */ result += rzg2l_thermal_read(priv, TSU_SAD) & CTEMP_MASK; usleep_range(20, 30); } ts_code_ave = result / TS_CODE_CAP_TIMES; /* * Calculate actual sensor value by applying curvature correction formula * dsensor = ts_code_ave / (1 + ts_code_ave * 0.000013). Here we are doing * integer calculation by scaling all the values by 1000000. */ dsensor = TS_CODE_AVE_SCALE(ts_code_ave) / (TS_CODE_AVE_SCALE(1) + (ts_code_ave * CURVATURE_CORRECTION_CONST)); /* * The temperature Tj is calculated by the formula * Tj = (dsensor − calib1) * 165/ (calib0 − calib1) − 40 * where calib0 and calib1 are the calibration values. */ val = ((dsensor - priv->calib1) * (MCELSIUS(165) / (priv->calib0 - priv->calib1))) - MCELSIUS(40); *temp = roundup(val, RZG2L_THERMAL_GRAN); return 0; } static const struct thermal_zone_device_ops rzg2l_tz_of_ops = { .get_temp = rzg2l_thermal_get_temp, }; static int rzg2l_thermal_init(struct rzg2l_thermal_priv *priv) { u32 reg_val; rzg2l_thermal_write(priv, TSU_SM, TSU_SM_NORMAL_MODE); rzg2l_thermal_write(priv, TSU_ST, 0); /* * Before setting the START bit, TSU should be in normal operating * mode. As per the HW manual, it will take 60 µs to place the TSU * into normal operating mode. 
*/ usleep_range(60, 80); reg_val = rzg2l_thermal_read(priv, TSU_ST); reg_val |= TSU_ST_START; rzg2l_thermal_write(priv, TSU_ST, reg_val); return readl_poll_timeout(priv->base + TSU_SS, reg_val, reg_val == TSU_SS_CONV_RUNNING, 50, RZG2L_TSU_SS_TIMEOUT_US); } static void rzg2l_thermal_reset_assert_pm_disable_put(struct platform_device *pdev) { struct rzg2l_thermal_priv *priv = dev_get_drvdata(&pdev->dev); pm_runtime_put(&pdev->dev); pm_runtime_disable(&pdev->dev); reset_control_assert(priv->rstc); } static int rzg2l_thermal_remove(struct platform_device *pdev) { struct rzg2l_thermal_priv *priv = dev_get_drvdata(&pdev->dev); thermal_remove_hwmon_sysfs(priv->zone); rzg2l_thermal_reset_assert_pm_disable_put(pdev); return 0; } static int rzg2l_thermal_probe(struct platform_device *pdev) { struct thermal_zone_device *zone; struct rzg2l_thermal_priv *priv; struct device *dev = &pdev->dev; int ret; priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); if (!priv) return -ENOMEM; priv->base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(priv->base)) return PTR_ERR(priv->base); priv->dev = dev; priv->rstc = devm_reset_control_get_exclusive(dev, NULL); if (IS_ERR(priv->rstc)) return dev_err_probe(dev, PTR_ERR(priv->rstc), "failed to get cpg reset"); ret = reset_control_deassert(priv->rstc); if (ret) return dev_err_probe(dev, ret, "failed to deassert"); pm_runtime_enable(dev); pm_runtime_get_sync(dev); priv->calib0 = rzg2l_thermal_read(priv, OTPTSUTRIM_REG(0)); if (priv->calib0 & OTPTSUTRIM_EN_MASK) priv->calib0 &= OTPTSUTRIM_MASK; else priv->calib0 = SW_CALIB0_VAL; priv->calib1 = rzg2l_thermal_read(priv, OTPTSUTRIM_REG(1)); if (priv->calib1 & OTPTSUTRIM_EN_MASK) priv->calib1 &= OTPTSUTRIM_MASK; else priv->calib1 = SW_CALIB1_VAL; platform_set_drvdata(pdev, priv); ret = rzg2l_thermal_init(priv); if (ret) { dev_err(dev, "Failed to start TSU"); goto err; } zone = devm_thermal_of_zone_register(dev, 0, priv, &rzg2l_tz_of_ops); if (IS_ERR(zone)) { dev_err(dev, "Can't register 
thermal zone"); ret = PTR_ERR(zone); goto err; } priv->zone = zone; ret = thermal_add_hwmon_sysfs(priv->zone); if (ret) goto err; dev_dbg(dev, "TSU probed with %s calibration values", rzg2l_thermal_read(priv, OTPTSUTRIM_REG(0)) ? "hw" : "sw"); return 0; err: rzg2l_thermal_reset_assert_pm_disable_put(pdev); return ret; } static const struct of_device_id rzg2l_thermal_dt_ids[] = { { .compatible = "renesas,rzg2l-tsu", }, { /* sentinel */ } }; MODULE_DEVICE_TABLE(of, rzg2l_thermal_dt_ids); static struct platform_driver rzg2l_thermal_driver = { .driver = { .name = "rzg2l_thermal", .of_match_table = rzg2l_thermal_dt_ids, }, .probe = rzg2l_thermal_probe, .remove = rzg2l_thermal_remove, }; module_platform_driver(rzg2l_thermal_driver); MODULE_DESCRIPTION("Renesas RZ/G2L TSU Thermal Sensor Driver"); MODULE_AUTHOR("Biju Das <[email protected]>"); MODULE_LICENSE("GPL v2");
linux-master
drivers/thermal/rzg2l_thermal.c
// SPDX-License-Identifier: GPL-2.0 /* * Copyright (C) 2019 Linaro Limited. * * Author: Daniel Lezcano <[email protected]> * */ #define pr_fmt(fmt) "cpuidle cooling: " fmt #include <linux/cpu.h> #include <linux/cpu_cooling.h> #include <linux/cpuidle.h> #include <linux/device.h> #include <linux/err.h> #include <linux/idle_inject.h> #include <linux/of.h> #include <linux/slab.h> #include <linux/thermal.h> /** * struct cpuidle_cooling_device - data for the idle cooling device * @ii_dev: an atomic to keep track of the last task exiting the idle cycle * @state: a normalized integer giving the state of the cooling device */ struct cpuidle_cooling_device { struct idle_inject_device *ii_dev; unsigned long state; }; /** * cpuidle_cooling_runtime - Running time computation * @idle_duration_us: CPU idle time to inject in microseconds * @state: a percentile based number * * The running duration is computed from the idle injection duration * which is fixed. If we reach 100% of idle injection ratio, that * means the running duration is zero. If we have a 50% ratio * injection, that means we have equal duration for idle and for * running duration. * * The formula is deduced as follows: * * running = idle x ((100 / ratio) - 1) * * For precision purpose for integer math, we use the following: * * running = (idle x 100) / ratio - idle * * For example, if we have an injected duration of 50%, then we end up * with 10ms of idle injection and 10ms of running duration. * * Return: An unsigned int for a usec based runtime duration. */ static unsigned int cpuidle_cooling_runtime(unsigned int idle_duration_us, unsigned long state) { if (!state) return 0; return ((idle_duration_us * 100) / state) - idle_duration_us; } /** * cpuidle_cooling_get_max_state - Get the maximum state * @cdev : the thermal cooling device * @state : a pointer to the state variable to be filled * * The function always returns 100 as the injection ratio. 
It is * percentile based for consistency accross different platforms. * * Return: The function can not fail, it is always zero */ static int cpuidle_cooling_get_max_state(struct thermal_cooling_device *cdev, unsigned long *state) { /* * Depending on the configuration or the hardware, the running * cycle and the idle cycle could be different. We want to * unify that to an 0..100 interval, so the set state * interface will be the same whatever the platform is. * * The state 100% will make the cluster 100% ... idle. A 0% * injection ratio means no idle injection at all and 50% * means for 10ms of idle injection, we have 10ms of running * time. */ *state = 100; return 0; } /** * cpuidle_cooling_get_cur_state - Get the current cooling state * @cdev: the thermal cooling device * @state: a pointer to the state * * The function just copies the state value from the private thermal * cooling device structure, the mapping is 1 <-> 1. * * Return: The function can not fail, it is always zero */ static int cpuidle_cooling_get_cur_state(struct thermal_cooling_device *cdev, unsigned long *state) { struct cpuidle_cooling_device *idle_cdev = cdev->devdata; *state = idle_cdev->state; return 0; } /** * cpuidle_cooling_set_cur_state - Set the current cooling state * @cdev: the thermal cooling device * @state: the target state * * The function checks first if we are initiating the mitigation which * in turn wakes up all the idle injection tasks belonging to the idle * cooling device. In any case, it updates the internal state for the * cooling device. 
* * Return: The function can not fail, it is always zero */ static int cpuidle_cooling_set_cur_state(struct thermal_cooling_device *cdev, unsigned long state) { struct cpuidle_cooling_device *idle_cdev = cdev->devdata; struct idle_inject_device *ii_dev = idle_cdev->ii_dev; unsigned long current_state = idle_cdev->state; unsigned int runtime_us, idle_duration_us; idle_cdev->state = state; idle_inject_get_duration(ii_dev, &runtime_us, &idle_duration_us); runtime_us = cpuidle_cooling_runtime(idle_duration_us, state); idle_inject_set_duration(ii_dev, runtime_us, idle_duration_us); if (current_state == 0 && state > 0) { idle_inject_start(ii_dev); } else if (current_state > 0 && !state) { idle_inject_stop(ii_dev); } return 0; } /** * cpuidle_cooling_ops - thermal cooling device ops */ static struct thermal_cooling_device_ops cpuidle_cooling_ops = { .get_max_state = cpuidle_cooling_get_max_state, .get_cur_state = cpuidle_cooling_get_cur_state, .set_cur_state = cpuidle_cooling_set_cur_state, }; /** * __cpuidle_cooling_register: register the cooling device * @drv: a cpuidle driver structure pointer * @np: a device node structure pointer used for the thermal binding * * This function is in charge of allocating the cpuidle cooling device * structure, the idle injection, initialize them and register the * cooling device to the thermal framework. 
* * Return: zero on success, a negative value returned by one of the * underlying subsystem in case of error */ static int __cpuidle_cooling_register(struct device_node *np, struct cpuidle_driver *drv) { struct idle_inject_device *ii_dev; struct cpuidle_cooling_device *idle_cdev; struct thermal_cooling_device *cdev; struct device *dev; unsigned int idle_duration_us = TICK_USEC; unsigned int latency_us = UINT_MAX; char *name; int ret; idle_cdev = kzalloc(sizeof(*idle_cdev), GFP_KERNEL); if (!idle_cdev) { ret = -ENOMEM; goto out; } ii_dev = idle_inject_register(drv->cpumask); if (!ii_dev) { ret = -EINVAL; goto out_kfree; } of_property_read_u32(np, "duration-us", &idle_duration_us); of_property_read_u32(np, "exit-latency-us", &latency_us); idle_inject_set_duration(ii_dev, TICK_USEC, idle_duration_us); idle_inject_set_latency(ii_dev, latency_us); idle_cdev->ii_dev = ii_dev; dev = get_cpu_device(cpumask_first(drv->cpumask)); name = kasprintf(GFP_KERNEL, "idle-%s", dev_name(dev)); if (!name) { ret = -ENOMEM; goto out_unregister; } cdev = thermal_of_cooling_device_register(np, name, idle_cdev, &cpuidle_cooling_ops); if (IS_ERR(cdev)) { ret = PTR_ERR(cdev); goto out_kfree_name; } pr_debug("%s: Idle injection set with idle duration=%u, latency=%u\n", name, idle_duration_us, latency_us); kfree(name); return 0; out_kfree_name: kfree(name); out_unregister: idle_inject_unregister(ii_dev); out_kfree: kfree(idle_cdev); out: return ret; } /** * cpuidle_cooling_register - Idle cooling device initialization function * @drv: a cpuidle driver structure pointer * * This function is in charge of creating a cooling device per cpuidle * driver and register it to the thermal framework. 
*/ void cpuidle_cooling_register(struct cpuidle_driver *drv) { struct device_node *cooling_node; struct device_node *cpu_node; int cpu, ret; for_each_cpu(cpu, drv->cpumask) { cpu_node = of_cpu_device_node_get(cpu); cooling_node = of_get_child_by_name(cpu_node, "thermal-idle"); of_node_put(cpu_node); if (!cooling_node) { pr_debug("'thermal-idle' node not found for cpu%d\n", cpu); continue; } ret = __cpuidle_cooling_register(cooling_node, drv); of_node_put(cooling_node); if (ret) { pr_err("Failed to register the cpuidle cooling device" \ "for cpu%d: %d\n", cpu, ret); break; } } }
linux-master
drivers/thermal/cpuidle_cooling.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Dove thermal sensor driver
 *
 * Copyright (C) 2013 Andrew Lunn <[email protected]>
 */
#include <linux/device.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/thermal.h>

/* Position and width of the raw temperature field in the sensor register. */
#define DOVE_THERMAL_TEMP_OFFSET	1
#define DOVE_THERMAL_TEMP_MASK		0x1FF

/* Dove Thermal Manager Control and Status Register */
#define PMU_TM_DISABLE_OFFS		0
#define PMU_TM_DISABLE_MASK		(0x1 << PMU_TM_DISABLE_OFFS)

/* Dove Thermal Diode Control 0 Register */
#define PMU_TDC0_SW_RST_MASK		(0x1 << 1)
#define PMU_TDC0_SEL_VCAL_OFFS		5
#define PMU_TDC0_SEL_VCAL_MASK		(0x3 << PMU_TDC0_SEL_VCAL_OFFS)
#define PMU_TDC0_REF_CAL_CNT_OFFS	11
#define PMU_TDC0_REF_CAL_CNT_MASK	(0x1FF << PMU_TDC0_REF_CAL_CNT_OFFS)
#define PMU_TDC0_AVG_NUM_OFFS		25
#define PMU_TDC0_AVG_NUM_MASK		(0x7 << PMU_TDC0_AVG_NUM_OFFS)

/* Dove Thermal Diode Control 1 Register */
#define PMU_TEMP_DIOD_CTRL1_REG		0x04
#define PMU_TDC1_TEMP_VALID_MASK	(0x1 << 10)

/* Dove Thermal Sensor Dev Structure */
struct dove_thermal_priv {
	void __iomem *sensor;	/* temperature/status register bank */
	void __iomem *control;	/* thermal diode control register bank */
};

/*
 * One-time hardware setup: calibrate the thermal diode, pulse its reset,
 * enable the sensor, then wait for the first conversion to show up.
 * Returns 0 on success, -EIO if no reading appears within the poll budget.
 */
static int dove_init_sensor(const struct dove_thermal_priv *priv)
{
	u32 reg;
	u32 i;

	/* Configure the Diode Control Register #0 */
	reg = readl_relaxed(priv->control);

	/* Use average of 2 */
	reg &= ~PMU_TDC0_AVG_NUM_MASK;
	reg |= (0x1 << PMU_TDC0_AVG_NUM_OFFS);

	/* Reference calibration value */
	reg &= ~PMU_TDC0_REF_CAL_CNT_MASK;
	reg |= (0x0F1 << PMU_TDC0_REF_CAL_CNT_OFFS);

	/* Set the high level reference for calibration */
	reg &= ~PMU_TDC0_SEL_VCAL_MASK;
	reg |= (0x2 << PMU_TDC0_SEL_VCAL_OFFS);
	writel(reg, priv->control);

	/* Reset the sensor: pulse the soft-reset bit high then low again. */
	reg = readl_relaxed(priv->control);
	writel((reg | PMU_TDC0_SW_RST_MASK), priv->control);
	writel(reg, priv->control);

	/* Enable the sensor */
	reg = readl_relaxed(priv->sensor);
	reg &= ~PMU_TM_DISABLE_MASK;
	writel(reg, priv->sensor);

	/*
	 * Poll the sensor for the first reading.
	 * NOTE(review): busy-poll with no delay between reads; only bounded
	 * by the iteration count — presumably fast enough at probe time.
	 */
	for (i = 0; i < 1000000; i++) {
		reg = readl_relaxed(priv->sensor);
		if (reg & DOVE_THERMAL_TEMP_MASK)
			break;
	}

	/* Loop exhausted without a non-zero sample: sensor never came up. */
	if (i == 1000000)
		return -EIO;

	return 0;
}

/*
 * thermal_zone get_temp callback: report the current temperature in
 * millicelsius, or -EIO when the hardware says the reading is not valid.
 */
static int dove_get_temp(struct thermal_zone_device *thermal,
			  int *temp)
{
	unsigned long reg;
	struct dove_thermal_priv *priv = thermal_zone_device_priv(thermal);

	/* Valid check */
	reg = readl_relaxed(priv->control + PMU_TEMP_DIOD_CTRL1_REG);
	if ((reg & PMU_TDC1_TEMP_VALID_MASK) == 0x0)
		return -EIO;

	/*
	 * Calculate temperature. According to Marvell internal
	 * documentation the formula for this is:
	 * Celsius = (322-reg)/1.3625
	 * (scaled by 10^7/10^4 below so the result is in millicelsius)
	 */
	reg = readl_relaxed(priv->sensor);
	reg = (reg >> DOVE_THERMAL_TEMP_OFFSET) &
	      DOVE_THERMAL_TEMP_MASK;
	*temp = ((3220000000UL - (10000000UL * reg)) / 13625);

	return 0;
}

static struct thermal_zone_device_ops ops = {
	.get_temp = dove_get_temp,
};

static const struct of_device_id dove_thermal_id_table[] = {
	{ .compatible = "marvell,dove-thermal" },
	{}
};

/*
 * Probe: map the two register banks, initialize the sensor hardware and
 * register a tripless thermal zone named "dove_thermal".
 */
static int dove_thermal_probe(struct platform_device *pdev)
{
	struct thermal_zone_device *thermal = NULL;
	struct dove_thermal_priv *priv;
	int ret;

	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	/* Resource 0: sensor registers; resource 1: diode control registers. */
	priv->sensor = devm_platform_get_and_ioremap_resource(pdev, 0, NULL);
	if (IS_ERR(priv->sensor))
		return PTR_ERR(priv->sensor);

	priv->control = devm_platform_get_and_ioremap_resource(pdev, 1, NULL);
	if (IS_ERR(priv->control))
		return PTR_ERR(priv->control);

	ret = dove_init_sensor(priv);
	if (ret) {
		dev_err(&pdev->dev, "Failed to initialize sensor\n");
		return ret;
	}

	thermal = thermal_tripless_zone_device_register("dove_thermal",
							priv, &ops, NULL);
	if (IS_ERR(thermal)) {
		dev_err(&pdev->dev,
			"Failed to register thermal zone device\n");
		return PTR_ERR(thermal);
	}

	ret = thermal_zone_device_enable(thermal);
	if (ret) {
		/* Undo the registration: the zone could not be enabled. */
		thermal_zone_device_unregister(thermal);
		return ret;
	}

	platform_set_drvdata(pdev, thermal);

	return 0;
}

/* Remove: drop the thermal zone registered at probe time. */
static int dove_thermal_exit(struct platform_device *pdev)
{
	struct thermal_zone_device *dove_thermal =
		platform_get_drvdata(pdev);

	thermal_zone_device_unregister(dove_thermal);

	return 0;
}

MODULE_DEVICE_TABLE(of, dove_thermal_id_table);

static struct platform_driver dove_thermal_driver = {
	.probe = dove_thermal_probe,
	.remove = dove_thermal_exit,
	.driver = {
		.name = "dove_thermal",
		.of_match_table = dove_thermal_id_table,
	},
};

module_platform_driver(dove_thermal_driver);

MODULE_AUTHOR("Andrew Lunn <[email protected]>");
MODULE_DESCRIPTION("Dove thermal driver");
MODULE_LICENSE("GPL");
linux-master
drivers/thermal/dove_thermal.c
// SPDX-License-Identifier: GPL-2.0 /* * TI Bandgap temperature sensor driver for J72XX SoC Family * * Copyright (C) 2021 Texas Instruments Incorporated - http://www.ti.com/ */ #include <linux/math.h> #include <linux/math64.h> #include <linux/module.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/platform_device.h> #include <linux/pm_runtime.h> #include <linux/err.h> #include <linux/types.h> #include <linux/io.h> #include <linux/thermal.h> #include <linux/of.h> #include <linux/delay.h> #include <linux/slab.h> #define K3_VTM_DEVINFO_PWR0_OFFSET 0x4 #define K3_VTM_DEVINFO_PWR0_TEMPSENS_CT_MASK 0xf0 #define K3_VTM_TMPSENS0_CTRL_OFFSET 0x300 #define K3_VTM_MISC_CTRL_OFFSET 0xc #define K3_VTM_TMPSENS_STAT_OFFSET 0x8 #define K3_VTM_ANYMAXT_OUTRG_ALERT_EN 0x1 #define K3_VTM_MISC_CTRL2_OFFSET 0x10 #define K3_VTM_TS_STAT_DTEMP_MASK 0x3ff #define K3_VTM_MAX_NUM_TS 8 #define K3_VTM_TMPSENS_CTRL_SOC BIT(5) #define K3_VTM_TMPSENS_CTRL_CLRZ BIT(6) #define K3_VTM_TMPSENS_CTRL_CLKON_REQ BIT(7) #define K3_VTM_TMPSENS_CTRL_MAXT_OUTRG_EN BIT(11) #define K3_VTM_CORRECTION_TEMP_CNT 3 #define MINUS40CREF 5 #define PLUS30CREF 253 #define PLUS125CREF 730 #define PLUS150CREF 940 #define TABLE_SIZE 1024 #define MAX_TEMP 123000 #define COOL_DOWN_TEMP 105000 #define FACTORS_REDUCTION 13 static int *derived_table; static int compute_value(int index, const s64 *factors, int nr_factors, int reduction) { s64 value = 0; int i; for (i = 0; i < nr_factors; i++) value += factors[i] * int_pow(index, i); return (int)div64_s64(value, int_pow(10, reduction)); } static void init_table(int factors_size, int *table, const s64 *factors) { int i; for (i = 0; i < TABLE_SIZE; i++) table[i] = compute_value(i, factors, factors_size, FACTORS_REDUCTION); } /** * struct err_values - structure containing error/reference values * @refs: reference error values for -40C, 30C, 125C & 150C * @errs: Actual error values for -40C, 30C, 125C & 150C read from the efuse */ struct err_values { int refs[4]; 
int errs[4]; }; static void create_table_segments(struct err_values *err_vals, int seg, int *ref_table) { int m = 0, c, num, den, i, err, idx1, idx2, err1, err2, ref1, ref2; if (seg == 0) idx1 = 0; else idx1 = err_vals->refs[seg]; idx2 = err_vals->refs[seg + 1]; err1 = err_vals->errs[seg]; err2 = err_vals->errs[seg + 1]; ref1 = err_vals->refs[seg]; ref2 = err_vals->refs[seg + 1]; /* * Calculate the slope with adc values read from the register * as the y-axis param and err in adc value as x-axis param */ num = ref2 - ref1; den = err2 - err1; if (den) m = num / den; c = ref2 - m * err2; /* * Take care of divide by zero error if error values are same * Or when the slope is 0 */ if (den != 0 && m != 0) { for (i = idx1; i <= idx2; i++) { err = (i - c) / m; if (((i + err) < 0) || ((i + err) >= TABLE_SIZE)) continue; derived_table[i] = ref_table[i + err]; } } else { /* Constant error take care of divide by zero */ for (i = idx1; i <= idx2; i++) { if (((i + err1) < 0) || ((i + err1) >= TABLE_SIZE)) continue; derived_table[i] = ref_table[i + err1]; } } } static int prep_lookup_table(struct err_values *err_vals, int *ref_table) { int inc, i, seg; /* * Fill up the lookup table under 3 segments * region -40C to +30C * region +30C to +125C * region +125C to +150C */ for (seg = 0; seg < 3; seg++) create_table_segments(err_vals, seg, ref_table); /* Get to the first valid temperature */ i = 0; while (!derived_table[i]) i++; /* * Get to the last zero index and back fill the temperature for * sake of continuity */ if (i) { /* 300 milli celsius steps */ while (i--) derived_table[i] = derived_table[i + 1] - 300; } /* * Fill the last trailing 0s which are unfilled with increments of * 100 milli celsius till 1023 code */ i = TABLE_SIZE - 1; while (!derived_table[i]) i--; i++; inc = 1; while (i < TABLE_SIZE) { derived_table[i] = derived_table[i - 1] + inc * 100; i++; } return 0; } struct k3_thermal_data; struct k3_j72xx_bandgap { struct device *dev; void __iomem *base; void __iomem 
*cfg2_base; struct k3_thermal_data *ts_data[K3_VTM_MAX_NUM_TS]; }; /* common data structures */ struct k3_thermal_data { struct k3_j72xx_bandgap *bgp; u32 ctrl_offset; u32 stat_offset; }; static int two_cmp(int tmp, int mask) { tmp = ~(tmp); tmp &= mask; tmp += 1; /* Return negative value */ return (0 - tmp); } static unsigned int vtm_get_best_value(unsigned int s0, unsigned int s1, unsigned int s2) { int d01 = abs(s0 - s1); int d02 = abs(s0 - s2); int d12 = abs(s1 - s2); if (d01 <= d02 && d01 <= d12) return (s0 + s1) / 2; if (d02 <= d01 && d02 <= d12) return (s0 + s2) / 2; return (s1 + s2) / 2; } static inline int k3_bgp_read_temp(struct k3_thermal_data *devdata, int *temp) { struct k3_j72xx_bandgap *bgp; unsigned int dtemp, s0, s1, s2; bgp = devdata->bgp; /* * Errata is applicable for am654 pg 1.0 silicon/J7ES. There * is a variation of the order for certain degree centigrade on AM654. * Work around that by getting the average of two closest * readings out of three readings everytime we want to * report temperatures. * * Errata workaround. 
*/ s0 = readl(bgp->base + devdata->stat_offset) & K3_VTM_TS_STAT_DTEMP_MASK; s1 = readl(bgp->base + devdata->stat_offset) & K3_VTM_TS_STAT_DTEMP_MASK; s2 = readl(bgp->base + devdata->stat_offset) & K3_VTM_TS_STAT_DTEMP_MASK; dtemp = vtm_get_best_value(s0, s1, s2); if (dtemp < 0 || dtemp >= TABLE_SIZE) return -EINVAL; *temp = derived_table[dtemp]; return 0; } /* Get temperature callback function for thermal zone */ static int k3_thermal_get_temp(struct thermal_zone_device *tz, int *temp) { return k3_bgp_read_temp(thermal_zone_device_priv(tz), temp); } static const struct thermal_zone_device_ops k3_of_thermal_ops = { .get_temp = k3_thermal_get_temp, }; static int k3_j72xx_bandgap_temp_to_adc_code(int temp) { int low = 0, high = TABLE_SIZE - 1, mid; if (temp > 160000 || temp < -50000) return -EINVAL; /* Binary search to find the adc code */ while (low < (high - 1)) { mid = (low + high) / 2; if (temp <= derived_table[mid]) high = mid; else low = mid; } return mid; } static void get_efuse_values(int id, struct k3_thermal_data *data, int *err, void __iomem *fuse_base) { int i, tmp, pow; int ct_offsets[5][K3_VTM_CORRECTION_TEMP_CNT] = { { 0x0, 0x8, 0x4 }, { 0x0, 0x8, 0x4 }, { 0x0, -1, 0x4 }, { 0x0, 0xC, -1 }, { 0x0, 0xc, 0x8 } }; int ct_bm[5][K3_VTM_CORRECTION_TEMP_CNT] = { { 0x3f, 0x1fe000, 0x1ff }, { 0xfc0, 0x1fe000, 0x3fe00 }, { 0x3f000, 0x7f800000, 0x7fc0000 }, { 0xfc0000, 0x1fe0, 0x1f800000 }, { 0x3f000000, 0x1fe000, 0x1ff0 } }; for (i = 0; i < 3; i++) { /* Extract the offset value using bit-mask */ if (ct_offsets[id][i] == -1 && i == 1) { /* 25C offset Case of Sensor 2 split between 2 regs */ tmp = (readl(fuse_base + 0x8) & 0xE0000000) >> (29); tmp |= ((readl(fuse_base + 0xC) & 0x1F) << 3); pow = tmp & 0x80; } else if (ct_offsets[id][i] == -1 && i == 2) { /* 125C Case of Sensor 3 split between 2 regs */ tmp = (readl(fuse_base + 0x4) & 0xF8000000) >> (27); tmp |= ((readl(fuse_base + 0x8) & 0xF) << 5); pow = tmp & 0x100; } else { tmp = readl(fuse_base + 
ct_offsets[id][i]); tmp &= ct_bm[id][i]; tmp = tmp >> __ffs(ct_bm[id][i]); /* Obtain the sign bit pow*/ pow = ct_bm[id][i] >> __ffs(ct_bm[id][i]); pow += 1; pow /= 2; } /* Check for negative value */ if (tmp & pow) { /* 2's complement value */ tmp = two_cmp(tmp, ct_bm[id][i] >> __ffs(ct_bm[id][i])); } err[i] = tmp; } /* Err value for 150C is set to 0 */ err[i] = 0; } static void print_look_up_table(struct device *dev, int *ref_table) { int i; dev_dbg(dev, "The contents of derived array\n"); dev_dbg(dev, "Code Temperature\n"); for (i = 0; i < TABLE_SIZE; i++) dev_dbg(dev, "%d %d %d\n", i, derived_table[i], ref_table[i]); } struct k3_j72xx_bandgap_data { const bool has_errata_i2128; }; static int k3_j72xx_bandgap_probe(struct platform_device *pdev) { int ret = 0, cnt, val, id; int high_max, low_temp; struct resource *res; struct device *dev = &pdev->dev; struct k3_j72xx_bandgap *bgp; struct k3_thermal_data *data; bool workaround_needed = false; const struct k3_j72xx_bandgap_data *driver_data; struct thermal_zone_device *ti_thermal; int *ref_table; struct err_values err_vals; void __iomem *fuse_base; const s64 golden_factors[] = { -490019999999999936, 3251200000000000, -1705800000000, 603730000, -92627, }; const s64 pvt_wa_factors[] = { -415230000000000000, 3126600000000000, -1157800000000, }; bgp = devm_kzalloc(&pdev->dev, sizeof(*bgp), GFP_KERNEL); if (!bgp) return -ENOMEM; bgp->dev = dev; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); bgp->base = devm_ioremap_resource(dev, res); if (IS_ERR(bgp->base)) return PTR_ERR(bgp->base); res = platform_get_resource(pdev, IORESOURCE_MEM, 1); bgp->cfg2_base = devm_ioremap_resource(dev, res); if (IS_ERR(bgp->cfg2_base)) return PTR_ERR(bgp->cfg2_base); driver_data = of_device_get_match_data(dev); if (driver_data) workaround_needed = driver_data->has_errata_i2128; /* * Some of TI's J721E SoCs require a software trimming procedure * for the temperature monitors to function properly. 
To determine * if this particular SoC is NOT affected, both bits in the * WKUP_SPARE_FUSE0[31:30] will be set (0xC0000000) indicating * when software trimming should NOT be applied. * * https://www.ti.com/lit/er/sprz455c/sprz455c.pdf */ if (workaround_needed) { res = platform_get_resource(pdev, IORESOURCE_MEM, 2); fuse_base = devm_ioremap_resource(dev, res); if (IS_ERR(fuse_base)) return PTR_ERR(fuse_base); if ((readl(fuse_base) & 0xc0000000) == 0xc0000000) workaround_needed = false; } dev_dbg(bgp->dev, "Work around %sneeded\n", workaround_needed ? "" : "not "); pm_runtime_enable(dev); ret = pm_runtime_get_sync(dev); if (ret < 0) { pm_runtime_put_noidle(dev); pm_runtime_disable(dev); return ret; } /* Get the sensor count in the VTM */ val = readl(bgp->base + K3_VTM_DEVINFO_PWR0_OFFSET); cnt = val & K3_VTM_DEVINFO_PWR0_TEMPSENS_CT_MASK; cnt >>= __ffs(K3_VTM_DEVINFO_PWR0_TEMPSENS_CT_MASK); data = devm_kcalloc(bgp->dev, cnt, sizeof(*data), GFP_KERNEL); if (!data) { ret = -ENOMEM; goto err_alloc; } ref_table = kzalloc(sizeof(*ref_table) * TABLE_SIZE, GFP_KERNEL); if (!ref_table) { ret = -ENOMEM; goto err_alloc; } derived_table = devm_kzalloc(bgp->dev, sizeof(*derived_table) * TABLE_SIZE, GFP_KERNEL); if (!derived_table) { ret = -ENOMEM; goto err_free_ref_table; } if (!workaround_needed) init_table(5, ref_table, golden_factors); else init_table(3, ref_table, pvt_wa_factors); /* Register the thermal sensors */ for (id = 0; id < cnt; id++) { data[id].bgp = bgp; data[id].ctrl_offset = K3_VTM_TMPSENS0_CTRL_OFFSET + id * 0x20; data[id].stat_offset = data[id].ctrl_offset + K3_VTM_TMPSENS_STAT_OFFSET; if (workaround_needed) { /* ref adc values for -40C, 30C & 125C respectively */ err_vals.refs[0] = MINUS40CREF; err_vals.refs[1] = PLUS30CREF; err_vals.refs[2] = PLUS125CREF; err_vals.refs[3] = PLUS150CREF; get_efuse_values(id, &data[id], err_vals.errs, fuse_base); } if (id == 0 && workaround_needed) prep_lookup_table(&err_vals, ref_table); else if (id == 0 && !workaround_needed) 
memcpy(derived_table, ref_table, TABLE_SIZE * 4); val = readl(data[id].bgp->cfg2_base + data[id].ctrl_offset); val |= (K3_VTM_TMPSENS_CTRL_MAXT_OUTRG_EN | K3_VTM_TMPSENS_CTRL_SOC | K3_VTM_TMPSENS_CTRL_CLRZ | BIT(4)); writel(val, data[id].bgp->cfg2_base + data[id].ctrl_offset); bgp->ts_data[id] = &data[id]; ti_thermal = devm_thermal_of_zone_register(bgp->dev, id, &data[id], &k3_of_thermal_ops); if (IS_ERR(ti_thermal)) { dev_err(bgp->dev, "thermal zone device is NULL\n"); ret = PTR_ERR(ti_thermal); goto err_free_ref_table; } } /* * Program TSHUT thresholds * Step 1: set the thresholds to ~123C and 105C WKUP_VTM_MISC_CTRL2 * Step 2: WKUP_VTM_TMPSENS_CTRL_j set the MAXT_OUTRG_EN bit * This is already taken care as per of init * Step 3: WKUP_VTM_MISC_CTRL set the ANYMAXT_OUTRG_ALERT_EN bit */ high_max = k3_j72xx_bandgap_temp_to_adc_code(MAX_TEMP); low_temp = k3_j72xx_bandgap_temp_to_adc_code(COOL_DOWN_TEMP); writel((low_temp << 16) | high_max, data[0].bgp->cfg2_base + K3_VTM_MISC_CTRL2_OFFSET); mdelay(100); writel(K3_VTM_ANYMAXT_OUTRG_ALERT_EN, data[0].bgp->cfg2_base + K3_VTM_MISC_CTRL_OFFSET); print_look_up_table(dev, ref_table); /* * Now that the derived_table has the appropriate look up values * Free up the ref_table */ kfree(ref_table); return 0; err_free_ref_table: kfree(ref_table); err_alloc: pm_runtime_put_sync(&pdev->dev); pm_runtime_disable(&pdev->dev); return ret; } static int k3_j72xx_bandgap_remove(struct platform_device *pdev) { pm_runtime_put_sync(&pdev->dev); pm_runtime_disable(&pdev->dev); return 0; } static const struct k3_j72xx_bandgap_data k3_j72xx_bandgap_j721e_data = { .has_errata_i2128 = true, }; static const struct k3_j72xx_bandgap_data k3_j72xx_bandgap_j7200_data = { .has_errata_i2128 = false, }; static const struct of_device_id of_k3_j72xx_bandgap_match[] = { { .compatible = "ti,j721e-vtm", .data = &k3_j72xx_bandgap_j721e_data, }, { .compatible = "ti,j7200-vtm", .data = &k3_j72xx_bandgap_j7200_data, }, { /* sentinel */ }, }; 
MODULE_DEVICE_TABLE(of, of_k3_j72xx_bandgap_match); static struct platform_driver k3_j72xx_bandgap_sensor_driver = { .probe = k3_j72xx_bandgap_probe, .remove = k3_j72xx_bandgap_remove, .driver = { .name = "k3-j72xx-soc-thermal", .of_match_table = of_k3_j72xx_bandgap_match, }, }; module_platform_driver(k3_j72xx_bandgap_sensor_driver); MODULE_DESCRIPTION("K3 bandgap temperature sensor driver"); MODULE_LICENSE("GPL"); MODULE_AUTHOR("J Keerthy <[email protected]>");
linux-master
drivers/thermal/k3_j72xx_bandgap.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright 2023 Linaro Limited
 * Copyright 2023 Intel Corporation
 *
 * Library routines for populating a generic thermal trip point structure
 * with data obtained by evaluating a specific object in the ACPI Namespace.
 */
#include <linux/acpi.h>
#include <linux/units.h>

#include "thermal_core.h"

/*
 * Minimum temperature for full military grade is 218°K (-55°C) and
 * max temperature is 448°K (175°C). We can consider those values as
 * the boundaries for the [trips] temperature returned by the
 * firmware. Any values out of these boundaries may be considered
 * bogus and we can assume the firmware has no data to provide.
 */
#define TEMP_MIN_DECIK	2180
#define TEMP_MAX_DECIK	4480

/*
 * Evaluate @obj_name (e.g. "_PSV") on @adev and convert the result from
 * deci-Kelvin to millicelsius. Out-of-range results are reported as
 * THERMAL_TEMP_INVALID rather than an error, so callers can distinguish
 * "no object" (-ENODATA) from "bogus firmware value".
 */
static int thermal_acpi_trip_temp(struct acpi_device *adev, char *obj_name,
				  int *ret_temp)
{
	unsigned long long temp;
	acpi_status status;

	status = acpi_evaluate_integer(adev->handle, obj_name, NULL, &temp);
	if (ACPI_FAILURE(status)) {
		acpi_handle_debug(adev->handle, "%s evaluation failed\n", obj_name);
		return -ENODATA;
	}

	if (temp >= TEMP_MIN_DECIK && temp <= TEMP_MAX_DECIK) {
		*ret_temp = deci_kelvin_to_millicelsius(temp);
	} else {
		/* Firmware returned a value outside the plausible range. */
		acpi_handle_debug(adev->handle, "%s result %llu out of range\n",
				  obj_name, temp);
		*ret_temp = THERMAL_TEMP_INVALID;
	}

	return 0;
}

/**
 * thermal_acpi_active_trip_temp - Retrieve active trip point temperature
 * @adev: Target thermal zone ACPI device object.
 * @id: Active cooling level (0 - 9).
 * @ret_temp: Address to store the retrieved temperature value on success.
 *
 * Evaluate the _ACx object for the thermal zone represented by @adev to obtain
 * the temperature of the active cooling trip point corresponding to the active
 * cooling level given by @id.
 *
 * Return 0 on success or a negative error value on failure.
 */
int thermal_acpi_active_trip_temp(struct acpi_device *adev, int id, int *ret_temp)
{
	/* Build the method name "_AC0".."_AC9" from the cooling level. */
	char obj_name[] = {'_', 'A', 'C', '0' + id, '\0'};

	if (id < 0 || id > 9)
		return -EINVAL;

	return thermal_acpi_trip_temp(adev, obj_name, ret_temp);
}
EXPORT_SYMBOL_GPL(thermal_acpi_active_trip_temp);

/**
 * thermal_acpi_passive_trip_temp - Retrieve passive trip point temperature
 * @adev: Target thermal zone ACPI device object.
 * @ret_temp: Address to store the retrieved temperature value on success.
 *
 * Evaluate the _PSV object for the thermal zone represented by @adev to obtain
 * the temperature of the passive cooling trip point.
 *
 * Return 0 on success or -ENODATA on failure.
 */
int thermal_acpi_passive_trip_temp(struct acpi_device *adev, int *ret_temp)
{
	return thermal_acpi_trip_temp(adev, "_PSV", ret_temp);
}
EXPORT_SYMBOL_GPL(thermal_acpi_passive_trip_temp);

/**
 * thermal_acpi_hot_trip_temp - Retrieve hot trip point temperature
 * @adev: Target thermal zone ACPI device object.
 * @ret_temp: Address to store the retrieved temperature value on success.
 *
 * Evaluate the _HOT object for the thermal zone represented by @adev to obtain
 * the temperature of the trip point at which the system is expected to be put
 * into the S4 sleep state.
 *
 * Return 0 on success or -ENODATA on failure.
 */
int thermal_acpi_hot_trip_temp(struct acpi_device *adev, int *ret_temp)
{
	return thermal_acpi_trip_temp(adev, "_HOT", ret_temp);
}
EXPORT_SYMBOL_GPL(thermal_acpi_hot_trip_temp);

/**
 * thermal_acpi_critical_trip_temp - Retrieve critical trip point temperature
 * @adev: Target thermal zone ACPI device object.
 * @ret_temp: Address to store the retrieved temperature value on success.
 *
 * Evaluate the _CRT object for the thermal zone represented by @adev to obtain
 * the temperature of the critical cooling trip point.
 *
 * Return 0 on success or -ENODATA on failure.
 */
int thermal_acpi_critical_trip_temp(struct acpi_device *adev, int *ret_temp)
{
	return thermal_acpi_trip_temp(adev, "_CRT", ret_temp);
}
EXPORT_SYMBOL_GPL(thermal_acpi_critical_trip_temp);
linux-master
drivers/thermal/thermal_acpi.c
// SPDX-License-Identifier: GPL-2.0+ /* * Author: zhanghongchen <[email protected]> * Yinbo Zhu <[email protected]> * Copyright (C) 2022-2023 Loongson Technology Corporation Limited */ #include <linux/interrupt.h> #include <linux/io.h> #include <linux/minmax.h> #include <linux/module.h> #include <linux/of_device.h> #include <linux/platform_device.h> #include <linux/thermal.h> #include <linux/units.h> #include "thermal_hwmon.h" #define LOONGSON2_MAX_SENSOR_SEL_NUM 3 #define LOONGSON2_THSENS_CTRL_HI_REG 0x0 #define LOONGSON2_THSENS_CTRL_LOW_REG 0x8 #define LOONGSON2_THSENS_STATUS_REG 0x10 #define LOONGSON2_THSENS_OUT_REG 0x14 #define LOONGSON2_THSENS_INT_LO BIT(0) #define LOONGSON2_THSENS_INT_HIGH BIT(1) #define LOONGSON2_THSENS_OUT_MASK 0xFF struct loongson2_thermal_chip_data { unsigned int thermal_sensor_sel; }; struct loongson2_thermal_data { void __iomem *regs; const struct loongson2_thermal_chip_data *chip_data; }; static int loongson2_thermal_set(struct loongson2_thermal_data *data, int low, int high, bool enable) { u64 reg_ctrl = 0; int reg_off = data->chip_data->thermal_sensor_sel * 2; low = clamp(-40, low, high); high = clamp(125, low, high); low += HECTO; high += HECTO; reg_ctrl = low; reg_ctrl |= enable ? 0x100 : 0; writew(reg_ctrl, data->regs + LOONGSON2_THSENS_CTRL_LOW_REG + reg_off); reg_ctrl = high; reg_ctrl |= enable ? 
0x100 : 0; writew(reg_ctrl, data->regs + LOONGSON2_THSENS_CTRL_HI_REG + reg_off); return 0; } static int loongson2_thermal_get_temp(struct thermal_zone_device *tz, int *temp) { u32 reg_val; struct loongson2_thermal_data *data = thermal_zone_device_priv(tz); reg_val = readl(data->regs + LOONGSON2_THSENS_OUT_REG); *temp = ((reg_val & LOONGSON2_THSENS_OUT_MASK) - HECTO) * KILO; return 0; } static irqreturn_t loongson2_thermal_irq_thread(int irq, void *dev) { struct thermal_zone_device *tzd = dev; struct loongson2_thermal_data *data = thermal_zone_device_priv(tzd); writeb(LOONGSON2_THSENS_INT_LO | LOONGSON2_THSENS_INT_HIGH, data->regs + LOONGSON2_THSENS_STATUS_REG); thermal_zone_device_update(tzd, THERMAL_EVENT_UNSPECIFIED); return IRQ_HANDLED; } static int loongson2_thermal_set_trips(struct thermal_zone_device *tz, int low, int high) { struct loongson2_thermal_data *data = thermal_zone_device_priv(tz); return loongson2_thermal_set(data, low/MILLI, high/MILLI, true); } static const struct thermal_zone_device_ops loongson2_of_thermal_ops = { .get_temp = loongson2_thermal_get_temp, .set_trips = loongson2_thermal_set_trips, }; static int loongson2_thermal_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct loongson2_thermal_data *data; struct thermal_zone_device *tzd; int ret, irq, i; data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL); if (!data) return -ENOMEM; data->chip_data = device_get_match_data(dev); data->regs = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(data->regs)) return PTR_ERR(data->regs); irq = platform_get_irq(pdev, 0); if (irq < 0) return irq; writeb(LOONGSON2_THSENS_INT_LO | LOONGSON2_THSENS_INT_HIGH, data->regs + LOONGSON2_THSENS_STATUS_REG); loongson2_thermal_set(data, 0, 0, false); for (i = 0; i <= LOONGSON2_MAX_SENSOR_SEL_NUM; i++) { tzd = devm_thermal_of_zone_register(dev, i, data, &loongson2_of_thermal_ops); if (!IS_ERR(tzd)) break; if (PTR_ERR(tzd) != ENODEV) continue; return dev_err_probe(dev, PTR_ERR(tzd), 
"failed to register"); } ret = devm_request_threaded_irq(dev, irq, NULL, loongson2_thermal_irq_thread, IRQF_ONESHOT, "loongson2_thermal", tzd); if (ret < 0) return dev_err_probe(dev, ret, "failed to request alarm irq\n"); devm_thermal_add_hwmon_sysfs(dev, tzd); return 0; } static const struct loongson2_thermal_chip_data loongson2_thermal_ls2k1000_data = { .thermal_sensor_sel = 0, }; static const struct of_device_id of_loongson2_thermal_match[] = { { .compatible = "loongson,ls2k1000-thermal", .data = &loongson2_thermal_ls2k1000_data, }, { /* end */ } }; MODULE_DEVICE_TABLE(of, of_loongson2_thermal_match); static struct platform_driver loongson2_thermal_driver = { .driver = { .name = "loongson2_thermal", .of_match_table = of_loongson2_thermal_match, }, .probe = loongson2_thermal_probe, }; module_platform_driver(loongson2_thermal_driver); MODULE_DESCRIPTION("Loongson2 thermal driver"); MODULE_LICENSE("GPL");
linux-master
drivers/thermal/loongson2_thermal.c
// SPDX-License-Identifier: GPL-2.0-only /* * user_space.c - A simple user space Thermal events notifier * * Copyright (C) 2012 Intel Corp * Copyright (C) 2012 Durgadoss R <[email protected]> * * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ * * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */ #include <linux/slab.h> #include <linux/thermal.h> #include "thermal_core.h" static int user_space_bind(struct thermal_zone_device *tz) { pr_info_once("Consider using thermal netlink events interface\n"); return 0; } /** * notify_user_space - Notifies user space about thermal events * @tz: thermal_zone_device * @trip: trip point index * * This function notifies the user space through UEvents. */ static int notify_user_space(struct thermal_zone_device *tz, int trip) { char *thermal_prop[5]; int i; lockdep_assert_held(&tz->lock); thermal_prop[0] = kasprintf(GFP_KERNEL, "NAME=%s", tz->type); thermal_prop[1] = kasprintf(GFP_KERNEL, "TEMP=%d", tz->temperature); thermal_prop[2] = kasprintf(GFP_KERNEL, "TRIP=%d", trip); thermal_prop[3] = kasprintf(GFP_KERNEL, "EVENT=%d", tz->notify_event); thermal_prop[4] = NULL; kobject_uevent_env(&tz->device.kobj, KOBJ_CHANGE, thermal_prop); for (i = 0; i < 4; ++i) kfree(thermal_prop[i]); return 0; } static struct thermal_governor thermal_gov_user_space = { .name = "user_space", .throttle = notify_user_space, .bind_to_tz = user_space_bind, }; THERMAL_GOVERNOR_DECLARE(thermal_gov_user_space);
linux-master
drivers/thermal/gov_user_space.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Kirkwood thermal sensor driver
 *
 * Copyright (C) 2012 Nobuhiro Iwamatsu <[email protected]>
 */
#include <linux/device.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/thermal.h>

/* Bit layout of the single sensor register: valid flag and raw value. */
#define KIRKWOOD_THERMAL_VALID_OFFSET	9
#define KIRKWOOD_THERMAL_VALID_MASK	0x1
#define KIRKWOOD_THERMAL_TEMP_OFFSET	10
#define KIRKWOOD_THERMAL_TEMP_MASK	0x1FF

/* Kirkwood Thermal Sensor Dev Structure */
struct kirkwood_thermal_priv {
	void __iomem *sensor;	/* single thermal sensor register */
};

/*
 * thermal_zone get_temp callback: report the current temperature in
 * millicelsius, or -EIO when the hardware flags the reading invalid.
 */
static int kirkwood_get_temp(struct thermal_zone_device *thermal,
			  int *temp)
{
	unsigned long reg;
	struct kirkwood_thermal_priv *priv = thermal_zone_device_priv(thermal);

	reg = readl_relaxed(priv->sensor);

	/* Valid check */
	if (!((reg >> KIRKWOOD_THERMAL_VALID_OFFSET) &
	    KIRKWOOD_THERMAL_VALID_MASK))
		return -EIO;

	/*
	 * Calculate temperature. According to Marvell internal
	 * documentation the formula for this is:
	 * Celsius = (322-reg)/1.3625
	 * (scaled below so the result is in millicelsius)
	 */
	reg = (reg >> KIRKWOOD_THERMAL_TEMP_OFFSET) &
	      KIRKWOOD_THERMAL_TEMP_MASK;
	*temp = ((3220000000UL - (10000000UL * reg)) / 13625);

	return 0;
}

static struct thermal_zone_device_ops ops = {
	.get_temp = kirkwood_get_temp,
};

static const struct of_device_id kirkwood_thermal_id_table[] = {
	{ .compatible = "marvell,kirkwood-thermal" },
	{}
};

/*
 * Probe: map the sensor register and register a tripless thermal zone
 * named "kirkwood_thermal".
 */
static int kirkwood_thermal_probe(struct platform_device *pdev)
{
	struct thermal_zone_device *thermal = NULL;
	struct kirkwood_thermal_priv *priv;
	int ret;

	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->sensor = devm_platform_get_and_ioremap_resource(pdev, 0, NULL);
	if (IS_ERR(priv->sensor))
		return PTR_ERR(priv->sensor);

	thermal = thermal_tripless_zone_device_register("kirkwood_thermal",
							priv, &ops, NULL);
	if (IS_ERR(thermal)) {
		dev_err(&pdev->dev,
			"Failed to register thermal zone device\n");
		return PTR_ERR(thermal);
	}

	ret = thermal_zone_device_enable(thermal);
	if (ret) {
		/* Undo the registration: the zone could not be enabled. */
		thermal_zone_device_unregister(thermal);
		dev_err(&pdev->dev, "Failed to enable thermal zone device\n");
		return ret;
	}

	platform_set_drvdata(pdev, thermal);

	return 0;
}

/* Remove: drop the thermal zone registered at probe time. */
static int kirkwood_thermal_exit(struct platform_device *pdev)
{
	struct thermal_zone_device *kirkwood_thermal =
		platform_get_drvdata(pdev);

	thermal_zone_device_unregister(kirkwood_thermal);

	return 0;
}

MODULE_DEVICE_TABLE(of, kirkwood_thermal_id_table);

static struct platform_driver kirkwood_thermal_driver = {
	.probe = kirkwood_thermal_probe,
	.remove = kirkwood_thermal_exit,
	.driver = {
		.name = "kirkwood_thermal",
		.of_match_table = kirkwood_thermal_id_table,
	},
};

module_platform_driver(kirkwood_thermal_driver);

MODULE_AUTHOR("Nobuhiro Iwamatsu <[email protected]>");
MODULE_DESCRIPTION("kirkwood thermal driver");
MODULE_LICENSE("GPL");
linux-master
drivers/thermal/kirkwood_thermal.c
// SPDX-License-Identifier: GPL-2.0 /* * uniphier_thermal.c - Socionext UniPhier thermal driver * Copyright 2014 Panasonic Corporation * Copyright 2016-2017 Socionext Inc. * Author: * Kunihiko Hayashi <[email protected]> */ #include <linux/bitops.h> #include <linux/interrupt.h> #include <linux/mfd/syscon.h> #include <linux/module.h> #include <linux/of.h> #include <linux/platform_device.h> #include <linux/regmap.h> #include <linux/thermal.h> /* * block registers * addresses are the offset from .block_base */ #define PVTCTLEN 0x0000 #define PVTCTLEN_EN BIT(0) #define PVTCTLMODE 0x0004 #define PVTCTLMODE_MASK 0xf #define PVTCTLMODE_TEMPMON 0x5 #define EMONREPEAT 0x0040 #define EMONREPEAT_ENDLESS BIT(24) #define EMONREPEAT_PERIOD GENMASK(3, 0) #define EMONREPEAT_PERIOD_1000000 0x9 /* * common registers * addresses are the offset from .map_base */ #define PVTCTLSEL 0x0900 #define PVTCTLSEL_MASK GENMASK(2, 0) #define PVTCTLSEL_MONITOR 0 #define SETALERT0 0x0910 #define SETALERT1 0x0914 #define SETALERT2 0x0918 #define SETALERT_TEMP_OVF (GENMASK(7, 0) << 16) #define SETALERT_TEMP_OVF_VALUE(val) (((val) & GENMASK(7, 0)) << 16) #define SETALERT_EN BIT(0) #define PMALERTINTCTL 0x0920 #define PMALERTINTCTL_CLR(ch) BIT(4 * (ch) + 2) #define PMALERTINTCTL_SET(ch) BIT(4 * (ch) + 1) #define PMALERTINTCTL_EN(ch) BIT(4 * (ch) + 0) #define PMALERTINTCTL_MASK (GENMASK(10, 8) | GENMASK(6, 4) | \ GENMASK(2, 0)) #define TMOD 0x0928 #define TMOD_WIDTH 9 #define TMODCOEF 0x0e5c #define TMODSETUP0_EN BIT(30) #define TMODSETUP0_VAL(val) (((val) & GENMASK(13, 0)) << 16) #define TMODSETUP1_EN BIT(15) #define TMODSETUP1_VAL(val) ((val) & GENMASK(14, 0)) /* SoC critical temperature */ #define CRITICAL_TEMP_LIMIT (120 * 1000) /* Max # of alert channels */ #define ALERT_CH_NUM 3 /* SoC specific thermal sensor data */ struct uniphier_tm_soc_data { u32 map_base; u32 block_base; u32 tmod_setup_addr; }; struct uniphier_tm_dev { struct regmap *regmap; struct device *dev; bool alert_en[ALERT_CH_NUM]; 
struct thermal_zone_device *tz_dev; const struct uniphier_tm_soc_data *data; }; static int uniphier_tm_initialize_sensor(struct uniphier_tm_dev *tdev) { struct regmap *map = tdev->regmap; u32 val; u32 tmod_calib[2]; int ret; /* stop PVT */ regmap_write_bits(map, tdev->data->block_base + PVTCTLEN, PVTCTLEN_EN, 0); /* * Since SoC has a calibrated value that was set in advance, * TMODCOEF shows non-zero and PVT refers the value internally. * * If TMODCOEF shows zero, the boards don't have the calibrated * value, and the driver has to set default value from DT. */ ret = regmap_read(map, tdev->data->map_base + TMODCOEF, &val); if (ret) return ret; if (!val) { /* look for the default values in DT */ ret = of_property_read_u32_array(tdev->dev->of_node, "socionext,tmod-calibration", tmod_calib, ARRAY_SIZE(tmod_calib)); if (ret) return ret; regmap_write(map, tdev->data->tmod_setup_addr, TMODSETUP0_EN | TMODSETUP0_VAL(tmod_calib[0]) | TMODSETUP1_EN | TMODSETUP1_VAL(tmod_calib[1])); } /* select temperature mode */ regmap_write_bits(map, tdev->data->block_base + PVTCTLMODE, PVTCTLMODE_MASK, PVTCTLMODE_TEMPMON); /* set monitoring period */ regmap_write_bits(map, tdev->data->block_base + EMONREPEAT, EMONREPEAT_ENDLESS | EMONREPEAT_PERIOD, EMONREPEAT_ENDLESS | EMONREPEAT_PERIOD_1000000); /* set monitor mode */ regmap_write_bits(map, tdev->data->map_base + PVTCTLSEL, PVTCTLSEL_MASK, PVTCTLSEL_MONITOR); return 0; } static void uniphier_tm_set_alert(struct uniphier_tm_dev *tdev, u32 ch, u32 temp) { struct regmap *map = tdev->regmap; /* set alert temperature */ regmap_write_bits(map, tdev->data->map_base + SETALERT0 + (ch << 2), SETALERT_EN | SETALERT_TEMP_OVF, SETALERT_EN | SETALERT_TEMP_OVF_VALUE(temp / 1000)); } static void uniphier_tm_enable_sensor(struct uniphier_tm_dev *tdev) { struct regmap *map = tdev->regmap; int i; u32 bits = 0; for (i = 0; i < ALERT_CH_NUM; i++) if (tdev->alert_en[i]) bits |= PMALERTINTCTL_EN(i); /* enable alert interrupt */ regmap_write_bits(map, 
tdev->data->map_base + PMALERTINTCTL, PMALERTINTCTL_MASK, bits); /* start PVT */ regmap_write_bits(map, tdev->data->block_base + PVTCTLEN, PVTCTLEN_EN, PVTCTLEN_EN); usleep_range(700, 1500); /* The spec note says at least 700us */ } static void uniphier_tm_disable_sensor(struct uniphier_tm_dev *tdev) { struct regmap *map = tdev->regmap; /* disable alert interrupt */ regmap_write_bits(map, tdev->data->map_base + PMALERTINTCTL, PMALERTINTCTL_MASK, 0); /* stop PVT */ regmap_write_bits(map, tdev->data->block_base + PVTCTLEN, PVTCTLEN_EN, 0); usleep_range(1000, 2000); /* The spec note says at least 1ms */ } static int uniphier_tm_get_temp(struct thermal_zone_device *tz, int *out_temp) { struct uniphier_tm_dev *tdev = thermal_zone_device_priv(tz); struct regmap *map = tdev->regmap; int ret; u32 temp; ret = regmap_read(map, tdev->data->map_base + TMOD, &temp); if (ret) return ret; /* MSB of the TMOD field is a sign bit */ *out_temp = sign_extend32(temp, TMOD_WIDTH - 1) * 1000; return 0; } static const struct thermal_zone_device_ops uniphier_of_thermal_ops = { .get_temp = uniphier_tm_get_temp, }; static void uniphier_tm_irq_clear(struct uniphier_tm_dev *tdev) { u32 mask = 0, bits = 0; int i; for (i = 0; i < ALERT_CH_NUM; i++) { mask |= (PMALERTINTCTL_CLR(i) | PMALERTINTCTL_SET(i)); bits |= PMALERTINTCTL_CLR(i); } /* clear alert interrupt */ regmap_write_bits(tdev->regmap, tdev->data->map_base + PMALERTINTCTL, mask, bits); } static irqreturn_t uniphier_tm_alarm_irq(int irq, void *_tdev) { struct uniphier_tm_dev *tdev = _tdev; disable_irq_nosync(irq); uniphier_tm_irq_clear(tdev); return IRQ_WAKE_THREAD; } static irqreturn_t uniphier_tm_alarm_irq_thread(int irq, void *_tdev) { struct uniphier_tm_dev *tdev = _tdev; thermal_zone_device_update(tdev->tz_dev, THERMAL_EVENT_UNSPECIFIED); return IRQ_HANDLED; } static int uniphier_tm_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct regmap *regmap; struct device_node *parent; struct uniphier_tm_dev *tdev; 
int i, ret, irq, crit_temp = INT_MAX; tdev = devm_kzalloc(dev, sizeof(*tdev), GFP_KERNEL); if (!tdev) return -ENOMEM; tdev->dev = dev; tdev->data = of_device_get_match_data(dev); if (WARN_ON(!tdev->data)) return -EINVAL; irq = platform_get_irq(pdev, 0); if (irq < 0) return irq; /* get regmap from syscon node */ parent = of_get_parent(dev->of_node); /* parent should be syscon node */ regmap = syscon_node_to_regmap(parent); of_node_put(parent); if (IS_ERR(regmap)) { dev_err(dev, "failed to get regmap (error %ld)\n", PTR_ERR(regmap)); return PTR_ERR(regmap); } tdev->regmap = regmap; ret = uniphier_tm_initialize_sensor(tdev); if (ret) { dev_err(dev, "failed to initialize sensor\n"); return ret; } ret = devm_request_threaded_irq(dev, irq, uniphier_tm_alarm_irq, uniphier_tm_alarm_irq_thread, 0, "thermal", tdev); if (ret) return ret; platform_set_drvdata(pdev, tdev); tdev->tz_dev = devm_thermal_of_zone_register(dev, 0, tdev, &uniphier_of_thermal_ops); if (IS_ERR(tdev->tz_dev)) { dev_err(dev, "failed to register sensor device\n"); return PTR_ERR(tdev->tz_dev); } /* set alert temperatures */ for (i = 0; i < thermal_zone_get_num_trips(tdev->tz_dev); i++) { struct thermal_trip trip; ret = thermal_zone_get_trip(tdev->tz_dev, i, &trip); if (ret) return ret; if (trip.type == THERMAL_TRIP_CRITICAL && trip.temperature < crit_temp) crit_temp = trip.temperature; uniphier_tm_set_alert(tdev, i, trip.temperature); tdev->alert_en[i] = true; } if (crit_temp > CRITICAL_TEMP_LIMIT) { dev_err(dev, "critical trip is over limit(>%d), or not set\n", CRITICAL_TEMP_LIMIT); return -EINVAL; } uniphier_tm_enable_sensor(tdev); return 0; } static int uniphier_tm_remove(struct platform_device *pdev) { struct uniphier_tm_dev *tdev = platform_get_drvdata(pdev); /* disable sensor */ uniphier_tm_disable_sensor(tdev); return 0; } static const struct uniphier_tm_soc_data uniphier_pxs2_tm_data = { .map_base = 0xe000, .block_base = 0xe000, .tmod_setup_addr = 0xe904, }; static const struct uniphier_tm_soc_data 
uniphier_ld20_tm_data = { .map_base = 0xe000, .block_base = 0xe800, .tmod_setup_addr = 0xe938, }; static const struct of_device_id uniphier_tm_dt_ids[] = { { .compatible = "socionext,uniphier-pxs2-thermal", .data = &uniphier_pxs2_tm_data, }, { .compatible = "socionext,uniphier-ld20-thermal", .data = &uniphier_ld20_tm_data, }, { .compatible = "socionext,uniphier-pxs3-thermal", .data = &uniphier_ld20_tm_data, }, { .compatible = "socionext,uniphier-nx1-thermal", .data = &uniphier_ld20_tm_data, }, { /* sentinel */ } }; MODULE_DEVICE_TABLE(of, uniphier_tm_dt_ids); static struct platform_driver uniphier_tm_driver = { .probe = uniphier_tm_probe, .remove = uniphier_tm_remove, .driver = { .name = "uniphier-thermal", .of_match_table = uniphier_tm_dt_ids, }, }; module_platform_driver(uniphier_tm_driver); MODULE_AUTHOR("Kunihiko Hayashi <[email protected]>"); MODULE_DESCRIPTION("UniPhier thermal driver"); MODULE_LICENSE("GPL v2");
linux-master
drivers/thermal/uniphier_thermal.c
// SPDX-License-Identifier: GPL-2.0 /* * Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. */ #include <linux/module.h> #include <linux/of_address.h> #include <linux/platform_device.h> #include <linux/thermal.h> struct thermal_mmio { void __iomem *mmio_base; u32 (*read_mmio)(void __iomem *mmio_base); u32 mask; int factor; }; static u32 thermal_mmio_readb(void __iomem *mmio_base) { return readb(mmio_base); } static int thermal_mmio_get_temperature(struct thermal_zone_device *tz, int *temp) { int t; struct thermal_mmio *sensor = thermal_zone_device_priv(tz); t = sensor->read_mmio(sensor->mmio_base) & sensor->mask; t *= sensor->factor; *temp = t; return 0; } static const struct thermal_zone_device_ops thermal_mmio_ops = { .get_temp = thermal_mmio_get_temperature, }; static int thermal_mmio_probe(struct platform_device *pdev) { struct thermal_mmio *sensor; int (*sensor_init_func)(struct platform_device *pdev, struct thermal_mmio *sensor); struct thermal_zone_device *thermal_zone; int ret; int temperature; sensor = devm_kzalloc(&pdev->dev, sizeof(*sensor), GFP_KERNEL); if (!sensor) return -ENOMEM; sensor->mmio_base = devm_platform_get_and_ioremap_resource(pdev, 0, NULL); if (IS_ERR(sensor->mmio_base)) return PTR_ERR(sensor->mmio_base); sensor_init_func = device_get_match_data(&pdev->dev); if (sensor_init_func) { ret = sensor_init_func(pdev, sensor); if (ret) { dev_err(&pdev->dev, "failed to initialize sensor (%d)\n", ret); return ret; } } thermal_zone = devm_thermal_of_zone_register(&pdev->dev, 0, sensor, &thermal_mmio_ops); if (IS_ERR(thermal_zone)) { dev_err(&pdev->dev, "failed to register sensor (%ld)\n", PTR_ERR(thermal_zone)); return PTR_ERR(thermal_zone); } thermal_mmio_get_temperature(thermal_zone, &temperature); dev_info(&pdev->dev, "thermal mmio sensor %s registered, current temperature: %d\n", pdev->name, temperature); return 0; } static int al_thermal_init(struct platform_device *pdev, struct thermal_mmio *sensor) { sensor->read_mmio = 
thermal_mmio_readb; sensor->mask = 0xff; sensor->factor = 1000; return 0; } static const struct of_device_id thermal_mmio_id_table[] = { { .compatible = "amazon,al-thermal", .data = al_thermal_init}, {} }; MODULE_DEVICE_TABLE(of, thermal_mmio_id_table); static struct platform_driver thermal_mmio_driver = { .probe = thermal_mmio_probe, .driver = { .name = "thermal-mmio", .of_match_table = thermal_mmio_id_table, }, }; module_platform_driver(thermal_mmio_driver); MODULE_AUTHOR("Talel Shenhar <[email protected]>"); MODULE_DESCRIPTION("Thermal MMIO Driver"); MODULE_LICENSE("GPL v2");
linux-master
drivers/thermal/thermal_mmio.c
// SPDX-License-Identifier: GPL-2.0 // Copyright (C) 2020 Spreadtrum Communications Inc. #include <linux/clk.h> #include <linux/io.h> #include <linux/iopoll.h> #include <linux/module.h> #include <linux/nvmem-consumer.h> #include <linux/of.h> #include <linux/platform_device.h> #include <linux/slab.h> #include <linux/thermal.h> #define SPRD_THM_CTL 0x0 #define SPRD_THM_INT_EN 0x4 #define SPRD_THM_INT_STS 0x8 #define SPRD_THM_INT_RAW_STS 0xc #define SPRD_THM_DET_PERIOD 0x10 #define SPRD_THM_INT_CLR 0x14 #define SPRD_THM_INT_CLR_ST 0x18 #define SPRD_THM_MON_PERIOD 0x4c #define SPRD_THM_MON_CTL 0x50 #define SPRD_THM_INTERNAL_STS1 0x54 #define SPRD_THM_RAW_READ_MSK 0x3ff #define SPRD_THM_OFFSET(id) ((id) * 0x4) #define SPRD_THM_TEMP(id) (SPRD_THM_OFFSET(id) + 0x5c) #define SPRD_THM_THRES(id) (SPRD_THM_OFFSET(id) + 0x2c) #define SPRD_THM_SEN(id) BIT((id) + 2) #define SPRD_THM_SEN_OVERHEAT_EN(id) BIT((id) + 8) #define SPRD_THM_SEN_OVERHEAT_ALARM_EN(id) BIT((id) + 0) /* bits definitions for register THM_CTL */ #define SPRD_THM_SET_RDY_ST BIT(13) #define SPRD_THM_SET_RDY BIT(12) #define SPRD_THM_MON_EN BIT(1) #define SPRD_THM_EN BIT(0) /* bits definitions for register THM_INT_CTL */ #define SPRD_THM_BIT_INT_EN BIT(26) #define SPRD_THM_OVERHEAT_EN BIT(25) #define SPRD_THM_OTP_TRIP_SHIFT 10 /* bits definitions for register SPRD_THM_INTERNAL_STS1 */ #define SPRD_THM_TEMPER_RDY BIT(0) #define SPRD_THM_DET_PERIOD_DATA 0x800 #define SPRD_THM_DET_PERIOD_MASK GENMASK(19, 0) #define SPRD_THM_MON_MODE 0x7 #define SPRD_THM_MON_MODE_MASK GENMASK(3, 0) #define SPRD_THM_MON_PERIOD_DATA 0x10 #define SPRD_THM_MON_PERIOD_MASK GENMASK(15, 0) #define SPRD_THM_THRES_MASK GENMASK(19, 0) #define SPRD_THM_INT_CLR_MASK GENMASK(24, 0) /* thermal sensor calibration parameters */ #define SPRD_THM_TEMP_LOW -40000 #define SPRD_THM_TEMP_HIGH 120000 #define SPRD_THM_OTP_TEMP 120000 #define SPRD_THM_HOT_TEMP 75000 #define SPRD_THM_RAW_DATA_LOW 0 #define SPRD_THM_RAW_DATA_HIGH 1000 #define SPRD_THM_SEN_NUM 
8 #define SPRD_THM_DT_OFFSET 24 #define SPRD_THM_RATION_OFFSET 17 #define SPRD_THM_RATION_SIGN 16 #define SPRD_THM_RDYST_POLLING_TIME 10 #define SPRD_THM_RDYST_TIMEOUT 700 #define SPRD_THM_TEMP_READY_POLL_TIME 10000 #define SPRD_THM_TEMP_READY_TIMEOUT 600000 #define SPRD_THM_MAX_SENSOR 8 struct sprd_thermal_sensor { struct thermal_zone_device *tzd; struct sprd_thermal_data *data; struct device *dev; int cal_slope; int cal_offset; int id; }; struct sprd_thermal_data { const struct sprd_thm_variant_data *var_data; struct sprd_thermal_sensor *sensor[SPRD_THM_MAX_SENSOR]; struct clk *clk; void __iomem *base; u32 ratio_off; int ratio_sign; int nr_sensors; }; /* * The conversion between ADC and temperature is based on linear relationship, * and use idea_k to specify the slope and ideal_b to specify the offset. * * Since different Spreadtrum SoCs have different ideal_k and ideal_b, * we should save ideal_k and ideal_b in the device data structure. */ struct sprd_thm_variant_data { u32 ideal_k; u32 ideal_b; }; static const struct sprd_thm_variant_data ums512_data = { .ideal_k = 262, .ideal_b = 66400, }; static inline void sprd_thm_update_bits(void __iomem *reg, u32 mask, u32 val) { u32 tmp, orig; orig = readl(reg); tmp = orig & ~mask; tmp |= val & mask; writel(tmp, reg); } static int sprd_thm_cal_read(struct device_node *np, const char *cell_id, u32 *val) { struct nvmem_cell *cell; void *buf; size_t len; cell = of_nvmem_cell_get(np, cell_id); if (IS_ERR(cell)) return PTR_ERR(cell); buf = nvmem_cell_read(cell, &len); nvmem_cell_put(cell); if (IS_ERR(buf)) return PTR_ERR(buf); if (len > sizeof(u32)) { kfree(buf); return -EINVAL; } memcpy(val, buf, len); kfree(buf); return 0; } static int sprd_thm_sensor_calibration(struct device_node *np, struct sprd_thermal_data *thm, struct sprd_thermal_sensor *sen) { int ret; /* * According to thermal datasheet, the default calibration offset is 64, * and the default ratio is 1000. 
*/ int dt_offset = 64, ratio = 1000; ret = sprd_thm_cal_read(np, "sen_delta_cal", &dt_offset); if (ret) return ret; ratio += thm->ratio_sign * thm->ratio_off; /* * According to the ideal slope K and ideal offset B, combined with * calibration value of thermal from efuse, then calibrate the real * slope k and offset b: * k_cal = (k * ratio) / 1000. * b_cal = b + (dt_offset - 64) * 500. */ sen->cal_slope = (thm->var_data->ideal_k * ratio) / 1000; sen->cal_offset = thm->var_data->ideal_b + (dt_offset - 128) * 250; return 0; } static int sprd_thm_rawdata_to_temp(struct sprd_thermal_sensor *sen, u32 rawdata) { clamp(rawdata, (u32)SPRD_THM_RAW_DATA_LOW, (u32)SPRD_THM_RAW_DATA_HIGH); /* * According to the thermal datasheet, the formula of converting * adc value to the temperature value should be: * T_final = k_cal * x - b_cal. */ return sen->cal_slope * rawdata - sen->cal_offset; } static int sprd_thm_temp_to_rawdata(int temp, struct sprd_thermal_sensor *sen) { u32 val; clamp(temp, (int)SPRD_THM_TEMP_LOW, (int)SPRD_THM_TEMP_HIGH); /* * According to the thermal datasheet, the formula of converting * adc value to the temperature value should be: * T_final = k_cal * x - b_cal. */ val = (temp + sen->cal_offset) / sen->cal_slope; return clamp(val, val, (u32)(SPRD_THM_RAW_DATA_HIGH - 1)); } static int sprd_thm_read_temp(struct thermal_zone_device *tz, int *temp) { struct sprd_thermal_sensor *sen = thermal_zone_device_priv(tz); u32 data; data = readl(sen->data->base + SPRD_THM_TEMP(sen->id)) & SPRD_THM_RAW_READ_MSK; *temp = sprd_thm_rawdata_to_temp(sen, data); return 0; } static const struct thermal_zone_device_ops sprd_thm_ops = { .get_temp = sprd_thm_read_temp, }; static int sprd_thm_poll_ready_status(struct sprd_thermal_data *thm) { u32 val; int ret; /* * Wait for thermal ready status before configuring thermal parameters. 
*/ ret = readl_poll_timeout(thm->base + SPRD_THM_CTL, val, !(val & SPRD_THM_SET_RDY_ST), SPRD_THM_RDYST_POLLING_TIME, SPRD_THM_RDYST_TIMEOUT); if (ret) return ret; sprd_thm_update_bits(thm->base + SPRD_THM_CTL, SPRD_THM_MON_EN, SPRD_THM_MON_EN); sprd_thm_update_bits(thm->base + SPRD_THM_CTL, SPRD_THM_SET_RDY, SPRD_THM_SET_RDY); return 0; } static int sprd_thm_wait_temp_ready(struct sprd_thermal_data *thm) { u32 val; /* Wait for first temperature data ready before reading temperature */ return readl_poll_timeout(thm->base + SPRD_THM_INTERNAL_STS1, val, !(val & SPRD_THM_TEMPER_RDY), SPRD_THM_TEMP_READY_POLL_TIME, SPRD_THM_TEMP_READY_TIMEOUT); } static int sprd_thm_set_ready(struct sprd_thermal_data *thm) { int ret; ret = sprd_thm_poll_ready_status(thm); if (ret) return ret; /* * Clear interrupt status, enable thermal interrupt and enable thermal. * * The SPRD thermal controller integrates a hardware interrupt signal, * which means if the temperature is overheat, it will generate an * interrupt and notify the event to PMIC automatically to shutdown the * system. So here we should enable the interrupt bits, though we have * not registered an irq handler. 
*/ writel(SPRD_THM_INT_CLR_MASK, thm->base + SPRD_THM_INT_CLR); sprd_thm_update_bits(thm->base + SPRD_THM_INT_EN, SPRD_THM_BIT_INT_EN, SPRD_THM_BIT_INT_EN); sprd_thm_update_bits(thm->base + SPRD_THM_CTL, SPRD_THM_EN, SPRD_THM_EN); return 0; } static void sprd_thm_sensor_init(struct sprd_thermal_data *thm, struct sprd_thermal_sensor *sen) { u32 otp_rawdata, hot_rawdata; otp_rawdata = sprd_thm_temp_to_rawdata(SPRD_THM_OTP_TEMP, sen); hot_rawdata = sprd_thm_temp_to_rawdata(SPRD_THM_HOT_TEMP, sen); /* Enable the sensor' overheat temperature protection interrupt */ sprd_thm_update_bits(thm->base + SPRD_THM_INT_EN, SPRD_THM_SEN_OVERHEAT_ALARM_EN(sen->id), SPRD_THM_SEN_OVERHEAT_ALARM_EN(sen->id)); /* Set the sensor' overheat and hot threshold temperature */ sprd_thm_update_bits(thm->base + SPRD_THM_THRES(sen->id), SPRD_THM_THRES_MASK, (otp_rawdata << SPRD_THM_OTP_TRIP_SHIFT) | hot_rawdata); /* Enable the corresponding sensor */ sprd_thm_update_bits(thm->base + SPRD_THM_CTL, SPRD_THM_SEN(sen->id), SPRD_THM_SEN(sen->id)); } static void sprd_thm_para_config(struct sprd_thermal_data *thm) { /* Set the period of two valid temperature detection action */ sprd_thm_update_bits(thm->base + SPRD_THM_DET_PERIOD, SPRD_THM_DET_PERIOD_MASK, SPRD_THM_DET_PERIOD); /* Set the sensors' monitor mode */ sprd_thm_update_bits(thm->base + SPRD_THM_MON_CTL, SPRD_THM_MON_MODE_MASK, SPRD_THM_MON_MODE); /* Set the sensors' monitor period */ sprd_thm_update_bits(thm->base + SPRD_THM_MON_PERIOD, SPRD_THM_MON_PERIOD_MASK, SPRD_THM_MON_PERIOD); } static void sprd_thm_toggle_sensor(struct sprd_thermal_sensor *sen, bool on) { struct thermal_zone_device *tzd = sen->tzd; if (on) thermal_zone_device_enable(tzd); else thermal_zone_device_disable(tzd); } static int sprd_thm_probe(struct platform_device *pdev) { struct device_node *np = pdev->dev.of_node; struct device_node *sen_child; struct sprd_thermal_data *thm; struct sprd_thermal_sensor *sen; const struct sprd_thm_variant_data *pdata; int ret, i; u32 
val; pdata = of_device_get_match_data(&pdev->dev); if (!pdata) { dev_err(&pdev->dev, "No matching driver data found\n"); return -EINVAL; } thm = devm_kzalloc(&pdev->dev, sizeof(*thm), GFP_KERNEL); if (!thm) return -ENOMEM; thm->var_data = pdata; thm->base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(thm->base)) return PTR_ERR(thm->base); thm->nr_sensors = of_get_child_count(np); if (thm->nr_sensors == 0 || thm->nr_sensors > SPRD_THM_MAX_SENSOR) { dev_err(&pdev->dev, "incorrect sensor count\n"); return -EINVAL; } thm->clk = devm_clk_get(&pdev->dev, "enable"); if (IS_ERR(thm->clk)) { dev_err(&pdev->dev, "failed to get enable clock\n"); return PTR_ERR(thm->clk); } ret = clk_prepare_enable(thm->clk); if (ret) return ret; sprd_thm_para_config(thm); ret = sprd_thm_cal_read(np, "thm_sign_cal", &val); if (ret) goto disable_clk; if (val > 0) thm->ratio_sign = -1; else thm->ratio_sign = 1; ret = sprd_thm_cal_read(np, "thm_ratio_cal", &thm->ratio_off); if (ret) goto disable_clk; for_each_child_of_node(np, sen_child) { sen = devm_kzalloc(&pdev->dev, sizeof(*sen), GFP_KERNEL); if (!sen) { ret = -ENOMEM; goto of_put; } sen->data = thm; sen->dev = &pdev->dev; ret = of_property_read_u32(sen_child, "reg", &sen->id); if (ret) { dev_err(&pdev->dev, "get sensor reg failed"); goto of_put; } ret = sprd_thm_sensor_calibration(sen_child, thm, sen); if (ret) { dev_err(&pdev->dev, "efuse cal analysis failed"); goto of_put; } sprd_thm_sensor_init(thm, sen); sen->tzd = devm_thermal_of_zone_register(sen->dev, sen->id, sen, &sprd_thm_ops); if (IS_ERR(sen->tzd)) { dev_err(&pdev->dev, "register thermal zone failed %d\n", sen->id); ret = PTR_ERR(sen->tzd); goto of_put; } thm->sensor[sen->id] = sen; } /* sen_child set to NULL at this point */ ret = sprd_thm_set_ready(thm); if (ret) goto of_put; ret = sprd_thm_wait_temp_ready(thm); if (ret) goto of_put; for (i = 0; i < thm->nr_sensors; i++) sprd_thm_toggle_sensor(thm->sensor[i], true); platform_set_drvdata(pdev, thm); return 0; of_put: 
of_node_put(sen_child); disable_clk: clk_disable_unprepare(thm->clk); return ret; } #ifdef CONFIG_PM_SLEEP static void sprd_thm_hw_suspend(struct sprd_thermal_data *thm) { int i; for (i = 0; i < thm->nr_sensors; i++) { sprd_thm_update_bits(thm->base + SPRD_THM_CTL, SPRD_THM_SEN(thm->sensor[i]->id), 0); } sprd_thm_update_bits(thm->base + SPRD_THM_CTL, SPRD_THM_EN, 0x0); } static int sprd_thm_suspend(struct device *dev) { struct sprd_thermal_data *thm = dev_get_drvdata(dev); int i; for (i = 0; i < thm->nr_sensors; i++) sprd_thm_toggle_sensor(thm->sensor[i], false); sprd_thm_hw_suspend(thm); clk_disable_unprepare(thm->clk); return 0; } static int sprd_thm_hw_resume(struct sprd_thermal_data *thm) { int ret, i; for (i = 0; i < thm->nr_sensors; i++) { sprd_thm_update_bits(thm->base + SPRD_THM_CTL, SPRD_THM_SEN(thm->sensor[i]->id), SPRD_THM_SEN(thm->sensor[i]->id)); } ret = sprd_thm_poll_ready_status(thm); if (ret) return ret; writel(SPRD_THM_INT_CLR_MASK, thm->base + SPRD_THM_INT_CLR); sprd_thm_update_bits(thm->base + SPRD_THM_CTL, SPRD_THM_EN, SPRD_THM_EN); return sprd_thm_wait_temp_ready(thm); } static int sprd_thm_resume(struct device *dev) { struct sprd_thermal_data *thm = dev_get_drvdata(dev); int ret, i; ret = clk_prepare_enable(thm->clk); if (ret) return ret; ret = sprd_thm_hw_resume(thm); if (ret) goto disable_clk; for (i = 0; i < thm->nr_sensors; i++) sprd_thm_toggle_sensor(thm->sensor[i], true); return 0; disable_clk: clk_disable_unprepare(thm->clk); return ret; } #endif static int sprd_thm_remove(struct platform_device *pdev) { struct sprd_thermal_data *thm = platform_get_drvdata(pdev); int i; for (i = 0; i < thm->nr_sensors; i++) { sprd_thm_toggle_sensor(thm->sensor[i], false); devm_thermal_of_zone_unregister(&pdev->dev, thm->sensor[i]->tzd); } clk_disable_unprepare(thm->clk); return 0; } static const struct of_device_id sprd_thermal_of_match[] = { { .compatible = "sprd,ums512-thermal", .data = &ums512_data }, { }, }; MODULE_DEVICE_TABLE(of, 
sprd_thermal_of_match); static const struct dev_pm_ops sprd_thermal_pm_ops = { SET_SYSTEM_SLEEP_PM_OPS(sprd_thm_suspend, sprd_thm_resume) }; static struct platform_driver sprd_thermal_driver = { .probe = sprd_thm_probe, .remove = sprd_thm_remove, .driver = { .name = "sprd-thermal", .pm = &sprd_thermal_pm_ops, .of_match_table = sprd_thermal_of_match, }, }; module_platform_driver(sprd_thermal_driver); MODULE_AUTHOR("Freeman Liu <[email protected]>"); MODULE_DESCRIPTION("Spreadtrum thermal driver"); MODULE_LICENSE("GPL v2");
linux-master
drivers/thermal/sprd_thermal.c
// SPDX-License-Identifier: GPL-2.0 /* * linux/drivers/thermal/cpufreq_cooling.c * * Copyright (C) 2012 Samsung Electronics Co., Ltd(http://www.samsung.com) * * Copyright (C) 2012-2018 Linaro Limited. * * Authors: Amit Daniel <[email protected]> * Viresh Kumar <[email protected]> * */ #include <linux/cpu.h> #include <linux/cpufreq.h> #include <linux/cpu_cooling.h> #include <linux/device.h> #include <linux/energy_model.h> #include <linux/err.h> #include <linux/export.h> #include <linux/pm_opp.h> #include <linux/pm_qos.h> #include <linux/slab.h> #include <linux/thermal.h> #include <linux/units.h> #include "thermal_trace.h" /* * Cooling state <-> CPUFreq frequency * * Cooling states are translated to frequencies throughout this driver and this * is the relation between them. * * Highest cooling state corresponds to lowest possible frequency. * * i.e. * level 0 --> 1st Max Freq * level 1 --> 2nd Max Freq * ... */ /** * struct time_in_idle - Idle time stats * @time: previous reading of the absolute time that this cpu was idle * @timestamp: wall time of the last invocation of get_cpu_idle_time_us() */ struct time_in_idle { u64 time; u64 timestamp; }; /** * struct cpufreq_cooling_device - data for cooling device with cpufreq * @last_load: load measured by the latest call to cpufreq_get_requested_power() * @cpufreq_state: integer value representing the current state of cpufreq * cooling devices. * @max_level: maximum cooling level. One less than total number of valid * cpufreq frequencies. * @em: Reference on the Energy Model of the device * @cdev: thermal_cooling_device pointer to keep track of the * registered cooling device. * @policy: cpufreq policy. * @cooling_ops: cpufreq callbacks to thermal cooling device ops * @idle_time: idle time stats * @qos_req: PM QoS contraint to apply * * This structure is required for keeping information of each registered * cpufreq_cooling_device. 
*/ struct cpufreq_cooling_device { u32 last_load; unsigned int cpufreq_state; unsigned int max_level; struct em_perf_domain *em; struct cpufreq_policy *policy; struct thermal_cooling_device_ops cooling_ops; #ifndef CONFIG_SMP struct time_in_idle *idle_time; #endif struct freq_qos_request qos_req; }; #ifdef CONFIG_THERMAL_GOV_POWER_ALLOCATOR /** * get_level: Find the level for a particular frequency * @cpufreq_cdev: cpufreq_cdev for which the property is required * @freq: Frequency * * Return: level corresponding to the frequency. */ static unsigned long get_level(struct cpufreq_cooling_device *cpufreq_cdev, unsigned int freq) { int i; for (i = cpufreq_cdev->max_level - 1; i >= 0; i--) { if (freq > cpufreq_cdev->em->table[i].frequency) break; } return cpufreq_cdev->max_level - i - 1; } static u32 cpu_freq_to_power(struct cpufreq_cooling_device *cpufreq_cdev, u32 freq) { unsigned long power_mw; int i; for (i = cpufreq_cdev->max_level - 1; i >= 0; i--) { if (freq > cpufreq_cdev->em->table[i].frequency) break; } power_mw = cpufreq_cdev->em->table[i + 1].power; power_mw /= MICROWATT_PER_MILLIWATT; return power_mw; } static u32 cpu_power_to_freq(struct cpufreq_cooling_device *cpufreq_cdev, u32 power) { unsigned long em_power_mw; int i; for (i = cpufreq_cdev->max_level; i > 0; i--) { /* Convert EM power to milli-Watts to make safe comparison */ em_power_mw = cpufreq_cdev->em->table[i].power; em_power_mw /= MICROWATT_PER_MILLIWATT; if (power >= em_power_mw) break; } return cpufreq_cdev->em->table[i].frequency; } /** * get_load() - get load for a cpu * @cpufreq_cdev: struct cpufreq_cooling_device for the cpu * @cpu: cpu number * @cpu_idx: index of the cpu in time_in_idle array * * Return: The average load of cpu @cpu in percentage since this * function was last called. 
*/ #ifdef CONFIG_SMP static u32 get_load(struct cpufreq_cooling_device *cpufreq_cdev, int cpu, int cpu_idx) { unsigned long util = sched_cpu_util(cpu); return (util * 100) / arch_scale_cpu_capacity(cpu); } #else /* !CONFIG_SMP */ static u32 get_load(struct cpufreq_cooling_device *cpufreq_cdev, int cpu, int cpu_idx) { u32 load; u64 now, now_idle, delta_time, delta_idle; struct time_in_idle *idle_time = &cpufreq_cdev->idle_time[cpu_idx]; now_idle = get_cpu_idle_time(cpu, &now, 0); delta_idle = now_idle - idle_time->time; delta_time = now - idle_time->timestamp; if (delta_time <= delta_idle) load = 0; else load = div64_u64(100 * (delta_time - delta_idle), delta_time); idle_time->time = now_idle; idle_time->timestamp = now; return load; } #endif /* CONFIG_SMP */ /** * get_dynamic_power() - calculate the dynamic power * @cpufreq_cdev: &cpufreq_cooling_device for this cdev * @freq: current frequency * * Return: the dynamic power consumed by the cpus described by * @cpufreq_cdev. */ static u32 get_dynamic_power(struct cpufreq_cooling_device *cpufreq_cdev, unsigned long freq) { u32 raw_cpu_power; raw_cpu_power = cpu_freq_to_power(cpufreq_cdev, freq); return (raw_cpu_power * cpufreq_cdev->last_load) / 100; } /** * cpufreq_get_requested_power() - get the current power * @cdev: &thermal_cooling_device pointer * @power: pointer in which to store the resulting power * * Calculate the current power consumption of the cpus in milliwatts * and store it in @power. This function should actually calculate * the requested power, but it's hard to get the frequency that * cpufreq would have assigned if there were no thermal limits. * Instead, we calculate the current power on the assumption that the * immediate future will look like the immediate past. * * We use the current frequency and the average load since this * function was last called. In reality, there could have been * multiple opps since this function was last called and that affects * the load calculation. 
While it's not perfectly accurate, this * simplification is good enough and works. REVISIT this, as more * complex code may be needed if experiments show that it's not * accurate enough. * * Return: 0 on success, this function doesn't fail. */ static int cpufreq_get_requested_power(struct thermal_cooling_device *cdev, u32 *power) { unsigned long freq; int i = 0, cpu; u32 total_load = 0; struct cpufreq_cooling_device *cpufreq_cdev = cdev->devdata; struct cpufreq_policy *policy = cpufreq_cdev->policy; freq = cpufreq_quick_get(policy->cpu); for_each_cpu(cpu, policy->related_cpus) { u32 load; if (cpu_online(cpu)) load = get_load(cpufreq_cdev, cpu, i); else load = 0; total_load += load; } cpufreq_cdev->last_load = total_load; *power = get_dynamic_power(cpufreq_cdev, freq); trace_thermal_power_cpu_get_power_simple(policy->cpu, *power); return 0; } /** * cpufreq_state2power() - convert a cpu cdev state to power consumed * @cdev: &thermal_cooling_device pointer * @state: cooling device state to be converted * @power: pointer in which to store the resulting power * * Convert cooling device state @state into power consumption in * milliwatts assuming 100% load. Store the calculated power in * @power. * * Return: 0 on success, -EINVAL if the cooling device state is bigger * than maximum allowed. 
*/
static int cpufreq_state2power(struct thermal_cooling_device *cdev,
			       unsigned long state, u32 *power)
{
	unsigned int freq, num_cpus, idx;
	struct cpufreq_cooling_device *cpufreq_cdev = cdev->devdata;

	/* Request state should be less than max_level */
	if (state > cpufreq_cdev->max_level)
		return -EINVAL;

	num_cpus = cpumask_weight(cpufreq_cdev->policy->cpus);

	/* Cooling state is an inverted index into the EM table (state 0 -> last entry) */
	idx = cpufreq_cdev->max_level - state;
	freq = cpufreq_cdev->em->table[idx].frequency;
	*power = cpu_freq_to_power(cpufreq_cdev, freq) * num_cpus;

	return 0;
}

/**
 * cpufreq_power2state() - convert power to a cooling device state
 * @cdev:	&thermal_cooling_device pointer
 * @power:	power in milliwatts to be converted
 * @state:	pointer in which to store the resulting state
 *
 * Calculate a cooling device state for the cpus described by @cdev
 * that would allow them to consume at most @power mW and store it in
 * @state.  Note that this calculation depends on external factors
 * such as the CPUs load.  Calling this function with the same power
 * as input can yield different cooling device states depending on those
 * external factors.
 *
 * Return: 0 on success, this function doesn't fail.
 */
static int cpufreq_power2state(struct thermal_cooling_device *cdev,
			       u32 power, unsigned long *state)
{
	unsigned int target_freq;
	u32 last_load, normalised_power;
	struct cpufreq_cooling_device *cpufreq_cdev = cdev->devdata;
	struct cpufreq_policy *policy = cpufreq_cdev->policy;

	/* Guard against division by zero before any load has been recorded */
	last_load = cpufreq_cdev->last_load ?: 1;
	normalised_power = (power * 100) / last_load;
	target_freq = cpu_power_to_freq(cpufreq_cdev, normalised_power);

	*state = get_level(cpufreq_cdev, target_freq);
	trace_thermal_power_cpu_limit(policy->related_cpus, target_freq, *state,
				      power);
	return 0;
}

/*
 * Sanity-check that the Energy Model matches this cooling device: it must
 * exist, not be artificial, span exactly the policy's related CPUs, and
 * provide one performance state per cooling level.
 */
static inline bool em_is_sane(struct cpufreq_cooling_device *cpufreq_cdev,
			      struct em_perf_domain *em)
{
	struct cpufreq_policy *policy;
	unsigned int nr_levels;

	if (!em || em_is_artificial(em))
		return false;

	policy = cpufreq_cdev->policy;
	if (!cpumask_equal(policy->related_cpus, em_span_cpus(em))) {
		pr_err("The span of pd %*pbl is misaligned with cpufreq policy %*pbl\n",
			cpumask_pr_args(em_span_cpus(em)),
			cpumask_pr_args(policy->related_cpus));
		return false;
	}

	nr_levels = cpufreq_cdev->max_level + 1;
	if (em_pd_nr_perf_states(em) != nr_levels) {
		pr_err("The number of performance states in pd %*pbl (%u) doesn't match the number of cooling levels (%u)\n",
			cpumask_pr_args(em_span_cpus(em)),
			em_pd_nr_perf_states(em), nr_levels);
		return false;
	}

	return true;
}
#endif /* CONFIG_THERMAL_GOV_POWER_ALLOCATOR */

#ifdef CONFIG_SMP
/* No per-CPU idle-time array is allocated in this configuration. */
static inline int allocate_idle_time(struct cpufreq_cooling_device *cpufreq_cdev)
{
	return 0;
}

static inline void free_idle_time(struct cpufreq_cooling_device *cpufreq_cdev)
{
}
#else
/* Allocate one idle-time slot per CPU covered by the policy. */
static int allocate_idle_time(struct cpufreq_cooling_device *cpufreq_cdev)
{
	unsigned int num_cpus = cpumask_weight(cpufreq_cdev->policy->related_cpus);

	cpufreq_cdev->idle_time = kcalloc(num_cpus,
					  sizeof(*cpufreq_cdev->idle_time),
					  GFP_KERNEL);
	if (!cpufreq_cdev->idle_time)
		return -ENOMEM;

	return 0;
}

static void free_idle_time(struct cpufreq_cooling_device *cpufreq_cdev)
{
	kfree(cpufreq_cdev->idle_time);
	/* Clear the pointer so a later free_idle_time() call is harmless */
	cpufreq_cdev->idle_time = NULL;
}
#endif /* CONFIG_SMP */

/* Map a cooling state to the corresponding CPU frequency. */
static unsigned int get_state_freq(struct cpufreq_cooling_device *cpufreq_cdev,
				   unsigned long state)
{
	struct cpufreq_policy *policy;
	unsigned long idx;

#ifdef CONFIG_THERMAL_GOV_POWER_ALLOCATOR
	/* Use the Energy Model table if available */
	if (cpufreq_cdev->em) {
		idx = cpufreq_cdev->max_level - state;
		return cpufreq_cdev->em->table[idx].frequency;
	}
#endif

	/* Otherwise, fallback on the CPUFreq table */
	policy = cpufreq_cdev->policy;
	if (policy->freq_table_sorted == CPUFREQ_TABLE_SORTED_ASCENDING)
		idx = cpufreq_cdev->max_level - state;
	else
		idx = state;

	return policy->freq_table[idx].frequency;
}

/* cpufreq cooling device callback functions are defined below */

/**
 * cpufreq_get_max_state - callback function to get the max cooling state.
 * @cdev: thermal cooling device pointer.
 * @state: fill this variable with the max cooling state.
 *
 * Callback for the thermal cooling device to return the cpufreq
 * max cooling state.
 *
 * Return: 0 on success, this function doesn't fail.
 */
static int cpufreq_get_max_state(struct thermal_cooling_device *cdev,
				 unsigned long *state)
{
	struct cpufreq_cooling_device *cpufreq_cdev = cdev->devdata;

	*state = cpufreq_cdev->max_level;

	return 0;
}

/**
 * cpufreq_get_cur_state - callback function to get the current cooling state.
 * @cdev: thermal cooling device pointer.
 * @state: fill this variable with the current cooling state.
 *
 * Callback for the thermal cooling device to return the cpufreq
 * current cooling state.
 *
 * Return: 0 on success, this function doesn't fail.
 */
static int cpufreq_get_cur_state(struct thermal_cooling_device *cdev,
				 unsigned long *state)
{
	struct cpufreq_cooling_device *cpufreq_cdev = cdev->devdata;

	*state = cpufreq_cdev->cpufreq_state;

	return 0;
}

/**
 * cpufreq_set_cur_state - callback function to set the current cooling state.
 * @cdev: thermal cooling device pointer.
 * @state: set this variable to the current cooling state.
 *
 * Callback for the thermal cooling device to change the cpufreq
 * current cooling state.
 *
 * Return: 0 on success, an error code otherwise.
 */
static int cpufreq_set_cur_state(struct thermal_cooling_device *cdev,
				 unsigned long state)
{
	struct cpufreq_cooling_device *cpufreq_cdev = cdev->devdata;
	struct cpumask *cpus;
	unsigned int frequency;
	int ret;

	/* Request state should be less than max_level */
	if (state > cpufreq_cdev->max_level)
		return -EINVAL;

	/* Check if the old cooling action is same as new cooling action */
	if (cpufreq_cdev->cpufreq_state == state)
		return 0;

	frequency = get_state_freq(cpufreq_cdev, state);

	/* Apply the frequency cap through the QoS max-frequency request */
	ret = freq_qos_update_request(&cpufreq_cdev->qos_req, frequency);
	if (ret >= 0) {
		cpufreq_cdev->cpufreq_state = state;
		cpus = cpufreq_cdev->policy->related_cpus;
		arch_update_thermal_pressure(cpus, frequency);
		ret = 0;
	}

	return ret;
}

/**
 * __cpufreq_cooling_register - helper function to create cpufreq cooling device
 * @np: a valid struct device_node to the cooling device tree node
 * @policy: cpufreq policy
 * Normally this should be same as cpufreq policy->related_cpus.
 * @em: Energy Model of the cpufreq policy
 *
 * This interface function registers the cpufreq cooling device with the name
 * "cpufreq-%s". This API can support multiple instances of cpufreq
 * cooling devices. It also gives the opportunity to link the cooling device
 * with a device tree node, in order to bind it via the thermal DT code.
 *
 * Return: a valid struct thermal_cooling_device pointer on success,
 * on failure, it returns a corresponding ERR_PTR().
 */
static struct thermal_cooling_device *
__cpufreq_cooling_register(struct device_node *np,
			   struct cpufreq_policy *policy,
			   struct em_perf_domain *em)
{
	struct thermal_cooling_device *cdev;
	struct cpufreq_cooling_device *cpufreq_cdev;
	unsigned int i;
	struct device *dev;
	int ret;
	struct thermal_cooling_device_ops *cooling_ops;
	char *name;

	if (IS_ERR_OR_NULL(policy)) {
		pr_err("%s: cpufreq policy isn't valid: %p\n", __func__, policy);
		return ERR_PTR(-EINVAL);
	}

	dev = get_cpu_device(policy->cpu);
	if (unlikely(!dev)) {
		pr_warn("No cpu device for cpu %d\n", policy->cpu);
		return ERR_PTR(-ENODEV);
	}

	i = cpufreq_table_count_valid_entries(policy);
	if (!i) {
		pr_debug("%s: CPUFreq table not found or has no valid entries\n",
			 __func__);
		return ERR_PTR(-ENODEV);
	}

	cpufreq_cdev = kzalloc(sizeof(*cpufreq_cdev), GFP_KERNEL);
	if (!cpufreq_cdev)
		return ERR_PTR(-ENOMEM);

	cpufreq_cdev->policy = policy;

	ret = allocate_idle_time(cpufreq_cdev);
	if (ret) {
		cdev = ERR_PTR(ret);
		goto free_cdev;
	}

	/* max_level is an index, not a counter */
	cpufreq_cdev->max_level = i - 1;

	cooling_ops = &cpufreq_cdev->cooling_ops;
	cooling_ops->get_max_state = cpufreq_get_max_state;
	cooling_ops->get_cur_state = cpufreq_get_cur_state;
	cooling_ops->set_cur_state = cpufreq_set_cur_state;

#ifdef CONFIG_THERMAL_GOV_POWER_ALLOCATOR
	/* Power-allocator callbacks are only wired up with a sane EM */
	if (em_is_sane(cpufreq_cdev, em)) {
		cpufreq_cdev->em = em;
		cooling_ops->get_requested_power = cpufreq_get_requested_power;
		cooling_ops->state2power = cpufreq_state2power;
		cooling_ops->power2state = cpufreq_power2state;
	} else
#endif
	if (policy->freq_table_sorted == CPUFREQ_TABLE_UNSORTED) {
		pr_err("%s: unsorted frequency tables are not supported\n",
		       __func__);
		cdev = ERR_PTR(-EINVAL);
		goto free_idle_time;
	}

	/* State 0 (no throttling) maps to the highest available frequency */
	ret = freq_qos_add_request(&policy->constraints,
				   &cpufreq_cdev->qos_req, FREQ_QOS_MAX,
				   get_state_freq(cpufreq_cdev, 0));
	if (ret < 0) {
		pr_err("%s: Failed to add freq constraint (%d)\n", __func__,
		       ret);
		cdev = ERR_PTR(ret);
		goto free_idle_time;
	}

	cdev = ERR_PTR(-ENOMEM);
	name = kasprintf(GFP_KERNEL, "cpufreq-%s", dev_name(dev));
	if (!name)
		goto remove_qos_req;

	cdev = thermal_of_cooling_device_register(np, name, cpufreq_cdev,
						  cooling_ops);
	kfree(name);

	if (IS_ERR(cdev))
		goto remove_qos_req;

	return cdev;

remove_qos_req:
	freq_qos_remove_request(&cpufreq_cdev->qos_req);
free_idle_time:
	free_idle_time(cpufreq_cdev);
free_cdev:
	kfree(cpufreq_cdev);

	return cdev;
}

/**
 * cpufreq_cooling_register - function to create cpufreq cooling device.
 * @policy: cpufreq policy
 *
 * This interface function registers the cpufreq cooling device with the name
 * "cpufreq-%s". This API can support multiple instances of cpufreq cooling
 * devices.
 *
 * Return: a valid struct thermal_cooling_device pointer on success,
 * on failure, it returns a corresponding ERR_PTR().
 */
struct thermal_cooling_device *
cpufreq_cooling_register(struct cpufreq_policy *policy)
{
	return __cpufreq_cooling_register(NULL, policy, NULL);
}
EXPORT_SYMBOL_GPL(cpufreq_cooling_register);

/**
 * of_cpufreq_cooling_register - function to create cpufreq cooling device.
 * @policy: cpufreq policy
 *
 * This interface function registers the cpufreq cooling device with the name
 * "cpufreq-%s". This API can support multiple instances of cpufreq cooling
 * devices. Using this API, the cpufreq cooling device will be linked to the
 * device tree node provided.
 *
 * Using this function, the cooling device will implement the power
 * extensions by using the Energy Model (if present). The cpus must have
 * registered their OPPs using the OPP library.
 *
 * Return: a valid struct thermal_cooling_device pointer on success,
 * and NULL on failure.
 */
struct thermal_cooling_device *
of_cpufreq_cooling_register(struct cpufreq_policy *policy)
{
	struct device_node *np = of_get_cpu_node(policy->cpu, NULL);
	struct thermal_cooling_device *cdev = NULL;

	if (!np) {
		pr_err("cpufreq_cooling: OF node not available for cpu%d\n",
		       policy->cpu);
		return NULL;
	}

	if (of_property_present(np, "#cooling-cells")) {
		struct em_perf_domain *em = em_cpu_get(policy->cpu);

		cdev = __cpufreq_cooling_register(np, policy, em);
		if (IS_ERR(cdev)) {
			pr_err("cpufreq_cooling: cpu%d failed to register as cooling device: %ld\n",
			       policy->cpu, PTR_ERR(cdev));
			cdev = NULL;
		}
	}

	of_node_put(np);
	return cdev;
}
EXPORT_SYMBOL_GPL(of_cpufreq_cooling_register);

/**
 * cpufreq_cooling_unregister - function to remove cpufreq cooling device.
 * @cdev: thermal cooling device pointer.
 *
 * This interface function unregisters the "cpufreq-%x" cooling device.
 */
void cpufreq_cooling_unregister(struct thermal_cooling_device *cdev)
{
	struct cpufreq_cooling_device *cpufreq_cdev;

	if (!cdev)
		return;

	cpufreq_cdev = cdev->devdata;

	/* Unregister first so no callback can race with the teardown below */
	thermal_cooling_device_unregister(cdev);
	freq_qos_remove_request(&cpufreq_cdev->qos_req);
	free_idle_time(cpufreq_cdev);
	kfree(cpufreq_cdev);
}
EXPORT_SYMBOL_GPL(cpufreq_cooling_unregister);
linux-master
drivers/thermal/cpufreq_cooling.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * HiSilicon thermal sensor driver
 *
 * Copyright (c) 2014-2015 HiSilicon Limited.
 * Copyright (c) 2014-2015 Linaro Limited.
 *
 * Xinwei Kong <[email protected]>
 * Leo Yan <[email protected]>
 */

#include <linux/cpufreq.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/thermal.h>

/* Hi6220 tsensor register offsets and field masks */
#define HI6220_TEMP0_LAG		(0x0)
#define HI6220_TEMP0_TH			(0x4)
#define HI6220_TEMP0_RST_TH		(0x8)
#define HI6220_TEMP0_CFG		(0xC)
#define HI6220_TEMP0_CFG_SS_MSK		(0xF000)
#define HI6220_TEMP0_CFG_HDAK_MSK	(0x30)
#define HI6220_TEMP0_EN			(0x10)
#define HI6220_TEMP0_INT_EN		(0x14)
#define HI6220_TEMP0_INT_CLR		(0x18)
#define HI6220_TEMP0_RST_MSK		(0x1C)
#define HI6220_TEMP0_VALUE		(0x28)

/* Hi3660 has a per-channel register bank, 0x40 bytes apart */
#define HI3660_OFFSET(chan)		((chan) * 0x40)
#define HI3660_TEMP(chan)		(HI3660_OFFSET(chan) + 0x1C)
#define HI3660_TH(chan)			(HI3660_OFFSET(chan) + 0x20)
#define HI3660_LAG(chan)		(HI3660_OFFSET(chan) + 0x28)
#define HI3660_INT_EN(chan)		(HI3660_OFFSET(chan) + 0x2C)
#define HI3660_INT_CLR(chan)		(HI3660_OFFSET(chan) + 0x30)

/* Temperature scale parameters, millidegree Celsius */
#define HI6220_TEMP_BASE		(-60000)
#define HI6220_TEMP_RESET		(100000)
#define HI6220_TEMP_STEP		(785)
#define HI6220_TEMP_LAG			(3500)

#define HI3660_TEMP_BASE		(-63780)
#define HI3660_TEMP_STEP		(205)
#define HI3660_TEMP_LAG			(4000)

/* Sensor channel IDs */
#define HI6220_CLUSTER0_SENSOR		2
#define HI6220_CLUSTER1_SENSOR		1

#define HI3660_LITTLE_SENSOR		0
#define HI3660_BIG_SENSOR		1
#define HI3660_G3D_SENSOR		2
#define HI3660_MODEM_SENSOR		3

struct hisi_thermal_data;

struct hisi_thermal_sensor {
	struct hisi_thermal_data *data;		/* back-pointer to driver data */
	struct thermal_zone_device *tzd;
	const char *irq_name;
	uint32_t id;				/* hardware sensor channel */
	uint32_t thres_temp;			/* passive trip temperature, m°C */
};

/* Per-SoC hooks selected via the OF match table */
struct hisi_thermal_ops {
	int (*get_temp)(struct hisi_thermal_sensor *sensor);
	int (*enable_sensor)(struct hisi_thermal_sensor *sensor);
	int (*disable_sensor)(struct hisi_thermal_sensor *sensor);
	int (*irq_handler)(struct hisi_thermal_sensor *sensor);
	int (*probe)(struct hisi_thermal_data *data);
};

struct hisi_thermal_data {
	const struct hisi_thermal_ops *ops;
	struct hisi_thermal_sensor *sensor;
	struct platform_device *pdev;
	struct clk *clk;
	void __iomem *regs;
	int nr_sensors;
};

/*
 * The temperature computation on the tsensor is as follow:
 *	Unit: millidegree Celsius
 *	Step: 200/255 (0.7843)
 *	Temperature base: -60°C
 *
 * The register is programmed in temperature steps, every step is 785
 * millidegree and begins at -60 000 m°C
 *
 * The temperature from the steps:
 *
 *	Temp = TempBase + (steps x 785)
 *
 * and the steps from the temperature:
 *
 *	steps = (Temp - TempBase) / 785
 *
 */
static inline int hi6220_thermal_step_to_temp(int step)
{
	return HI6220_TEMP_BASE + (step * HI6220_TEMP_STEP);
}

static inline int hi6220_thermal_temp_to_step(int temp)
{
	return DIV_ROUND_UP(temp - HI6220_TEMP_BASE, HI6220_TEMP_STEP);
}

/*
 * for Hi3660,
 *	Step: 189/922 (0.205)
 *	Temperature base: -63.780°C
 *
 * The register is programmed in temperature steps, every step is 205
 * millidegree and begins at -63 780 m°C
 */
static inline int hi3660_thermal_step_to_temp(int step)
{
	return HI3660_TEMP_BASE + step * HI3660_TEMP_STEP;
}

static inline int hi3660_thermal_temp_to_step(int temp)
{
	return DIV_ROUND_UP(temp - HI3660_TEMP_BASE, HI3660_TEMP_STEP);
}

/*
 * The lag register contains 5 bits encoding the temperature in steps.
 *
 * Each time the temperature crosses the threshold boundary, an
 * interrupt is raised. It could be when the temperature is going
 * above the threshold or below. However, if the temperature is
 * fluctuating around this value due to the load, we can receive
 * several interrupts which may not desired.
 *
 * We can setup a temperature representing the delta between the
 * threshold and the current temperature when the temperature is
 * decreasing.
 *
 * For instance: the lag register is 5°C, the threshold is 65°C, when
 * the temperature reaches 65°C an interrupt is raised and when the
 * temperature decrease to 65°C - 5°C another interrupt is raised.
 *
 * A very short lag can lead to an interrupt storm, a long lag
 * increase the latency to react to the temperature changes.  In our
 * case, that is not really a problem as we are polling the
 * temperature.
 *
 * [0:4] : lag register
 *
 * The temperature is coded in steps, cf. HI6220_TEMP_STEP.
 *
 * Min : 0x00 :  0.0 °C
 * Max : 0x1F : 24.3 °C
 *
 * The 'value' parameter is in milliCelsius.
 */
static inline void hi6220_thermal_set_lag(void __iomem *addr, int value)
{
	writel(DIV_ROUND_UP(value, HI6220_TEMP_STEP) & 0x1F,
	       addr + HI6220_TEMP0_LAG);
}

static inline void hi6220_thermal_alarm_clear(void __iomem *addr, int value)
{
	writel(value, addr + HI6220_TEMP0_INT_CLR);
}

static inline void hi6220_thermal_alarm_enable(void __iomem *addr, int value)
{
	writel(value, addr + HI6220_TEMP0_INT_EN);
}

static inline void hi6220_thermal_alarm_set(void __iomem *addr, int temp)
{
	/* Upper threshold fields are forced high so only TH0 is armed */
	writel(hi6220_thermal_temp_to_step(temp) | 0x0FFFFFF00,
	       addr + HI6220_TEMP0_TH);
}

static inline void hi6220_thermal_reset_set(void __iomem *addr, int temp)
{
	writel(hi6220_thermal_temp_to_step(temp), addr + HI6220_TEMP0_RST_TH);
}

static inline void hi6220_thermal_reset_enable(void __iomem *addr, int value)
{
	writel(value, addr + HI6220_TEMP0_RST_MSK);
}

static inline void hi6220_thermal_enable(void __iomem *addr, int value)
{
	writel(value, addr + HI6220_TEMP0_EN);
}

static inline int hi6220_thermal_get_temperature(void __iomem *addr)
{
	return hi6220_thermal_step_to_temp(readl(addr + HI6220_TEMP0_VALUE));
}

/*
 * [0:6] lag register
 *
 * The temperature is coded in steps, cf. HI3660_TEMP_STEP.
 *
 * Min : 0x00 :  0.0 °C
 * Max : 0x7F : 26.0 °C
 *
 */
static inline void hi3660_thermal_set_lag(void __iomem *addr,
					  int id, int value)
{
	writel(DIV_ROUND_UP(value, HI3660_TEMP_STEP) & 0x7F,
	       addr + HI3660_LAG(id));
}

static inline void hi3660_thermal_alarm_clear(void __iomem *addr,
					      int id, int value)
{
	writel(value, addr + HI3660_INT_CLR(id));
}

static inline void hi3660_thermal_alarm_enable(void __iomem *addr,
					       int id, int value)
{
	writel(value, addr + HI3660_INT_EN(id));
}

static inline void hi3660_thermal_alarm_set(void __iomem *addr,
					    int id, int value)
{
	writel(value, addr + HI3660_TH(id));
}

static inline int hi3660_thermal_get_temperature(void __iomem *addr, int id)
{
	return hi3660_thermal_step_to_temp(readl(addr + HI3660_TEMP(id)));
}

/*
 * Temperature configuration register - Sensor selection
 *
 * Bits [19:12]
 *
 * 0x0: local sensor (default)
 * 0x1: remote sensor 1 (ACPU cluster 1)
 * 0x2: remote sensor 2 (ACPU cluster 0)
 * 0x3: remote sensor 3 (G3D)
 */
static inline void hi6220_thermal_sensor_select(void __iomem *addr, int sensor)
{
	writel((readl(addr + HI6220_TEMP0_CFG) & ~HI6220_TEMP0_CFG_SS_MSK) |
	       (sensor << 12), addr + HI6220_TEMP0_CFG);
}

/*
 * Temperature configuration register - Hdak conversion polling interval
 *
 * Bits [5:4]
 *
 * 0x0 :   0.768 ms
 * 0x1 :   6.144 ms
 * 0x2 :  49.152 ms
 * 0x3 : 393.216 ms
 */
static inline void hi6220_thermal_hdak_set(void __iomem *addr, int value)
{
	writel((readl(addr + HI6220_TEMP0_CFG) & ~HI6220_TEMP0_CFG_HDAK_MSK) |
	       (value << 4), addr + HI6220_TEMP0_CFG);
}

static int hi6220_thermal_irq_handler(struct hisi_thermal_sensor *sensor)
{
	struct hisi_thermal_data *data = sensor->data;

	/* Acknowledge the pending alarm interrupt */
	hi6220_thermal_alarm_clear(data->regs, 1);
	return 0;
}

static int hi3660_thermal_irq_handler(struct hisi_thermal_sensor *sensor)
{
	struct hisi_thermal_data *data = sensor->data;

	hi3660_thermal_alarm_clear(data->regs, sensor->id, 1);
	return 0;
}

static int hi6220_thermal_get_temp(struct hisi_thermal_sensor *sensor)
{
	struct hisi_thermal_data *data = sensor->data;

	return hi6220_thermal_get_temperature(data->regs);
}

static int hi3660_thermal_get_temp(struct hisi_thermal_sensor *sensor)
{
	struct hisi_thermal_data *data = sensor->data;

	return hi3660_thermal_get_temperature(data->regs, sensor->id);
}

static int hi6220_thermal_disable_sensor(struct hisi_thermal_sensor *sensor)
{
	struct hisi_thermal_data *data = sensor->data;

	/* disable sensor module */
	hi6220_thermal_enable(data->regs, 0);
	hi6220_thermal_alarm_enable(data->regs, 0);
	hi6220_thermal_reset_enable(data->regs, 0);

	clk_disable_unprepare(data->clk);

	return 0;
}

static int hi3660_thermal_disable_sensor(struct hisi_thermal_sensor *sensor)
{
	struct hisi_thermal_data *data = sensor->data;

	/* disable sensor module */
	hi3660_thermal_alarm_enable(data->regs, sensor->id, 0);
	return 0;
}

static int hi6220_thermal_enable_sensor(struct hisi_thermal_sensor *sensor)
{
	struct hisi_thermal_data *data = sensor->data;
	int ret;

	/* enable clock for tsensor */
	ret = clk_prepare_enable(data->clk);
	if (ret)
		return ret;

	/* disable module firstly */
	hi6220_thermal_reset_enable(data->regs, 0);
	hi6220_thermal_enable(data->regs, 0);

	/* select sensor id */
	hi6220_thermal_sensor_select(data->regs, sensor->id);

	/* setting the hdak time */
	hi6220_thermal_hdak_set(data->regs, 0);

	/* setting lag value between current temp and the threshold */
	hi6220_thermal_set_lag(data->regs, HI6220_TEMP_LAG);

	/* enable for interrupt */
	hi6220_thermal_alarm_set(data->regs, sensor->thres_temp);

	hi6220_thermal_reset_set(data->regs, HI6220_TEMP_RESET);

	/* enable module */
	hi6220_thermal_reset_enable(data->regs, 1);
	hi6220_thermal_enable(data->regs, 1);

	hi6220_thermal_alarm_clear(data->regs, 0);
	hi6220_thermal_alarm_enable(data->regs, 1);

	return 0;
}

static int hi3660_thermal_enable_sensor(struct hisi_thermal_sensor *sensor)
{
	unsigned int value;
	struct hisi_thermal_data *data = sensor->data;

	/* disable interrupt */
	hi3660_thermal_alarm_enable(data->regs, sensor->id, 0);

	/* setting lag value between current temp and the threshold */
	hi3660_thermal_set_lag(data->regs, sensor->id, HI3660_TEMP_LAG);

	/* set interrupt threshold */
	value = hi3660_thermal_temp_to_step(sensor->thres_temp);
	hi3660_thermal_alarm_set(data->regs, sensor->id, value);

	/* enable interrupt */
	hi3660_thermal_alarm_clear(data->regs, sensor->id, 1);
	hi3660_thermal_alarm_enable(data->regs, sensor->id, 1);

	return 0;
}

static int hi6220_thermal_probe(struct hisi_thermal_data *data)
{
	struct platform_device *pdev = data->pdev;
	struct device *dev = &pdev->dev;
	int ret;

	data->clk = devm_clk_get(dev, "thermal_clk");
	if (IS_ERR(data->clk)) {
		ret = PTR_ERR(data->clk);
		if (ret != -EPROBE_DEFER)
			dev_err(dev, "failed to get thermal clk: %d\n", ret);
		return ret;
	}

	data->sensor = devm_kzalloc(dev, sizeof(*data->sensor), GFP_KERNEL);
	if (!data->sensor)
		return -ENOMEM;

	data->sensor[0].id = HI6220_CLUSTER0_SENSOR;
	data->sensor[0].irq_name = "tsensor_intr";
	data->sensor[0].data = data;
	data->nr_sensors = 1;

	return 0;
}

static int hi3660_thermal_probe(struct hisi_thermal_data *data)
{
	struct platform_device *pdev = data->pdev;
	struct device *dev = &pdev->dev;

	/* Only the big-cluster sensor is exposed on Hi3660 */
	data->nr_sensors = 1;

	data->sensor = devm_kzalloc(dev, sizeof(*data->sensor) *
				    data->nr_sensors, GFP_KERNEL);
	if (!data->sensor)
		return -ENOMEM;

	data->sensor[0].id = HI3660_BIG_SENSOR;
	data->sensor[0].irq_name = "tsensor_a73";
	data->sensor[0].data = data;

	return 0;
}

static int hisi_thermal_get_temp(struct thermal_zone_device *tz, int *temp)
{
	struct hisi_thermal_sensor *sensor = thermal_zone_device_priv(tz);
	struct hisi_thermal_data *data = sensor->data;

	*temp = data->ops->get_temp(sensor);

	return 0;
}

static const struct thermal_zone_device_ops hisi_of_thermal_ops = {
	.get_temp = hisi_thermal_get_temp,
};

static irqreturn_t hisi_thermal_alarm_irq_thread(int irq, void *dev)
{
	struct hisi_thermal_sensor *sensor = dev;
	struct hisi_thermal_data *data = sensor->data;
	int temp = 0;

	data->ops->irq_handler(sensor);

	temp = data->ops->get_temp(sensor);
	/*
	 * NOTE(review): the condition is >= but the message prints "%d > %d";
	 * harmless cosmetic mismatch in the log text.
	 */
	if (temp >= sensor->thres_temp) {
		dev_crit(&data->pdev->dev,
			 "sensor <%d> THERMAL ALARM: %d > %d\n",
			 sensor->id, temp, sensor->thres_temp);

		thermal_zone_device_update(sensor->tzd,
					   THERMAL_EVENT_UNSPECIFIED);

	} else {
		dev_crit(&data->pdev->dev,
			 "sensor <%d> THERMAL ALARM stopped: %d < %d\n",
			 sensor->id, temp, sensor->thres_temp);
	}

	return IRQ_HANDLED;
}

static int hisi_thermal_register_sensor(struct platform_device *pdev,
					struct hisi_thermal_sensor *sensor)
{
	int ret, i;
	struct thermal_trip trip;

	sensor->tzd = devm_thermal_of_zone_register(&pdev->dev,
						    sensor->id, sensor,
						    &hisi_of_thermal_ops);
	if (IS_ERR(sensor->tzd)) {
		ret = PTR_ERR(sensor->tzd);
		sensor->tzd = NULL;
		dev_err(&pdev->dev, "failed to register sensor id %d: %d\n",
			sensor->id, ret);
		return ret;
	}

	/* Cache the first passive trip point as the alarm threshold */
	for (i = 0; i < thermal_zone_get_num_trips(sensor->tzd); i++) {
		thermal_zone_get_trip(sensor->tzd, i, &trip);
		if (trip.type == THERMAL_TRIP_PASSIVE) {
			sensor->thres_temp = trip.temperature;
			break;
		}
	}

	return 0;
}

static const struct hisi_thermal_ops hi6220_ops = {
	.get_temp	= hi6220_thermal_get_temp,
	.enable_sensor	= hi6220_thermal_enable_sensor,
	.disable_sensor	= hi6220_thermal_disable_sensor,
	.irq_handler	= hi6220_thermal_irq_handler,
	.probe		= hi6220_thermal_probe,
};

static const struct hisi_thermal_ops hi3660_ops = {
	.get_temp	= hi3660_thermal_get_temp,
	.enable_sensor	= hi3660_thermal_enable_sensor,
	.disable_sensor	= hi3660_thermal_disable_sensor,
	.irq_handler	= hi3660_thermal_irq_handler,
	.probe		= hi3660_thermal_probe,
};

static const struct of_device_id of_hisi_thermal_match[] = {
	{
		.compatible = "hisilicon,tsensor",
		.data = &hi6220_ops,
	},
	{
		.compatible = "hisilicon,hi3660-tsensor",
		.data = &hi3660_ops,
	},
	{ /* end */ }
};
MODULE_DEVICE_TABLE(of, of_hisi_thermal_match);

static void hisi_thermal_toggle_sensor(struct hisi_thermal_sensor *sensor,
				       bool on)
{
	struct thermal_zone_device *tzd = sensor->tzd;

	if (on)
		thermal_zone_device_enable(tzd);
	else
		thermal_zone_device_disable(tzd);
}

static int hisi_thermal_probe(struct platform_device *pdev)
{
	struct hisi_thermal_data *data;
	struct device *dev = &pdev->dev;
	int i, ret;

	data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	data->pdev = pdev;
	platform_set_drvdata(pdev, data);
	data->ops = of_device_get_match_data(dev);

	data->regs = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(data->regs))
		return PTR_ERR(data->regs);

	ret = data->ops->probe(data);
	if (ret)
		return ret;

	for (i = 0; i < data->nr_sensors; i++) {
		struct hisi_thermal_sensor *sensor = &data->sensor[i];

		ret = hisi_thermal_register_sensor(pdev, sensor);
		if (ret) {
			dev_err(dev, "failed to register thermal sensor: %d\n",
				ret);
			return ret;
		}

		/*
		 * NOTE(review): IRQ index 0 is requested on every loop
		 * iteration; fine while nr_sensors == 1 (both probe hooks set
		 * it so), but confirm before adding more sensors.
		 */
		ret = platform_get_irq(pdev, 0);
		if (ret < 0)
			return ret;

		ret = devm_request_threaded_irq(dev, ret, NULL,
						hisi_thermal_alarm_irq_thread,
						IRQF_ONESHOT, sensor->irq_name,
						sensor);
		if (ret < 0) {
			dev_err(dev, "Failed to request alarm irq: %d\n", ret);
			return ret;
		}

		ret = data->ops->enable_sensor(sensor);
		if (ret) {
			dev_err(dev, "Failed to setup the sensor: %d\n", ret);
			return ret;
		}

		hisi_thermal_toggle_sensor(sensor, true);
	}

	return 0;
}

static int hisi_thermal_remove(struct platform_device *pdev)
{
	struct hisi_thermal_data *data = platform_get_drvdata(pdev);
	int i;

	for (i = 0; i < data->nr_sensors; i++) {
		struct hisi_thermal_sensor *sensor = &data->sensor[i];

		hisi_thermal_toggle_sensor(sensor, false);
		data->ops->disable_sensor(sensor);
	}

	return 0;
}

static int hisi_thermal_suspend(struct device *dev)
{
	struct hisi_thermal_data *data = dev_get_drvdata(dev);
	int i;

	for (i = 0; i < data->nr_sensors; i++)
		data->ops->disable_sensor(&data->sensor[i]);

	return 0;
}

static int hisi_thermal_resume(struct device *dev)
{
	struct hisi_thermal_data *data = dev_get_drvdata(dev);
	int i, ret = 0;

	/* Re-enable all sensors, accumulating any failure */
	for (i = 0; i < data->nr_sensors; i++)
		ret |= data->ops->enable_sensor(&data->sensor[i]);

	return ret;
}

static DEFINE_SIMPLE_DEV_PM_OPS(hisi_thermal_pm_ops,
				hisi_thermal_suspend, hisi_thermal_resume);

static struct platform_driver hisi_thermal_driver = {
	.driver = {
		.name		= "hisi_thermal",
		.pm		= pm_sleep_ptr(&hisi_thermal_pm_ops),
		.of_match_table = of_hisi_thermal_match,
	},
	.probe	= hisi_thermal_probe,
	.remove	= hisi_thermal_remove,
};

module_platform_driver(hisi_thermal_driver);

MODULE_AUTHOR("Xinwei Kong <[email protected]>");
MODULE_AUTHOR("Leo Yan <[email protected]>");
MODULE_DESCRIPTION("HiSilicon thermal driver");
MODULE_LICENSE("GPL v2");
linux-master
drivers/thermal/hisi_thermal.c
// SPDX-License-Identifier: GPL-2.0
/*
 * thermal.c - Generic Thermal Management Sysfs support.
 *
 * Copyright (C) 2008 Intel Corp
 * Copyright (C) 2008 Zhang Rui <[email protected]>
 * Copyright (C) 2008 Sujith Thomas <[email protected]>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/device.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/kdev_t.h>
#include <linux/idr.h>
#include <linux/thermal.h>
#include <linux/reboot.h>
#include <linux/string.h>
#include <linux/of.h>
#include <linux/suspend.h>

#define CREATE_TRACE_POINTS
#include "thermal_trace.h"

#include "thermal_core.h"
#include "thermal_hwmon.h"

static DEFINE_IDA(thermal_tz_ida);
static DEFINE_IDA(thermal_cdev_ida);

static LIST_HEAD(thermal_tz_list);
static LIST_HEAD(thermal_cdev_list);
static LIST_HEAD(thermal_governor_list);

/* Protects thermal_tz_list and thermal_cdev_list. */
static DEFINE_MUTEX(thermal_list_lock);
/* Protects thermal_governor_list and def_governor. */
static DEFINE_MUTEX(thermal_governor_lock);

static atomic_t in_suspend;

static struct thermal_governor *def_governor;

/*
 * Governor section: set of functions to handle thermal governors
 *
 * Functions to help in the life cycle of thermal governors within
 * the thermal core and by the thermal governor code.
 */

/*
 * Look up a registered governor by name.  An empty or NULL name selects
 * the default governor.  Caller must hold thermal_governor_lock.
 */
static struct thermal_governor *__find_governor(const char *name)
{
	struct thermal_governor *pos;

	if (!name || !name[0])
		return def_governor;

	list_for_each_entry(pos, &thermal_governor_list, governor_list)
		if (!strncasecmp(name, pos->name, THERMAL_NAME_LENGTH))
			return pos;

	return NULL;
}

/**
 * bind_previous_governor() - bind the previous governor of the thermal zone
 * @tz:		a valid pointer to a struct thermal_zone_device
 * @failed_gov_name:	the name of the governor that failed to register
 *
 * Register the previous governor of the thermal zone after a new
 * governor has failed to be bound.
 */
static void bind_previous_governor(struct thermal_zone_device *tz,
				   const char *failed_gov_name)
{
	if (tz->governor && tz->governor->bind_to_tz) {
		if (tz->governor->bind_to_tz(tz)) {
			dev_err(&tz->device,
				"governor %s failed to bind and the previous one (%s) failed to bind again, thermal zone %s has no governor\n",
				failed_gov_name, tz->governor->name, tz->type);
			tz->governor = NULL;
		}
	}
}

/**
 * thermal_set_governor() - Switch to another governor
 * @tz:		a valid pointer to a struct thermal_zone_device
 * @new_gov:	pointer to the new governor
 *
 * Change the governor of thermal zone @tz.
 *
 * Return: 0 on success, an error if the new governor's bind_to_tz() failed.
 */
static int thermal_set_governor(struct thermal_zone_device *tz,
				struct thermal_governor *new_gov)
{
	int ret = 0;

	if (tz->governor && tz->governor->unbind_from_tz)
		tz->governor->unbind_from_tz(tz);

	if (new_gov && new_gov->bind_to_tz) {
		ret = new_gov->bind_to_tz(tz);
		if (ret) {
			/* Fall back to the governor we just unbound. */
			bind_previous_governor(tz, new_gov->name);

			return ret;
		}
	}

	tz->governor = new_gov;

	return ret;
}

/*
 * Add a governor to the global list and attach it to any already-registered
 * zone that asked for it by name but has no governor yet.  Returns -EBUSY
 * if a governor with the same name is already registered.
 */
int thermal_register_governor(struct thermal_governor *governor)
{
	int err;
	const char *name;
	struct thermal_zone_device *pos;

	if (!governor)
		return -EINVAL;

	mutex_lock(&thermal_governor_lock);

	err = -EBUSY;
	if (!__find_governor(governor->name)) {
		bool match_default;

		err = 0;
		list_add(&governor->governor_list, &thermal_governor_list);
		match_default = !strncmp(governor->name,
					 DEFAULT_THERMAL_GOVERNOR,
					 THERMAL_NAME_LENGTH);

		if (!def_governor && match_default)
			def_governor = governor;
	}

	mutex_lock(&thermal_list_lock);

	list_for_each_entry(pos, &thermal_tz_list, node) {
		/*
		 * only thermal zones with specified tz->tzp->governor_name
		 * may run with tz->governor unset
		 */
		if (pos->governor)
			continue;

		name = pos->tzp->governor_name;

		if (!strncasecmp(name, governor->name, THERMAL_NAME_LENGTH)) {
			int ret;

			ret = thermal_set_governor(pos, governor);
			if (ret)
				dev_err(&pos->device,
					"Failed to set governor %s for thermal zone %s: %d\n",
					governor->name, pos->type, ret);
		}
	}

	mutex_unlock(&thermal_list_lock);
	mutex_unlock(&thermal_governor_lock);

	return err;
}

/*
 * Remove a governor from the global list, first detaching it from every
 * zone that currently uses it (those zones are left with no governor).
 */
void thermal_unregister_governor(struct thermal_governor *governor)
{
	struct thermal_zone_device *pos;

	if (!governor)
		return;

	mutex_lock(&thermal_governor_lock);

	if (!__find_governor(governor->name))
		goto exit;

	mutex_lock(&thermal_list_lock);

	list_for_each_entry(pos, &thermal_tz_list, node) {
		if (!strncasecmp(pos->governor->name, governor->name,
				 THERMAL_NAME_LENGTH))
			thermal_set_governor(pos, NULL);
	}

	mutex_unlock(&thermal_list_lock);
	list_del(&governor->governor_list);
exit:
	mutex_unlock(&thermal_governor_lock);
}

/*
 * Switch @tz to the governor named in @policy (sysfs "policy" attribute
 * backend).  @policy is trimmed in place.  Returns -EINVAL if the zone's
 * device is gone or no such governor exists.
 */
int thermal_zone_device_set_policy(struct thermal_zone_device *tz,
				   char *policy)
{
	struct thermal_governor *gov;
	int ret = -EINVAL;

	mutex_lock(&thermal_governor_lock);
	mutex_lock(&tz->lock);

	if (!device_is_registered(&tz->device))
		goto exit;

	gov = __find_governor(strim(policy));
	if (!gov)
		goto exit;

	ret = thermal_set_governor(tz, gov);

exit:
	mutex_unlock(&tz->lock);
	mutex_unlock(&thermal_governor_lock);

	/*
	 * NOTE(review): the netlink notification is sent even when the
	 * governor change failed above — confirm whether listeners are
	 * expected to tolerate that.
	 */
	thermal_notify_tz_gov_change(tz->id, policy);

	return ret;
}

/*
 * Emit a space-separated list of all registered governor names into @buf
 * (sysfs "available_policies" attribute backend).  Returns byte count.
 */
int thermal_build_list_of_policies(char *buf)
{
	struct thermal_governor *pos;
	ssize_t count = 0;

	mutex_lock(&thermal_governor_lock);

	list_for_each_entry(pos, &thermal_governor_list, governor_list) {
		count += sysfs_emit_at(buf, count, "%s ", pos->name);
	}
	count += sysfs_emit_at(buf, count, "\n");

	mutex_unlock(&thermal_governor_lock);

	return count;
}

/* Unregister every built-in governor (init-error unwind path). */
static void __init thermal_unregister_governors(void)
{
	struct thermal_governor **governor;

	for_each_governor_table(governor)
		thermal_unregister_governor(*governor);
}

/*
 * Register every built-in governor from the governor table; on failure,
 * roll back the ones registered so far.
 */
static int __init thermal_register_governors(void)
{
	int ret = 0;
	struct thermal_governor **governor;

	for_each_governor_table(governor) {
		ret = thermal_register_governor(*governor);
		if (ret) {
			pr_err("Failed to register governor: '%s'",
			       (*governor)->name);
			break;
		}

		pr_info("Registered thermal governor '%s'",
			(*governor)->name);
	}

	if (ret) {
		struct thermal_governor **gov;

		/* Unwind only the governors registered before the failure. */
		for_each_governor_table(gov) {
			if (gov == governor)
				break;
			thermal_unregister_governor(*gov);
		}
	}

	return ret;
}

/*
 * Zone update section: main control loop applied to each zone while monitoring
 * in polling mode. The monitoring is done using a workqueue.
 * Same update may be done on a zone by calling thermal_zone_device_update().
 *
 * An update means:
 * - Non-critical trips will invoke the governor responsible for that zone;
 * - Hot trips will produce a notification to userspace;
 * - Critical trip point will cause a system shutdown.
 */

/* (Re)arm or cancel the zone's polling delayed work. */
static void thermal_zone_device_set_polling(struct thermal_zone_device *tz,
					    unsigned long delay)
{
	if (delay)
		mod_delayed_work(system_freezable_power_efficient_wq,
				 &tz->poll_queue, delay);
	else
		cancel_delayed_work(&tz->poll_queue);
}

/*
 * Pick the next poll interval: passive delay while passive cooling is in
 * effect, the regular polling delay otherwise, or stop polling entirely
 * when the zone is disabled (or interrupt driven, delay == 0).
 */
static void monitor_thermal_zone(struct thermal_zone_device *tz)
{
	if (tz->mode != THERMAL_DEVICE_ENABLED)
		thermal_zone_device_set_polling(tz, 0);
	else if (tz->passive)
		thermal_zone_device_set_polling(tz, tz->passive_delay_jiffies);
	else if (tz->polling_delay_jiffies)
		thermal_zone_device_set_polling(tz, tz->polling_delay_jiffies);
}

/* Delegate a non-critical trip to the zone's (or default) governor. */
static void handle_non_critical_trips(struct thermal_zone_device *tz, int trip)
{
	tz->governor ? tz->governor->throttle(tz, trip) :
		       def_governor->throttle(tz, trip);
}

/* Default .critical handler: orderly emergency shutdown. */
void thermal_zone_device_critical(struct thermal_zone_device *tz)
{
	/*
	 * poweroff_delay_ms must be a carefully profiled positive value.
	 * It's a must for forced_emergency_poweroff_work to be scheduled.
	 */
	int poweroff_delay_ms = CONFIG_THERMAL_EMERGENCY_POWEROFF_DELAY_MS;

	dev_emerg(&tz->device, "%s: critical temperature reached, shutting down\n",
		  tz->type);

	hw_protection_shutdown("Temperature too high", poweroff_delay_ms);
}
EXPORT_SYMBOL(thermal_zone_device_critical);

/* Invoke the .hot / .critical zone callbacks once the trip is exceeded. */
static void handle_critical_trips(struct thermal_zone_device *tz,
				  int trip, int trip_temp,
				  enum thermal_trip_type trip_type)
{
	/* If we have not crossed the trip_temp, we do not care. */
	if (trip_temp <= 0 || tz->temperature < trip_temp)
		return;

	trace_thermal_zone_trip(tz, trip, trip_type);

	if (trip_type == THERMAL_TRIP_HOT && tz->ops->hot)
		tz->ops->hot(tz);
	else if (trip_type == THERMAL_TRIP_CRITICAL)
		tz->ops->critical(tz);
}

/*
 * Process one trip point for the current temperature: emit up/down crossing
 * notifications (with hysteresis on the way down) and dispatch to the
 * critical or governor path.
 */
static void handle_thermal_trip(struct thermal_zone_device *tz, int trip_id)
{
	struct thermal_trip trip;

	/* Ignore disabled trip points */
	if (test_bit(trip_id, &tz->trips_disabled))
		return;

	__thermal_zone_get_trip(tz, trip_id, &trip);

	if (trip.temperature == THERMAL_TEMP_INVALID)
		return;

	if (tz->last_temperature != THERMAL_TEMP_INVALID) {
		if (tz->last_temperature < trip.temperature &&
		    tz->temperature >= trip.temperature)
			thermal_notify_tz_trip_up(tz->id, trip_id,
						  tz->temperature);
		if (tz->last_temperature >= trip.temperature &&
		    tz->temperature < (trip.temperature - trip.hysteresis))
			thermal_notify_tz_trip_down(tz->id, trip_id,
						    tz->temperature);
	}

	if (trip.type == THERMAL_TRIP_CRITICAL || trip.type == THERMAL_TRIP_HOT)
		handle_critical_trips(tz, trip_id, trip.temperature, trip.type);
	else
		handle_non_critical_trips(tz, trip_id);
}

/*
 * Read the current temperature, rotate it into tz->temperature (keeping the
 * previous reading in tz->last_temperature), and publish it via tracing and
 * netlink sampling.  -EAGAIN from the sensor is silently retried later.
 */
static void update_temperature(struct thermal_zone_device *tz)
{
	int temp, ret;

	ret = __thermal_zone_get_temp(tz, &temp);
	if (ret) {
		if (ret != -EAGAIN)
			dev_warn(&tz->device,
				 "failed to read out thermal zone (%d)\n",
				 ret);
		return;
	}

	tz->last_temperature = tz->temperature;
	tz->temperature = temp;

	trace_thermal_temperature(tz);

	thermal_genl_sampling_temp(tz->id, temp);
}

/* Reset a zone's cached state so the next update starts from scratch. */
static void thermal_zone_device_init(struct thermal_zone_device *tz)
{
	struct thermal_instance *pos;

	tz->temperature = THERMAL_TEMP_INVALID;
	tz->prev_low_trip = -INT_MAX;
	tz->prev_high_trip = INT_MAX;
	list_for_each_entry(pos, &tz->thermal_instances, tz_node)
		pos->initialized = false;
}

/*
 * Core of the zone update loop: refresh the temperature, re-program the
 * sensor trip window, walk every trip point, then schedule the next poll.
 * Caller must hold tz->lock.  Skipped entirely during system suspend.
 */
void __thermal_zone_device_update(struct thermal_zone_device *tz,
				  enum thermal_notify_event event)
{
	int count;

	if (atomic_read(&in_suspend))
		return;

	if (WARN_ONCE(!tz->ops->get_temp,
		      "'%s' must not be called without 'get_temp' ops set\n",
		      __func__))
		return;

	if (!thermal_zone_device_is_enabled(tz))
		return;

	update_temperature(tz);

	__thermal_zone_set_trips(tz);

	tz->notify_event = event;

	for (count = 0; count < tz->num_trips; count++)
		handle_thermal_trip(tz, count);

	monitor_thermal_zone(tz);
}

/*
 * Enable or disable a zone, calling the optional .change_mode callback and
 * running a full update under tz->lock, then notifying userspace.
 */
static int thermal_zone_device_set_mode(struct thermal_zone_device *tz,
					enum thermal_device_mode mode)
{
	int ret = 0;

	mutex_lock(&tz->lock);

	/* do nothing if mode isn't changing */
	if (mode == tz->mode) {
		mutex_unlock(&tz->lock);

		return ret;
	}

	if (!device_is_registered(&tz->device)) {
		mutex_unlock(&tz->lock);

		return -ENODEV;
	}

	if (tz->ops->change_mode)
		ret = tz->ops->change_mode(tz, mode);

	if (!ret)
		tz->mode = mode;

	__thermal_zone_device_update(tz, THERMAL_EVENT_UNSPECIFIED);

	mutex_unlock(&tz->lock);

	if (mode == THERMAL_DEVICE_ENABLED)
		thermal_notify_tz_enable(tz->id);
	else
		thermal_notify_tz_disable(tz->id);

	return ret;
}

int thermal_zone_device_enable(struct thermal_zone_device *tz)
{
	return thermal_zone_device_set_mode(tz, THERMAL_DEVICE_ENABLED);
}
EXPORT_SYMBOL_GPL(thermal_zone_device_enable);

int thermal_zone_device_disable(struct thermal_zone_device *tz)
{
	return thermal_zone_device_set_mode(tz, THERMAL_DEVICE_DISABLED);
}
EXPORT_SYMBOL_GPL(thermal_zone_device_disable);

/* Caller must hold tz->lock. */
int thermal_zone_device_is_enabled(struct thermal_zone_device *tz)
{
	lockdep_assert_held(&tz->lock);

	return tz->mode == THERMAL_DEVICE_ENABLED;
}

/* Locked wrapper around __thermal_zone_device_update(). */
void thermal_zone_device_update(struct thermal_zone_device *tz,
				enum thermal_notify_event event)
{
	mutex_lock(&tz->lock);
	if (device_is_registered(&tz->device))
		__thermal_zone_device_update(tz, event);
	mutex_unlock(&tz->lock);
}
EXPORT_SYMBOL_GPL(thermal_zone_device_update);

/**
 * thermal_zone_device_exec - Run a callback under the zone lock.
 * @tz: Thermal zone.
 * @cb: Callback to run.
 * @data: Data to pass to the callback.
 */
void thermal_zone_device_exec(struct thermal_zone_device *tz,
			      void (*cb)(struct thermal_zone_device *,
					 unsigned long),
			      unsigned long data)
{
	mutex_lock(&tz->lock);

	cb(tz, data);

	mutex_unlock(&tz->lock);
}
EXPORT_SYMBOL_GPL(thermal_zone_device_exec);

/* Delayed-work handler driving the periodic zone update. */
static void thermal_zone_device_check(struct work_struct *work)
{
	struct thermal_zone_device *tz = container_of(work, struct
						      thermal_zone_device,
						      poll_queue.work);
	thermal_zone_device_update(tz, THERMAL_EVENT_UNSPECIFIED);
}

/*
 * Iterate over all registered governors under thermal_governor_lock;
 * stops early if @cb returns non-zero and propagates that value.
 */
int for_each_thermal_governor(int (*cb)(struct thermal_governor *, void *),
			      void *data)
{
	struct thermal_governor *gov;
	int ret = 0;

	mutex_lock(&thermal_governor_lock);
	list_for_each_entry(gov, &thermal_governor_list, governor_list) {
		ret = cb(gov, data);
		if (ret)
			break;
	}
	mutex_unlock(&thermal_governor_lock);

	return ret;
}

/* Same iteration contract as for_each_thermal_governor(), for cdevs. */
int for_each_thermal_cooling_device(int (*cb)(struct thermal_cooling_device *,
					      void *), void *data)
{
	struct thermal_cooling_device *cdev;
	int ret = 0;

	mutex_lock(&thermal_list_lock);
	list_for_each_entry(cdev, &thermal_cdev_list, node) {
		ret = cb(cdev, data);
		if (ret)
			break;
	}
	mutex_unlock(&thermal_list_lock);

	return ret;
}

/* Same iteration contract as for_each_thermal_governor(), for zones. */
int for_each_thermal_zone(int (*cb)(struct thermal_zone_device *, void *),
			  void *data)
{
	struct thermal_zone_device *tz;
	int ret = 0;

	mutex_lock(&thermal_list_lock);
	list_for_each_entry(tz, &thermal_tz_list, node) {
		ret = cb(tz, data);
		if (ret)
			break;
	}
	mutex_unlock(&thermal_list_lock);

	return ret;
}

/*
 * Find a zone by numeric id.  The returned pointer is not reference
 * counted; it is only guaranteed valid while the zone stays registered.
 */
struct thermal_zone_device *thermal_zone_get_by_id(int id)
{
	struct thermal_zone_device *tz, *match = NULL;

	mutex_lock(&thermal_list_lock);
	list_for_each_entry(tz, &thermal_tz_list, node) {
		if (tz->id == id) {
			match = tz;
			break;
		}
	}
	mutex_unlock(&thermal_list_lock);

	return match;
}

/*
 * Device management section: cooling devices, zones devices, and binding
 *
 * Set of functions provided by the thermal core for:
 * - cooling devices lifecycle: registration, unregistration,
 *				binding, and unbinding.
 * - thermal zone devices lifecycle: registration, unregistration,
 *				     binding, and unbinding.
 */

/**
 * thermal_zone_bind_cooling_device() - bind a cooling device to a thermal zone
 * @tz:		pointer to struct thermal_zone_device
 * @trip:	indicates which trip point the cooling devices is
 *		associated with in this thermal zone.
 * @cdev:	pointer to struct thermal_cooling_device
 * @upper:	the Maximum cooling state for this trip point.
 *		THERMAL_NO_LIMIT means no upper limit,
 *		and the cooling device can be in max_state.
 * @lower:	the Minimum cooling state can be used for this trip point.
 *		THERMAL_NO_LIMIT means no lower limit,
 *		and the cooling device can be in cooling state 0.
 * @weight:	The weight of the cooling device to be bound to the
 *		thermal zone. Use THERMAL_WEIGHT_DEFAULT for the
 *		default value
 *
 * This interface function bind a thermal cooling device to the certain trip
 * point of a thermal zone device.
 * This function is usually called in the thermal zone device .bind callback.
 *
 * Return: 0 on success, the proper error value otherwise.
 */
int thermal_zone_bind_cooling_device(struct thermal_zone_device *tz,
				     int trip,
				     struct thermal_cooling_device *cdev,
				     unsigned long upper, unsigned long lower,
				     unsigned int weight)
{
	struct thermal_instance *dev;
	struct thermal_instance *pos;
	struct thermal_zone_device *pos1;
	struct thermal_cooling_device *pos2;
	bool upper_no_limit;
	int result;

	if (trip >= tz->num_trips || trip < 0)
		return -EINVAL;

	/* Both @tz and @cdev must be on their respective global lists. */
	list_for_each_entry(pos1, &thermal_tz_list, node) {
		if (pos1 == tz)
			break;
	}
	list_for_each_entry(pos2, &thermal_cdev_list, node) {
		if (pos2 == cdev)
			break;
	}

	if (tz != pos1 || cdev != pos2)
		return -EINVAL;

	/* lower default 0, upper default max_state */
	lower = lower == THERMAL_NO_LIMIT ? 0 : lower;

	if (upper == THERMAL_NO_LIMIT) {
		upper = cdev->max_state;
		upper_no_limit = true;
	} else {
		upper_no_limit = false;
	}

	if (lower > upper || upper > cdev->max_state)
		return -EINVAL;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return -ENOMEM;

	dev->tz = tz;
	dev->cdev = cdev;
	dev->trip = trip;
	dev->upper = upper;
	dev->upper_no_limit = upper_no_limit;
	dev->lower = lower;
	dev->target = THERMAL_NO_TARGET;
	dev->weight = weight;

	result = ida_alloc(&tz->ida, GFP_KERNEL);
	if (result < 0)
		goto free_mem;

	dev->id = result;
	sprintf(dev->name, "cdev%d", dev->id);

	/* "cdevN" symlink from the zone to the cooling device. */
	result = sysfs_create_link(&tz->device.kobj, &cdev->device.kobj,
				   dev->name);
	if (result)
		goto release_ida;

	/* Read-only "cdevN_trip_point" attribute. */
	sprintf(dev->attr_name, "cdev%d_trip_point", dev->id);
	sysfs_attr_init(&dev->attr.attr);
	dev->attr.attr.name = dev->attr_name;
	dev->attr.attr.mode = 0444;
	dev->attr.show = trip_point_show;
	result = device_create_file(&tz->device, &dev->attr);
	if (result)
		goto remove_symbol_link;

	/* Read-write "cdevN_weight" attribute. */
	sprintf(dev->weight_attr_name, "cdev%d_weight", dev->id);
	sysfs_attr_init(&dev->weight_attr.attr);
	dev->weight_attr.attr.name = dev->weight_attr_name;
	dev->weight_attr.attr.mode = S_IWUSR | S_IRUGO;
	dev->weight_attr.show = weight_show;
	dev->weight_attr.store = weight_store;
	result = device_create_file(&tz->device, &dev->weight_attr);
	if (result)
		goto remove_trip_file;

	mutex_lock(&tz->lock);
	mutex_lock(&cdev->lock);
	/* Reject a duplicate (tz, trip, cdev) binding. */
	list_for_each_entry(pos, &tz->thermal_instances, tz_node)
		if (pos->tz == tz && pos->trip == trip && pos->cdev == cdev) {
			result = -EEXIST;
			break;
		}
	if (!result) {
		list_add_tail(&dev->tz_node, &tz->thermal_instances);
		list_add_tail(&dev->cdev_node, &cdev->thermal_instances);
		atomic_set(&tz->need_update, 1);
	}
	mutex_unlock(&cdev->lock);
	mutex_unlock(&tz->lock);

	if (!result)
		return 0;

	/* Duplicate binding: unwind everything created above. */
	device_remove_file(&tz->device, &dev->weight_attr);
remove_trip_file:
	device_remove_file(&tz->device, &dev->attr);
remove_symbol_link:
	sysfs_remove_link(&tz->device.kobj, dev->name);
release_ida:
	ida_free(&tz->ida, dev->id);
free_mem:
	kfree(dev);
	return result;
}
EXPORT_SYMBOL_GPL(thermal_zone_bind_cooling_device);

/**
 * thermal_zone_unbind_cooling_device() - unbind a cooling device from a
 *					  thermal zone.
 * @tz:		pointer to a struct thermal_zone_device.
 * @trip:	indicates which trip point the cooling devices is
 *		associated with in this thermal zone.
 * @cdev:	pointer to a struct thermal_cooling_device.
 *
 * This interface function unbind a thermal cooling device from the certain
 * trip point of a thermal zone device.
 * This function is usually called in the thermal zone device .unbind callback.
 *
 * Return: 0 on success, the proper error value otherwise.
 */
int thermal_zone_unbind_cooling_device(struct thermal_zone_device *tz,
				       int trip,
				       struct thermal_cooling_device *cdev)
{
	struct thermal_instance *pos, *next;

	mutex_lock(&tz->lock);
	mutex_lock(&cdev->lock);
	list_for_each_entry_safe(pos, next, &tz->thermal_instances, tz_node) {
		if (pos->tz == tz && pos->trip == trip && pos->cdev == cdev) {
			list_del(&pos->tz_node);
			list_del(&pos->cdev_node);
			mutex_unlock(&cdev->lock);
			mutex_unlock(&tz->lock);
			goto unbind;
		}
	}
	mutex_unlock(&cdev->lock);
	mutex_unlock(&tz->lock);

	return -ENODEV;

unbind:
	/* Tear down the sysfs pieces outside the locks. */
	device_remove_file(&tz->device, &pos->weight_attr);
	device_remove_file(&tz->device, &pos->attr);
	sysfs_remove_link(&tz->device.kobj, pos->name);
	ida_free(&tz->ida, pos->id);
	kfree(pos);
	return 0;
}
EXPORT_SYMBOL_GPL(thermal_zone_unbind_cooling_device);

/*
 * Shared .release callback for both zone and cooling devices; the device
 * kind is told apart by its sysfs name prefix.
 */
static void thermal_release(struct device *dev)
{
	struct thermal_zone_device *tz;
	struct thermal_cooling_device *cdev;

	if (!strncmp(dev_name(dev), "thermal_zone",
		     sizeof("thermal_zone") - 1)) {
		tz = to_thermal_zone(dev);
		thermal_zone_destroy_device_groups(tz);
		mutex_destroy(&tz->lock);
		kfree(tz);
	} else if (!strncmp(dev_name(dev), "cooling_device",
			    sizeof("cooling_device") - 1)) {
		cdev = to_cooling_device(dev);
		thermal_cooling_device_destroy_sysfs(cdev);
		kfree(cdev->type);
		ida_free(&thermal_cdev_ida, cdev->id);
		kfree(cdev);
	}
}

static struct class *thermal_class;

static inline
void print_bind_err_msg(struct thermal_zone_device *tz,
			struct thermal_cooling_device *cdev, int ret)
{
	dev_err(&tz->device, "binding zone %s with cdev %s failed:%d\n",
		tz->type, cdev->type, ret);
}

/*
 * Offer a newly registered cooling device to every zone's .bind callback.
 * Caller holds thermal_list_lock.
 */
static void bind_cdev(struct thermal_cooling_device *cdev)
{
	int ret;
	struct thermal_zone_device *pos = NULL;

	list_for_each_entry(pos, &thermal_tz_list, node) {
		if (pos->ops->bind) {
			ret = pos->ops->bind(pos, cdev);
			if (ret)
				print_bind_err_msg(pos, cdev, ret);
		}
	}
}

/**
 * __thermal_cooling_device_register() - register a new thermal cooling device
 * @np:		a pointer to a device tree node.
 * @type:	the thermal cooling device type.
 * @devdata:	device private data.
 * @ops:	standard thermal cooling devices callbacks.
 *
 * This interface function adds a new thermal cooling device (fan/processor/...)
 * to /sys/class/thermal/ folder as cooling_device[0-*]. It tries to bind itself
 * to all the thermal zone devices registered at the same time.
 * It also gives the opportunity to link the cooling device to a device tree
 * node, so that it can be bound to a thermal zone created out of device tree.
 *
 * Return: a pointer to the created struct thermal_cooling_device or an
 * ERR_PTR. Caller must check return value with IS_ERR*() helpers.
 */
static struct thermal_cooling_device *
__thermal_cooling_device_register(struct device_node *np,
				  const char *type, void *devdata,
				  const struct thermal_cooling_device_ops *ops)
{
	struct thermal_cooling_device *cdev;
	struct thermal_zone_device *pos = NULL;
	int id, ret;

	if (!ops || !ops->get_max_state || !ops->get_cur_state ||
	    !ops->set_cur_state)
		return ERR_PTR(-EINVAL);

	if (!thermal_class)
		return ERR_PTR(-ENODEV);

	cdev = kzalloc(sizeof(*cdev), GFP_KERNEL);
	if (!cdev)
		return ERR_PTR(-ENOMEM);

	ret = ida_alloc(&thermal_cdev_ida, GFP_KERNEL);
	if (ret < 0)
		goto out_kfree_cdev;
	cdev->id = ret;
	id = ret;

	cdev->type = kstrdup(type ? type : "", GFP_KERNEL);
	if (!cdev->type) {
		ret = -ENOMEM;
		goto out_ida_remove;
	}

	mutex_init(&cdev->lock);
	INIT_LIST_HEAD(&cdev->thermal_instances);
	cdev->np = np;
	cdev->ops = ops;
	cdev->updated = false;
	cdev->device.class = thermal_class;
	cdev->devdata = devdata;

	ret = cdev->ops->get_max_state(cdev, &cdev->max_state);
	if (ret)
		goto out_cdev_type;

	thermal_cooling_device_setup_sysfs(cdev);

	ret = dev_set_name(&cdev->device, "cooling_device%d", cdev->id);
	if (ret)
		goto out_cooling_dev;

	ret = device_register(&cdev->device);
	if (ret) {
		/* thermal_release() handles rest of the cleanup */
		put_device(&cdev->device);
		return ERR_PTR(ret);
	}

	/* Add 'this' new cdev to the global cdev list */
	mutex_lock(&thermal_list_lock);

	list_add(&cdev->node, &thermal_cdev_list);

	/* Update binding information for 'this' new cdev */
	bind_cdev(cdev);

	list_for_each_entry(pos, &thermal_tz_list, node)
		if (atomic_cmpxchg(&pos->need_update, 1, 0))
			thermal_zone_device_update(pos,
						   THERMAL_EVENT_UNSPECIFIED);

	mutex_unlock(&thermal_list_lock);

	return cdev;

out_cooling_dev:
	thermal_cooling_device_destroy_sysfs(cdev);
out_cdev_type:
	kfree(cdev->type);
out_ida_remove:
	ida_free(&thermal_cdev_ida, id);
out_kfree_cdev:
	kfree(cdev);
	return ERR_PTR(ret);
}

/**
 * thermal_cooling_device_register() - register a new thermal cooling device
 * @type:	the thermal cooling device type.
 * @devdata:	device private data.
 * @ops:	standard thermal cooling devices callbacks.
 *
 * This interface function adds a new thermal cooling device (fan/processor/...)
 * to /sys/class/thermal/ folder as cooling_device[0-*]. It tries to bind itself
 * to all the thermal zone devices registered at the same time.
 *
 * Return: a pointer to the created struct thermal_cooling_device or an
 * ERR_PTR. Caller must check return value with IS_ERR*() helpers.
 */
struct thermal_cooling_device *
thermal_cooling_device_register(const char *type, void *devdata,
				const struct thermal_cooling_device_ops *ops)
{
	return __thermal_cooling_device_register(NULL, type, devdata, ops);
}
EXPORT_SYMBOL_GPL(thermal_cooling_device_register);

/**
 * thermal_of_cooling_device_register() - register an OF thermal cooling device
 * @np:		a pointer to a device tree node.
 * @type:	the thermal cooling device type.
 * @devdata:	device private data.
 * @ops:	standard thermal cooling devices callbacks.
 *
 * This function will register a cooling device with device tree node reference.
 * This interface function adds a new thermal cooling device (fan/processor/...)
 * to /sys/class/thermal/ folder as cooling_device[0-*]. It tries to bind itself
 * to all the thermal zone devices registered at the same time.
 *
 * Return: a pointer to the created struct thermal_cooling_device or an
 * ERR_PTR. Caller must check return value with IS_ERR*() helpers.
 */
struct thermal_cooling_device *
thermal_of_cooling_device_register(struct device_node *np,
				   const char *type, void *devdata,
				   const struct thermal_cooling_device_ops *ops)
{
	return __thermal_cooling_device_register(np, type, devdata, ops);
}
EXPORT_SYMBOL_GPL(thermal_of_cooling_device_register);

/* devres release action: unregister the managed cooling device. */
static void thermal_cooling_device_release(struct device *dev, void *res)
{
	thermal_cooling_device_unregister(
				*(struct thermal_cooling_device **)res);
}

/**
 * devm_thermal_of_cooling_device_register() - register an OF thermal cooling
 *					       device
 * @dev:	a valid struct device pointer of a sensor device.
 * @np:		a pointer to a device tree node.
 * @type:	the thermal cooling device type.
 * @devdata:	device private data.
 * @ops:	standard thermal cooling devices callbacks.
 *
 * This function will register a cooling device with device tree node reference.
 * This interface function adds a new thermal cooling device (fan/processor/...)
 * to /sys/class/thermal/ folder as cooling_device[0-*]. It tries to bind itself
 * to all the thermal zone devices registered at the same time.
 *
 * Return: a pointer to the created struct thermal_cooling_device or an
 * ERR_PTR. Caller must check return value with IS_ERR*() helpers.
 */
struct thermal_cooling_device *
devm_thermal_of_cooling_device_register(struct device *dev,
				struct device_node *np,
				char *type, void *devdata,
				const struct thermal_cooling_device_ops *ops)
{
	struct thermal_cooling_device **ptr, *tcd;

	ptr = devres_alloc(thermal_cooling_device_release, sizeof(*ptr),
			   GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	tcd = __thermal_cooling_device_register(np, type, devdata, ops);
	if (IS_ERR(tcd)) {
		devres_free(ptr);
		return tcd;
	}

	*ptr = tcd;
	devres_add(dev, ptr);

	return tcd;
}
EXPORT_SYMBOL_GPL(devm_thermal_of_cooling_device_register);

/* Caller must hold thermal_list_lock. */
static bool thermal_cooling_device_present(struct thermal_cooling_device *cdev)
{
	struct thermal_cooling_device *pos = NULL;

	list_for_each_entry(pos, &thermal_cdev_list, node) {
		if (pos == cdev)
			return true;
	}

	return false;
}

/**
 * thermal_cooling_device_update - Update a cooling device object
 * @cdev: Target cooling device.
 *
 * Update @cdev to reflect a change of the underlying hardware or platform.
 *
 * Must be called when the maximum cooling state of @cdev becomes invalid and so
 * its .get_max_state() callback needs to be run to produce the new maximum
 * cooling state value.
 */
void thermal_cooling_device_update(struct thermal_cooling_device *cdev)
{
	struct thermal_instance *ti;
	unsigned long state;

	if (IS_ERR_OR_NULL(cdev))
		return;

	/*
	 * Hold thermal_list_lock throughout the update to prevent the device
	 * from going away while being updated.
	 */
	mutex_lock(&thermal_list_lock);

	if (!thermal_cooling_device_present(cdev))
		goto unlock_list;

	/*
	 * Update under the cdev lock to prevent the state from being set beyond
	 * the new limit concurrently.
	 */
	mutex_lock(&cdev->lock);

	if (cdev->ops->get_max_state(cdev, &cdev->max_state))
		goto unlock;

	thermal_cooling_device_stats_reinit(cdev);

	/* Clamp every bound instance's limits to the new max_state. */
	list_for_each_entry(ti, &cdev->thermal_instances, cdev_node) {
		if (ti->upper == cdev->max_state)
			continue;

		if (ti->upper < cdev->max_state) {
			/* Instances bound with THERMAL_NO_LIMIT track max. */
			if (ti->upper_no_limit)
				ti->upper = cdev->max_state;

			continue;
		}

		ti->upper = cdev->max_state;
		if (ti->lower > ti->upper)
			ti->lower = ti->upper;

		if (ti->target == THERMAL_NO_TARGET)
			continue;

		if (ti->target > ti->upper)
			ti->target = ti->upper;
	}

	if (cdev->ops->get_cur_state(cdev, &state) || state > cdev->max_state)
		goto unlock;

	thermal_cooling_device_stats_update(cdev, state);

unlock:
	mutex_unlock(&cdev->lock);

unlock_list:
	mutex_unlock(&thermal_list_lock);
}
EXPORT_SYMBOL_GPL(thermal_cooling_device_update);

/**
 * thermal_cooling_device_unregister - removes a thermal cooling device
 * @cdev: the thermal cooling device to remove.
 *
 * thermal_cooling_device_unregister() must be called when a registered
 * thermal cooling device is no longer needed.
 */
void thermal_cooling_device_unregister(struct thermal_cooling_device *cdev)
{
	struct thermal_zone_device *tz;

	if (!cdev)
		return;

	mutex_lock(&thermal_list_lock);

	if (!thermal_cooling_device_present(cdev)) {
		mutex_unlock(&thermal_list_lock);
		return;
	}

	list_del(&cdev->node);

	/* Unbind all thermal zones associated with 'this' cdev */
	list_for_each_entry(tz, &thermal_tz_list, node) {
		if (tz->ops->unbind)
			tz->ops->unbind(tz, cdev);
	}

	mutex_unlock(&thermal_list_lock);

	device_unregister(&cdev->device);
}
EXPORT_SYMBOL_GPL(thermal_cooling_device_unregister);

/* Offer every registered cooling device to a new zone's .bind callback. */
static void bind_tz(struct thermal_zone_device *tz)
{
	int ret;
	struct thermal_cooling_device *pos = NULL;

	if (!tz->ops->bind)
		return;

	mutex_lock(&thermal_list_lock);

	list_for_each_entry(pos, &thermal_cdev_list, node) {
		ret = tz->ops->bind(tz, pos);
		if (ret)
			print_bind_err_msg(tz, pos, ret);
	}

	mutex_unlock(&thermal_list_lock);
}

/*
 * Convert a millisecond delay to jiffies, rounding delays above one
 * second to whole jiffies so timers can be batched.
 */
static void thermal_set_delay_jiffies(unsigned long *delay_jiffies,
				      int delay_ms)
{
	*delay_jiffies = msecs_to_jiffies(delay_ms);
	if (delay_ms > 1000)
		*delay_jiffies = round_jiffies(*delay_jiffies);
}

/*
 * Return the zone's critical trip temperature, preferring the zone's own
 * .get_crit_temp callback over scanning the trip table.
 */
int thermal_zone_get_crit_temp(struct thermal_zone_device *tz, int *temp)
{
	int i, ret = -EINVAL;

	if (tz->ops->get_crit_temp)
		return tz->ops->get_crit_temp(tz, temp);

	if (!tz->trips)
		return -EINVAL;

	mutex_lock(&tz->lock);

	for (i = 0; i < tz->num_trips; i++) {
		if (tz->trips[i].type == THERMAL_TRIP_CRITICAL) {
			*temp = tz->trips[i].temperature;
			ret = 0;
			break;
		}
	}

	mutex_unlock(&tz->lock);

	return ret;
}
EXPORT_SYMBOL_GPL(thermal_zone_get_crit_temp);

/**
 * thermal_zone_device_register_with_trips() - register a new thermal zone device
 * @type:	the thermal zone device type
 * @trips:	a pointer to an array of thermal trips
 * @num_trips:	the number of trip points the thermal zone support
 * @mask:	a bit string indicating the writeablility of trip points
 * @devdata:	private device data
 * @ops:	standard thermal zone device callbacks
 * @tzp:	thermal zone platform parameters
 * @passive_delay: number of milliseconds to wait between polls when
 *		   performing passive cooling
 * @polling_delay: number of milliseconds to wait between polls when checking
 *		   whether trip points have been crossed (0 for interrupt
 *		   driven systems)
 *
 * This interface function adds a new thermal zone device (sensor) to
 * /sys/class/thermal folder as thermal_zone[0-*]. It tries to bind all the
 * thermal cooling devices registered at the same time.
 * thermal_zone_device_unregister() must be called when the device is no
 * longer needed. The passive cooling depends on the .get_trend() return value.
 *
 * Return: a pointer to the created struct thermal_zone_device or an
 * in case of error, an ERR_PTR. Caller must check return value with
 * IS_ERR*() helpers.
 */
struct thermal_zone_device *
thermal_zone_device_register_with_trips(const char *type,
					struct thermal_trip *trips,
					int num_trips, int mask,
					void *devdata,
					struct thermal_zone_device_ops *ops,
					const struct thermal_zone_params *tzp,
					int passive_delay, int polling_delay)
{
	struct thermal_zone_device *tz;
	int id;
	int result;
	int count;
	struct thermal_governor *governor;

	if (!type || strlen(type) == 0) {
		pr_err("No thermal zone type defined\n");
		return ERR_PTR(-EINVAL);
	}

	if (strlen(type) >= THERMAL_NAME_LENGTH) {
		pr_err("Thermal zone name (%s) too long, should be under %d chars\n",
		       type, THERMAL_NAME_LENGTH);
		return ERR_PTR(-EINVAL);
	}

	/*
	 * Max trip count can't exceed 31 as the "mask >> num_trips" condition.
	 * For example, shifting by 32 will result in compiler warning:
	 * warning: right shift count >= width of type [-Wshift-count-
	 * overflow]
	 *
	 * Also "mask >> num_trips" will always be true with 32 bit shift.
	 * E.g. mask = 0x80000000 for trip id 31 to be RW. Then
	 * mask >> 32 = 0x80000000
	 * This will result in failure for the below condition.
	 *
	 * Check will be true when the bit 31 of the mask is set.
	 * 32 bit shift will cause overflow of 4 byte integer.
	 */
	if (num_trips > (BITS_PER_TYPE(int) - 1) || num_trips < 0 ||
	    mask >> num_trips) {
		pr_err("Incorrect number of thermal trips\n");
		return ERR_PTR(-EINVAL);
	}

	if (!ops) {
		pr_err("Thermal zone device ops not defined\n");
		return ERR_PTR(-EINVAL);
	}

	if (num_trips > 0 && !trips)
		return ERR_PTR(-EINVAL);

	if (!thermal_class)
		return ERR_PTR(-ENODEV);

	tz = kzalloc(sizeof(*tz), GFP_KERNEL);
	if (!tz)
		return ERR_PTR(-ENOMEM);

	if (tzp) {
		tz->tzp = kmemdup(tzp, sizeof(*tzp), GFP_KERNEL);
		if (!tz->tzp) {
			result = -ENOMEM;
			goto free_tz;
		}
	}

	INIT_LIST_HEAD(&tz->thermal_instances);
	ida_init(&tz->ida);
	mutex_init(&tz->lock);
	id = ida_alloc(&thermal_tz_ida, GFP_KERNEL);
	if (id < 0) {
		result = id;
		goto free_tzp;
	}

	tz->id = id;
	strscpy(tz->type, type, sizeof(tz->type));

	if (!ops->critical)
		ops->critical = thermal_zone_device_critical;

	tz->ops = ops;
	tz->device.class = thermal_class;
	tz->devdata = devdata;
	tz->trips = trips;
	tz->num_trips = num_trips;

	thermal_set_delay_jiffies(&tz->passive_delay_jiffies, passive_delay);
	thermal_set_delay_jiffies(&tz->polling_delay_jiffies, polling_delay);

	/* sys I/F */
	/* Add nodes that are always present via .groups */
	result = thermal_zone_create_device_groups(tz, mask);
	if (result)
		goto remove_id;

	/* A new thermal zone needs to be updated anyway. */
	atomic_set(&tz->need_update, 1);

	result = dev_set_name(&tz->device, "thermal_zone%d", tz->id);
	if (result) {
		thermal_zone_destroy_device_groups(tz);
		goto remove_id;
	}
	result = device_register(&tz->device);
	if (result)
		goto release_device;

	/* Disable trips that fail to read or report a zero temperature. */
	for (count = 0; count < num_trips; count++) {
		struct thermal_trip trip;

		result = thermal_zone_get_trip(tz, count, &trip);
		if (result || !trip.temperature)
			set_bit(count, &tz->trips_disabled);
	}

	/* Update 'this' zone's governor information */
	mutex_lock(&thermal_governor_lock);

	if (tz->tzp)
		governor = __find_governor(tz->tzp->governor_name);
	else
		governor = def_governor;

	result = thermal_set_governor(tz, governor);
	if (result) {
		mutex_unlock(&thermal_governor_lock);
		goto unregister;
	}

	mutex_unlock(&thermal_governor_lock);

	if (!tz->tzp || !tz->tzp->no_hwmon) {
		result = thermal_add_hwmon_sysfs(tz);
		if (result)
			goto unregister;
	}

	mutex_lock(&thermal_list_lock);
	list_add_tail(&tz->node, &thermal_tz_list);
	mutex_unlock(&thermal_list_lock);

	/* Bind cooling devices for this zone */
	bind_tz(tz);

	INIT_DELAYED_WORK(&tz->poll_queue, thermal_zone_device_check);

	thermal_zone_device_init(tz);
	/* Update the new thermal zone and mark it as already updated. */
	if (atomic_cmpxchg(&tz->need_update, 1, 0))
		thermal_zone_device_update(tz, THERMAL_EVENT_UNSPECIFIED);

	thermal_notify_tz_create(tz->id, tz->type);

	return tz;

unregister:
	device_del(&tz->device);
release_device:
	/* thermal_release() frees tz; avoid touching it afterwards. */
	put_device(&tz->device);
	tz = NULL;
remove_id:
	ida_free(&thermal_tz_ida, id);
free_tzp:
	kfree(tz->tzp);
free_tz:
	kfree(tz);
	return ERR_PTR(result);
}
EXPORT_SYMBOL_GPL(thermal_zone_device_register_with_trips);

/* Convenience wrapper for zones without any trip points. */
struct thermal_zone_device *thermal_tripless_zone_device_register(
					const char *type,
					void *devdata,
					struct thermal_zone_device_ops *ops,
					const struct thermal_zone_params *tzp)
{
	return thermal_zone_device_register_with_trips(type, NULL, 0, 0,
						       devdata, ops, tzp,
						       0, 0);
}
EXPORT_SYMBOL_GPL(thermal_tripless_zone_device_register);

void *thermal_zone_device_priv(struct thermal_zone_device *tzd)
{
	return tzd->devdata;
}
EXPORT_SYMBOL_GPL(thermal_zone_device_priv);

const char *thermal_zone_device_type(struct thermal_zone_device *tzd)
{
	return tzd->type;
}
EXPORT_SYMBOL_GPL(thermal_zone_device_type);

int thermal_zone_device_id(struct thermal_zone_device *tzd)
{
	return tzd->id;
}
EXPORT_SYMBOL_GPL(thermal_zone_device_id);

struct device *thermal_zone_device(struct thermal_zone_device *tzd)
{
	return &tzd->device;
}
EXPORT_SYMBOL_GPL(thermal_zone_device);

/**
 * thermal_zone_device_unregister - removes the registered thermal zone device
 * @tz: the thermal zone device to remove
 */
void thermal_zone_device_unregister(struct thermal_zone_device *tz)
{
	int tz_id;
	struct thermal_cooling_device *cdev;
	struct thermal_zone_device *pos = NULL;

	if (!tz)
		return;

	tz_id = tz->id;

	mutex_lock(&thermal_list_lock);

	list_for_each_entry(pos, &thermal_tz_list, node)
		if (pos == tz)
			break;
	if (pos != tz) {
		/* thermal zone device not found */
		mutex_unlock(&thermal_list_lock);
		return;
	}

	list_del(&tz->node);

	/* Unbind all cdevs associated with 'this' thermal zone */
	list_for_each_entry(cdev, &thermal_cdev_list, node)
		if (tz->ops->unbind)
			tz->ops->unbind(tz, cdev);
mutex_unlock(&thermal_list_lock); cancel_delayed_work_sync(&tz->poll_queue); thermal_set_governor(tz, NULL); thermal_remove_hwmon_sysfs(tz); ida_free(&thermal_tz_ida, tz->id); ida_destroy(&tz->ida); mutex_lock(&tz->lock); device_del(&tz->device); mutex_unlock(&tz->lock); kfree(tz->tzp); put_device(&tz->device); thermal_notify_tz_delete(tz_id); } EXPORT_SYMBOL_GPL(thermal_zone_device_unregister); /** * thermal_zone_get_zone_by_name() - search for a zone and returns its ref * @name: thermal zone name to fetch the temperature * * When only one zone is found with the passed name, returns a reference to it. * * Return: On success returns a reference to an unique thermal zone with * matching name equals to @name, an ERR_PTR otherwise (-EINVAL for invalid * paramenters, -ENODEV for not found and -EEXIST for multiple matches). */ struct thermal_zone_device *thermal_zone_get_zone_by_name(const char *name) { struct thermal_zone_device *pos = NULL, *ref = ERR_PTR(-EINVAL); unsigned int found = 0; if (!name) goto exit; mutex_lock(&thermal_list_lock); list_for_each_entry(pos, &thermal_tz_list, node) if (!strncasecmp(name, pos->type, THERMAL_NAME_LENGTH)) { found++; ref = pos; } mutex_unlock(&thermal_list_lock); /* nothing has been found, thus an error code for it */ if (found == 0) ref = ERR_PTR(-ENODEV); else if (found > 1) /* Success only when an unique zone is found */ ref = ERR_PTR(-EEXIST); exit: return ref; } EXPORT_SYMBOL_GPL(thermal_zone_get_zone_by_name); static int thermal_pm_notify(struct notifier_block *nb, unsigned long mode, void *_unused) { struct thermal_zone_device *tz; switch (mode) { case PM_HIBERNATION_PREPARE: case PM_RESTORE_PREPARE: case PM_SUSPEND_PREPARE: atomic_set(&in_suspend, 1); break; case PM_POST_HIBERNATION: case PM_POST_RESTORE: case PM_POST_SUSPEND: atomic_set(&in_suspend, 0); list_for_each_entry(tz, &thermal_tz_list, node) { thermal_zone_device_init(tz); thermal_zone_device_update(tz, THERMAL_EVENT_UNSPECIFIED); } break; default: break; } 
return 0; } static struct notifier_block thermal_pm_nb = { .notifier_call = thermal_pm_notify, }; static int __init thermal_init(void) { int result; result = thermal_netlink_init(); if (result) goto error; result = thermal_register_governors(); if (result) goto unregister_netlink; thermal_class = kzalloc(sizeof(*thermal_class), GFP_KERNEL); if (!thermal_class) { result = -ENOMEM; goto unregister_governors; } thermal_class->name = "thermal"; thermal_class->dev_release = thermal_release; result = class_register(thermal_class); if (result) { kfree(thermal_class); thermal_class = NULL; goto unregister_governors; } result = register_pm_notifier(&thermal_pm_nb); if (result) pr_warn("Thermal: Can not register suspend notifier, return %d\n", result); return 0; unregister_governors: thermal_unregister_governors(); unregister_netlink: thermal_netlink_exit(); error: mutex_destroy(&thermal_list_lock); mutex_destroy(&thermal_governor_lock); return result; } postcore_initcall(thermal_init);
linux-master
drivers/thermal/thermal_core.c
// SPDX-License-Identifier: GPL-2.0-or-later /* * Khadas MCU Controlled FAN driver * * Copyright (C) 2020 BayLibre SAS * Author(s): Neil Armstrong <[email protected]> */ #include <linux/module.h> #include <linux/of.h> #include <linux/platform_device.h> #include <linux/mfd/khadas-mcu.h> #include <linux/regmap.h> #include <linux/sysfs.h> #include <linux/thermal.h> #define MAX_LEVEL 3 struct khadas_mcu_fan_ctx { struct khadas_mcu *mcu; unsigned int level; struct thermal_cooling_device *cdev; }; static int khadas_mcu_fan_set_level(struct khadas_mcu_fan_ctx *ctx, unsigned int level) { int ret; ret = regmap_write(ctx->mcu->regmap, KHADAS_MCU_CMD_FAN_STATUS_CTRL_REG, level); if (ret) return ret; ctx->level = level; return 0; } static int khadas_mcu_fan_get_max_state(struct thermal_cooling_device *cdev, unsigned long *state) { *state = MAX_LEVEL; return 0; } static int khadas_mcu_fan_get_cur_state(struct thermal_cooling_device *cdev, unsigned long *state) { struct khadas_mcu_fan_ctx *ctx = cdev->devdata; *state = ctx->level; return 0; } static int khadas_mcu_fan_set_cur_state(struct thermal_cooling_device *cdev, unsigned long state) { struct khadas_mcu_fan_ctx *ctx = cdev->devdata; if (state > MAX_LEVEL) return -EINVAL; if (state == ctx->level) return 0; return khadas_mcu_fan_set_level(ctx, state); } static const struct thermal_cooling_device_ops khadas_mcu_fan_cooling_ops = { .get_max_state = khadas_mcu_fan_get_max_state, .get_cur_state = khadas_mcu_fan_get_cur_state, .set_cur_state = khadas_mcu_fan_set_cur_state, }; static int khadas_mcu_fan_probe(struct platform_device *pdev) { struct khadas_mcu *mcu = dev_get_drvdata(pdev->dev.parent); struct thermal_cooling_device *cdev; struct device *dev = &pdev->dev; struct khadas_mcu_fan_ctx *ctx; int ret; ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL); if (!ctx) return -ENOMEM; ctx->mcu = mcu; platform_set_drvdata(pdev, ctx); cdev = devm_thermal_of_cooling_device_register(dev->parent, dev->parent->of_node, "khadas-mcu-fan", 
ctx, &khadas_mcu_fan_cooling_ops); if (IS_ERR(cdev)) { ret = PTR_ERR(cdev); dev_err(dev, "Failed to register khadas-mcu-fan as cooling device: %d\n", ret); return ret; } ctx->cdev = cdev; return 0; } static void khadas_mcu_fan_shutdown(struct platform_device *pdev) { struct khadas_mcu_fan_ctx *ctx = platform_get_drvdata(pdev); khadas_mcu_fan_set_level(ctx, 0); } #ifdef CONFIG_PM_SLEEP static int khadas_mcu_fan_suspend(struct device *dev) { struct khadas_mcu_fan_ctx *ctx = dev_get_drvdata(dev); unsigned int level_save = ctx->level; int ret; ret = khadas_mcu_fan_set_level(ctx, 0); if (ret) return ret; ctx->level = level_save; return 0; } static int khadas_mcu_fan_resume(struct device *dev) { struct khadas_mcu_fan_ctx *ctx = dev_get_drvdata(dev); return khadas_mcu_fan_set_level(ctx, ctx->level); } #endif static SIMPLE_DEV_PM_OPS(khadas_mcu_fan_pm, khadas_mcu_fan_suspend, khadas_mcu_fan_resume); static const struct platform_device_id khadas_mcu_fan_id_table[] = { { .name = "khadas-mcu-fan-ctrl", }, {}, }; MODULE_DEVICE_TABLE(platform, khadas_mcu_fan_id_table); static struct platform_driver khadas_mcu_fan_driver = { .probe = khadas_mcu_fan_probe, .shutdown = khadas_mcu_fan_shutdown, .driver = { .name = "khadas-mcu-fan-ctrl", .pm = &khadas_mcu_fan_pm, }, .id_table = khadas_mcu_fan_id_table, }; module_platform_driver(khadas_mcu_fan_driver); MODULE_AUTHOR("Neil Armstrong <[email protected]>"); MODULE_DESCRIPTION("Khadas MCU FAN driver"); MODULE_LICENSE("GPL");
linux-master
drivers/thermal/khadas_mcu_fan.c
// SPDX-License-Identifier: GPL-2.0
/*
 * thermal_helpers.c - helper functions to handle thermal devices
 *
 * Copyright (C) 2016 Eduardo Valentin <[email protected]>
 *
 * Highly based on original thermal_core.c
 * Copyright (C) 2008 Intel Corp
 * Copyright (C) 2008 Zhang Rui <[email protected]>
 * Copyright (C) 2008 Sujith Thomas <[email protected]>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/device.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/sysfs.h>

#include "thermal_core.h"
#include "thermal_trace.h"

/*
 * Return the temperature trend for @trip_index of @tz.  The driver's
 * .get_trend() callback is consulted first; if the zone is emulating a
 * temperature, the callback is absent, or it fails, fall back to comparing
 * the current reading against the previous one.
 */
int get_tz_trend(struct thermal_zone_device *tz, int trip_index)
{
	/* tz->trips may be NULL for a tripless zone; pass NULL through then. */
	struct thermal_trip *trip = tz->trips ? &tz->trips[trip_index] : NULL;
	enum thermal_trend trend;

	if (tz->emul_temperature || !tz->ops->get_trend ||
	    tz->ops->get_trend(tz, trip, &trend)) {
		if (tz->temperature > tz->last_temperature)
			trend = THERMAL_TREND_RAISING;
		else if (tz->temperature < tz->last_temperature)
			trend = THERMAL_TREND_DROPPING;
		else
			trend = THERMAL_TREND_STABLE;
	}

	return trend;
}

/*
 * Look up the thermal_instance binding (@tz, @trip) to @cdev, or NULL if no
 * such binding exists.  Lock order is tz->lock then cdev->lock - the same
 * order used elsewhere in the subsystem; do not reorder.
 */
struct thermal_instance *
get_thermal_instance(struct thermal_zone_device *tz,
		     struct thermal_cooling_device *cdev, int trip)
{
	struct thermal_instance *pos = NULL;
	struct thermal_instance *target_instance = NULL;

	mutex_lock(&tz->lock);
	mutex_lock(&cdev->lock);

	list_for_each_entry(pos, &tz->thermal_instances, tz_node) {
		if (pos->tz == tz && pos->trip == trip && pos->cdev == cdev) {
			target_instance = pos;
			break;
		}
	}

	mutex_unlock(&cdev->lock);
	mutex_unlock(&tz->lock);

	return target_instance;
}
EXPORT_SYMBOL(get_thermal_instance);

/**
 * __thermal_zone_get_temp() - returns the temperature of a thermal zone
 * @tz: a valid pointer to a struct thermal_zone_device
 * @temp: a valid pointer to where to store the resulting temperature.
 *
 * When a valid thermal zone reference is passed, it will fetch its
 * temperature and fill @temp.
 *
 * Both tz and tz->ops must be valid pointers when calling this function,
 * and the tz->ops->get_temp callback must be provided.
 * The function must be called under tz->lock.
 *
 * Return: On success returns 0, an error code otherwise
 */
int __thermal_zone_get_temp(struct thermal_zone_device *tz, int *temp)
{
	int ret = -EINVAL;
	int count;
	/* Defaults to INT_MAX so emulation is allowed if no critical trip. */
	int crit_temp = INT_MAX;
	struct thermal_trip trip;

	lockdep_assert_held(&tz->lock);

	ret = tz->ops->get_temp(tz, temp);

	if (IS_ENABLED(CONFIG_THERMAL_EMULATION) && tz->emul_temperature) {
		/* Find the critical trip temperature, if any. */
		for (count = 0; count < tz->num_trips; count++) {
			ret = __thermal_zone_get_trip(tz, count, &trip);
			if (!ret && trip.type == THERMAL_TRIP_CRITICAL) {
				crit_temp = trip.temperature;
				break;
			}
		}

		/*
		 * Only allow emulating a temperature when the real temperature
		 * is below the critical temperature so that the emulation code
		 * cannot hide critical conditions.
		 */
		if (!ret && *temp < crit_temp)
			*temp = tz->emul_temperature;
	}

	if (ret)
		dev_dbg(&tz->device, "Failed to get temperature: %d\n", ret);

	return ret;
}

/**
 * thermal_zone_get_temp() - returns the temperature of a thermal zone
 * @tz: a valid pointer to a struct thermal_zone_device
 * @temp: a valid pointer to where to store the resulting temperature.
 *
 * When a valid thermal zone reference is passed, it will fetch its
 * temperature and fill @temp.
 *
 * Locked wrapper around __thermal_zone_get_temp(); also rejects zones
 * whose device has already been unregistered.
 *
 * Return: On success returns 0, an error code otherwise
 */
int thermal_zone_get_temp(struct thermal_zone_device *tz, int *temp)
{
	int ret;

	if (IS_ERR_OR_NULL(tz))
		return -EINVAL;

	mutex_lock(&tz->lock);

	if (!tz->ops->get_temp) {
		ret = -EINVAL;
		goto unlock;
	}

	if (device_is_registered(&tz->device))
		ret = __thermal_zone_get_temp(tz, temp);
	else
		ret = -ENODEV;

unlock:
	mutex_unlock(&tz->lock);

	return ret;
}
EXPORT_SYMBOL_GPL(thermal_zone_get_temp);

/*
 * Apply @target to @cdev and, only on success, emit the netlink state
 * notification and update the cooling-device statistics.
 */
static void thermal_cdev_set_cur_state(struct thermal_cooling_device *cdev,
				       int target)
{
	if (cdev->ops->set_cur_state(cdev, target))
		return;

	thermal_notify_cdev_state_update(cdev->id, target);
	thermal_cooling_device_stats_update(cdev, target);
}

/*
 * Recompute and apply @cdev's state as the maximum target over all bound
 * instances.  Caller must hold cdev->lock (see thermal_cdev_update()).
 */
void __thermal_cdev_update(struct thermal_cooling_device *cdev)
{
	struct thermal_instance *instance;
	unsigned long target = 0;

	/* Make sure cdev enters the deepest cooling state */
	list_for_each_entry(instance, &cdev->thermal_instances, cdev_node) {
		dev_dbg(&cdev->device, "zone%d->target=%lu\n",
			instance->tz->id, instance->target);
		if (instance->target == THERMAL_NO_TARGET)
			continue;
		if (instance->target > target)
			target = instance->target;
	}

	thermal_cdev_set_cur_state(cdev, target);

	trace_cdev_update(cdev, target);
	dev_dbg(&cdev->device, "set to state %lu\n", target);
}

/**
 * thermal_cdev_update - update cooling device state if needed
 * @cdev:	pointer to struct thermal_cooling_device
 *
 * Update the cooling device state if there is a need.
 */
void thermal_cdev_update(struct thermal_cooling_device *cdev)
{
	mutex_lock(&cdev->lock);
	/* 'updated' is the dirty flag; cleared by governors when they act. */
	if (!cdev->updated) {
		__thermal_cdev_update(cdev);
		cdev->updated = true;
	}
	mutex_unlock(&cdev->lock);
}

/**
 * thermal_zone_get_slope - return the slope attribute of the thermal zone
 * @tz: thermal zone device with the slope attribute
 *
 * Return: If the thermal zone device has a slope attribute, return it, else
 * return 1.
 */
int thermal_zone_get_slope(struct thermal_zone_device *tz)
{
	if (tz && tz->tzp)
		return tz->tzp->slope;
	return 1;
}
EXPORT_SYMBOL_GPL(thermal_zone_get_slope);

/**
 * thermal_zone_get_offset - return the offset attribute of the thermal zone
 * @tz: thermal zone device with the offset attribute
 *
 * Return: If the thermal zone device has a offset attribute, return it, else
 * return 0.
 */
int thermal_zone_get_offset(struct thermal_zone_device *tz)
{
	if (tz && tz->tzp)
		return tz->tzp->offset;
	return 0;
}
EXPORT_SYMBOL_GPL(thermal_zone_get_offset);
linux-master
drivers/thermal/thermal_helpers.c
// SPDX-License-Identifier: GPL-2.0+
/*
 * Amlogic Thermal Sensor Driver
 *
 * Copyright (C) 2017 Huan Biao <[email protected]>
 * Copyright (C) 2019 Guillaume La Roque <[email protected]>
 *
 * Register value to celsius temperature formulas:
 *	Read_Val	    m * U
 * U = ---------, Uptat = ---------
 *	2^16		  1 + n * U
 *
 * Temperature = A * ( Uptat + u_efuse / 2^16 )- B
 *
 * A B m n : calibration parameters
 * u_efuse : fused calibration value, it's a signed 16 bits value
 */

#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/thermal.h>

#include "thermal_hwmon.h"

/* Sensor configuration register and its control bits. */
#define TSENSOR_CFG_REG1			0x4
	#define TSENSOR_CFG_REG1_RSET_VBG	BIT(12)
	#define TSENSOR_CFG_REG1_RSET_ADC	BIT(11)
	#define TSENSOR_CFG_REG1_VCM_EN		BIT(10)
	#define TSENSOR_CFG_REG1_VBG_EN		BIT(9)
	#define TSENSOR_CFG_REG1_OUT_CTL	BIT(6)
	#define TSENSOR_CFG_REG1_FILTER_EN	BIT(5)
	#define TSENSOR_CFG_REG1_DEM_EN		BIT(3)
	#define TSENSOR_CFG_REG1_CH_SEL		GENMASK(1, 0)
	/* Bits toggled together to enable/disable the sensor. */
	#define TSENSOR_CFG_REG1_ENABLE		\
					 (TSENSOR_CFG_REG1_FILTER_EN |	\
					  TSENSOR_CFG_REG1_VCM_EN |	\
					  TSENSOR_CFG_REG1_VBG_EN |	\
					  TSENSOR_CFG_REG1_DEM_EN |	\
					  TSENSOR_CFG_REG1_CH_SEL)

/* Raw temperature readout registers and field masks. */
#define TSENSOR_STAT0			0x40

#define TSENSOR_STAT9			0x64

#define TSENSOR_READ_TEMP_MASK		GENMASK(15, 0)
#define TSENSOR_TEMP_MASK		GENMASK(11, 0)

/* Layout of the fused trim/calibration word read via the AO syscon. */
#define TSENSOR_TRIM_SIGN_MASK		BIT(15)
#define TSENSOR_TRIM_TEMP_MASK		GENMASK(14, 0)
#define TSENSOR_TRIM_VERSION_MASK	GENMASK(31, 24)

#define TSENSOR_TRIM_VERSION(_version)	\
	FIELD_GET(TSENSOR_TRIM_VERSION_MASK, _version)

#define TSENSOR_TRIM_CALIB_VALID_MASK	(GENMASK(3, 2) | BIT(7))

#define TSENSOR_CALIB_OFFSET	1
#define TSENSOR_CALIB_SHIFT	4

/**
 * struct amlogic_thermal_soc_calib_data
 * @A: calibration parameters
 * @B: calibration parameters
 * @m: calibration parameters
 * @n: calibration parameters
 *
 * This structure is required for configuration of amlogic thermal driver.
 */
struct amlogic_thermal_soc_calib_data {
	int A;
	int B;
	int m;
	int n;
};

/**
 * struct amlogic_thermal_data
 * @u_efuse_off: register offset to read fused calibration value
 * @calibration_parameters: calibration parameters structure pointer
 * @regmap_config: regmap config for the device
 * This structure is required for configuration of amlogic thermal driver.
 */
struct amlogic_thermal_data {
	int u_efuse_off;
	const struct amlogic_thermal_soc_calib_data *calibration_parameters;
	const struct regmap_config *regmap_config;
};

/* Per-instance driver state. */
struct amlogic_thermal {
	struct platform_device *pdev;
	const struct amlogic_thermal_data *data;
	struct regmap *regmap;
	struct regmap *sec_ao_map;	/* AO-secure syscon holding trim fuse */
	struct clk *clk;
	struct thermal_zone_device *tzd;
	u32 trim_info;			/* cached fused calibration word */
};

/*
 * Calculate a temperature value from a temperature code.
 * The unit of the temperature is degree milliCelsius.
 */
static int amlogic_thermal_code_to_millicelsius(struct amlogic_thermal *pdata,
						int temp_code)
{
	const struct amlogic_thermal_soc_calib_data *param =
					pdata->data->calibration_parameters;
	int temp;
	s64 factor, Uptat, uefuse;

	/*
	 * Sign-extend the 15-bit fused trim value: bit 15 is the sign, and a
	 * set sign bit means the magnitude is stored in two's complement.
	 */
	uefuse = pdata->trim_info & TSENSOR_TRIM_SIGN_MASK ?
			 ~(pdata->trim_info & TSENSOR_TRIM_TEMP_MASK) + 1 :
			 (pdata->trim_info & TSENSOR_TRIM_TEMP_MASK);

	/*
	 * Fixed-point evaluation of the header formula; m and n are stored
	 * scaled by 100, hence the div_s64(..., 100) steps.
	 */
	factor = param->n * temp_code;
	factor = div_s64(factor, 100);

	Uptat = temp_code * param->m;
	Uptat = div_s64(Uptat, 100);

	Uptat = Uptat * BIT(16);
	Uptat = div_s64(Uptat, BIT(16) + factor);

	temp = (Uptat + uefuse) * param->A;
	temp = div_s64(temp, BIT(16));
	temp = (temp - param->B) * 100;

	return temp;
}

/*
 * Read and cache the fused trim word, then validate its version field.
 * Returns -EINVAL when the part carries no supported calibration.
 */
static int amlogic_thermal_initialize(struct amlogic_thermal *pdata)
{
	int ret = 0;
	int ver;

	regmap_read(pdata->sec_ao_map, pdata->data->u_efuse_off,
		    &pdata->trim_info);

	ver = TSENSOR_TRIM_VERSION(pdata->trim_info);

	if ((ver & TSENSOR_TRIM_CALIB_VALID_MASK) == 0) {
		ret = -EINVAL;
		dev_err(&pdata->pdev->dev,
			"tsensor thermal calibration not supported: 0x%x!\n",
			ver);
	}

	return ret;
}

/* Enable the sensor clock, then set the enable bits in CFG_REG1. */
static int amlogic_thermal_enable(struct amlogic_thermal *data)
{
	int ret;

	ret = clk_prepare_enable(data->clk);
	if (ret)
		return ret;

	regmap_update_bits(data->regmap, TSENSOR_CFG_REG1,
			   TSENSOR_CFG_REG1_ENABLE, TSENSOR_CFG_REG1_ENABLE);

	return 0;
}

/* Reverse of amlogic_thermal_enable(); always succeeds. */
static int amlogic_thermal_disable(struct amlogic_thermal *data)
{
	regmap_update_bits(data->regmap, TSENSOR_CFG_REG1,
			   TSENSOR_CFG_REG1_ENABLE, 0);
	clk_disable_unprepare(data->clk);

	return 0;
}

/* thermal_zone op: read STAT0 and convert the code to milli-Celsius. */
static int amlogic_thermal_get_temp(struct thermal_zone_device *tz, int *temp)
{
	unsigned int tval;
	struct amlogic_thermal *pdata = thermal_zone_device_priv(tz);

	if (!pdata)
		return -EINVAL;

	regmap_read(pdata->regmap, TSENSOR_STAT0, &tval);
	*temp =
	   amlogic_thermal_code_to_millicelsius(pdata,
						tval & TSENSOR_READ_TEMP_MASK);

	return 0;
}

static const struct thermal_zone_device_ops amlogic_thermal_ops = {
	.get_temp	= amlogic_thermal_get_temp,
};

static const struct regmap_config amlogic_thermal_regmap_config_g12a = {
	.reg_bits = 8,
	.val_bits = 32,
	.reg_stride = 4,
	.max_register = TSENSOR_STAT9,
};

static const struct amlogic_thermal_soc_calib_data amlogic_thermal_g12a = {
	.A = 9411,
	.B = 3159,
	.m = 424,
	.n = 324,
};

static const struct amlogic_thermal_data amlogic_thermal_g12a_cpu_param = {
	.u_efuse_off = 0x128,
	.calibration_parameters = &amlogic_thermal_g12a,
	.regmap_config = &amlogic_thermal_regmap_config_g12a,
};

static const struct amlogic_thermal_data amlogic_thermal_g12a_ddr_param = {
	.u_efuse_off = 0xf0,
	.calibration_parameters = &amlogic_thermal_g12a,
	.regmap_config = &amlogic_thermal_regmap_config_g12a,
};

static const struct of_device_id of_amlogic_thermal_match[] = {
	{
		.compatible = "amlogic,g12a-ddr-thermal",
		.data = &amlogic_thermal_g12a_ddr_param,
	},
	{
		.compatible = "amlogic,g12a-cpu-thermal",
		.data = &amlogic_thermal_g12a_cpu_param,
	},
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, of_amlogic_thermal_match);

/*
 * Probe: map the sensor registers, grab clock and AO-secure syscon,
 * register the thermal zone (+hwmon), then validate calibration and
 * power the sensor up.
 *
 * NOTE(review): the zone is registered before the calibration read and
 * clock enable below, so get_temp can in principle run against a not yet
 * enabled sensor - confirm whether that window is acceptable upstream.
 */
static int amlogic_thermal_probe(struct platform_device *pdev)
{
	struct amlogic_thermal *pdata;
	struct device *dev = &pdev->dev;
	void __iomem *base;
	int ret;

	pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
	if (!pdata)
		return -ENOMEM;

	pdata->data = of_device_get_match_data(dev);
	pdata->pdev = pdev;
	platform_set_drvdata(pdev, pdata);

	base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(base))
		return PTR_ERR(base);

	pdata->regmap = devm_regmap_init_mmio(dev, base,
					      pdata->data->regmap_config);
	if (IS_ERR(pdata->regmap))
		return PTR_ERR(pdata->regmap);

	pdata->clk = devm_clk_get(dev, NULL);
	if (IS_ERR(pdata->clk))
		return dev_err_probe(dev, PTR_ERR(pdata->clk), "failed to get clock\n");

	pdata->sec_ao_map = syscon_regmap_lookup_by_phandle
		(pdev->dev.of_node, "amlogic,ao-secure");
	if (IS_ERR(pdata->sec_ao_map)) {
		dev_err(dev, "syscon regmap lookup failed.\n");
		return PTR_ERR(pdata->sec_ao_map);
	}

	pdata->tzd = devm_thermal_of_zone_register(&pdev->dev,
						   0,
						   pdata,
						   &amlogic_thermal_ops);
	if (IS_ERR(pdata->tzd)) {
		ret = PTR_ERR(pdata->tzd);
		dev_err(dev, "Failed to register tsensor: %d\n", ret);
		return ret;
	}

	/* hwmon mirror is best-effort; failure is not fatal here. */
	devm_thermal_add_hwmon_sysfs(&pdev->dev, pdata->tzd);

	ret = amlogic_thermal_initialize(pdata);
	if (ret)
		return ret;

	ret = amlogic_thermal_enable(pdata);

	return ret;
}

static int amlogic_thermal_remove(struct platform_device *pdev)
{
	struct amlogic_thermal *data = platform_get_drvdata(pdev);

	return amlogic_thermal_disable(data);
}

static int __maybe_unused amlogic_thermal_suspend(struct device *dev)
{
	struct amlogic_thermal *data = dev_get_drvdata(dev);

	return amlogic_thermal_disable(data);
}

static int __maybe_unused amlogic_thermal_resume(struct device *dev)
{
	struct amlogic_thermal *data = dev_get_drvdata(dev);

	return amlogic_thermal_enable(data);
}

static SIMPLE_DEV_PM_OPS(amlogic_thermal_pm_ops,
			 amlogic_thermal_suspend, amlogic_thermal_resume);

static struct platform_driver amlogic_thermal_driver = {
	.driver = {
		.name		= "amlogic_thermal",
		.pm		= &amlogic_thermal_pm_ops,
		.of_match_table = of_amlogic_thermal_match,
	},
	.probe	= amlogic_thermal_probe,
	.remove	= amlogic_thermal_remove,
};

module_platform_driver(amlogic_thermal_driver);

MODULE_AUTHOR("Guillaume La Roque <[email protected]>");
MODULE_DESCRIPTION("Amlogic thermal driver");
MODULE_LICENSE("GPL v2");
linux-master
drivers/thermal/amlogic_thermal.c
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (c) 2014-2016, Fuzhou Rockchip Electronics Co., Ltd * Caesar Wang <[email protected]> */ #include <linux/clk.h> #include <linux/delay.h> #include <linux/interrupt.h> #include <linux/io.h> #include <linux/module.h> #include <linux/of.h> #include <linux/of_address.h> #include <linux/of_irq.h> #include <linux/platform_device.h> #include <linux/regmap.h> #include <linux/reset.h> #include <linux/thermal.h> #include <linux/mfd/syscon.h> #include <linux/pinctrl/consumer.h> /* * If the temperature over a period of time High, * the resulting TSHUT gave CRU module,let it reset the entire chip, * or via GPIO give PMIC. */ enum tshut_mode { TSHUT_MODE_CRU = 0, TSHUT_MODE_GPIO, }; /* * The system Temperature Sensors tshut(tshut) polarity * the bit 8 is tshut polarity. * 0: low active, 1: high active */ enum tshut_polarity { TSHUT_LOW_ACTIVE = 0, TSHUT_HIGH_ACTIVE, }; /* * The conversion table has the adc value and temperature. * ADC_DECREMENT: the adc value is of diminishing.(e.g. rk3288_code_table) * ADC_INCREMENT: the adc value is incremental.(e.g. 
rk3368_code_table) */ enum adc_sort_mode { ADC_DECREMENT = 0, ADC_INCREMENT, }; #include "thermal_hwmon.h" /** * struct chip_tsadc_table - hold information about chip-specific differences * @id: conversion table * @length: size of conversion table * @data_mask: mask to apply on data inputs * @mode: sort mode of this adc variant (incrementing or decrementing) */ struct chip_tsadc_table { const struct tsadc_table *id; unsigned int length; u32 data_mask; enum adc_sort_mode mode; }; /** * struct rockchip_tsadc_chip - hold the private data of tsadc chip * @chn_offset: the channel offset of the first channel * @chn_num: the channel number of tsadc chip * @tshut_temp: the hardware-controlled shutdown temperature value * @tshut_mode: the hardware-controlled shutdown mode (0:CRU 1:GPIO) * @tshut_polarity: the hardware-controlled active polarity (0:LOW 1:HIGH) * @initialize: SoC special initialize tsadc controller method * @irq_ack: clear the interrupt * @control: enable/disable method for the tsadc controller * @get_temp: get the temperature * @set_alarm_temp: set the high temperature interrupt * @set_tshut_temp: set the hardware-controlled shutdown temperature * @set_tshut_mode: set the hardware-controlled shutdown mode * @table: the chip-specific conversion table */ struct rockchip_tsadc_chip { /* The sensor id of chip correspond to the ADC channel */ int chn_offset; int chn_num; /* The hardware-controlled tshut property */ int tshut_temp; enum tshut_mode tshut_mode; enum tshut_polarity tshut_polarity; /* Chip-wide methods */ void (*initialize)(struct regmap *grf, void __iomem *reg, enum tshut_polarity p); void (*irq_ack)(void __iomem *reg); void (*control)(void __iomem *reg, bool on); /* Per-sensor methods */ int (*get_temp)(const struct chip_tsadc_table *table, int chn, void __iomem *reg, int *temp); int (*set_alarm_temp)(const struct chip_tsadc_table *table, int chn, void __iomem *reg, int temp); int (*set_tshut_temp)(const struct chip_tsadc_table *table, int chn, void 
__iomem *reg, int temp); void (*set_tshut_mode)(int chn, void __iomem *reg, enum tshut_mode m); /* Per-table methods */ struct chip_tsadc_table table; }; /** * struct rockchip_thermal_sensor - hold the information of thermal sensor * @thermal: pointer to the platform/configuration data * @tzd: pointer to a thermal zone * @id: identifier of the thermal sensor */ struct rockchip_thermal_sensor { struct rockchip_thermal_data *thermal; struct thermal_zone_device *tzd; int id; }; /** * struct rockchip_thermal_data - hold the private data of thermal driver * @chip: pointer to the platform/configuration data * @pdev: platform device of thermal * @reset: the reset controller of tsadc * @sensors: array of thermal sensors * @clk: the controller clock is divided by the exteral 24MHz * @pclk: the advanced peripherals bus clock * @grf: the general register file will be used to do static set by software * @regs: the base address of tsadc controller * @tshut_temp: the hardware-controlled shutdown temperature value * @tshut_mode: the hardware-controlled shutdown mode (0:CRU 1:GPIO) * @tshut_polarity: the hardware-controlled active polarity (0:LOW 1:HIGH) */ struct rockchip_thermal_data { const struct rockchip_tsadc_chip *chip; struct platform_device *pdev; struct reset_control *reset; struct rockchip_thermal_sensor *sensors; struct clk *clk; struct clk *pclk; struct regmap *grf; void __iomem *regs; int tshut_temp; enum tshut_mode tshut_mode; enum tshut_polarity tshut_polarity; }; /* * TSADC Sensor Register description: * * TSADCV2_* are used for RK3288 SoCs, the other chips can reuse it. * TSADCV3_* are used for newer SoCs than RK3288. 
(e.g: RK3228, RK3399) * */ #define TSADCV2_USER_CON 0x00 #define TSADCV2_AUTO_CON 0x04 #define TSADCV2_INT_EN 0x08 #define TSADCV2_INT_PD 0x0c #define TSADCV3_AUTO_SRC_CON 0x0c #define TSADCV3_HT_INT_EN 0x14 #define TSADCV3_HSHUT_GPIO_INT_EN 0x18 #define TSADCV3_HSHUT_CRU_INT_EN 0x1c #define TSADCV3_INT_PD 0x24 #define TSADCV3_HSHUT_PD 0x28 #define TSADCV2_DATA(chn) (0x20 + (chn) * 0x04) #define TSADCV2_COMP_INT(chn) (0x30 + (chn) * 0x04) #define TSADCV2_COMP_SHUT(chn) (0x40 + (chn) * 0x04) #define TSADCV3_DATA(chn) (0x2c + (chn) * 0x04) #define TSADCV3_COMP_INT(chn) (0x6c + (chn) * 0x04) #define TSADCV3_COMP_SHUT(chn) (0x10c + (chn) * 0x04) #define TSADCV2_HIGHT_INT_DEBOUNCE 0x60 #define TSADCV2_HIGHT_TSHUT_DEBOUNCE 0x64 #define TSADCV3_HIGHT_INT_DEBOUNCE 0x14c #define TSADCV3_HIGHT_TSHUT_DEBOUNCE 0x150 #define TSADCV2_AUTO_PERIOD 0x68 #define TSADCV2_AUTO_PERIOD_HT 0x6c #define TSADCV3_AUTO_PERIOD 0x154 #define TSADCV3_AUTO_PERIOD_HT 0x158 #define TSADCV2_AUTO_EN BIT(0) #define TSADCV2_AUTO_EN_MASK BIT(16) #define TSADCV2_AUTO_SRC_EN(chn) BIT(4 + (chn)) #define TSADCV3_AUTO_SRC_EN(chn) BIT(chn) #define TSADCV3_AUTO_SRC_EN_MASK(chn) BIT(16 + chn) #define TSADCV2_AUTO_TSHUT_POLARITY_HIGH BIT(8) #define TSADCV2_AUTO_TSHUT_POLARITY_MASK BIT(24) #define TSADCV3_AUTO_Q_SEL_EN BIT(1) #define TSADCV2_INT_SRC_EN(chn) BIT(chn) #define TSADCV2_INT_SRC_EN_MASK(chn) BIT(16 + (chn)) #define TSADCV2_SHUT_2GPIO_SRC_EN(chn) BIT(4 + (chn)) #define TSADCV2_SHUT_2CRU_SRC_EN(chn) BIT(8 + (chn)) #define TSADCV2_INT_PD_CLEAR_MASK ~BIT(8) #define TSADCV3_INT_PD_CLEAR_MASK ~BIT(16) #define TSADCV4_INT_PD_CLEAR_MASK 0xffffffff #define TSADCV2_DATA_MASK 0xfff #define TSADCV3_DATA_MASK 0x3ff #define TSADCV4_DATA_MASK 0x1ff #define TSADCV2_HIGHT_INT_DEBOUNCE_COUNT 4 #define TSADCV2_HIGHT_TSHUT_DEBOUNCE_COUNT 4 #define TSADCV2_AUTO_PERIOD_TIME 250 /* 250ms */ #define TSADCV2_AUTO_PERIOD_HT_TIME 50 /* 50ms */ #define TSADCV3_AUTO_PERIOD_TIME 1875 /* 2.5ms */ #define TSADCV3_AUTO_PERIOD_HT_TIME 
1875 /* 2.5ms */
/* Auto-mode sample interval (in TSADC clocks) between two conversions, per IP revision. */
#define TSADCV5_AUTO_PERIOD_TIME		1622 /* 2.5ms */
#define TSADCV5_AUTO_PERIOD_HT_TIME		1622 /* 2.5ms */
#define TSADCV6_AUTO_PERIOD_TIME		5000 /* 2.5ms */
#define TSADCV6_AUTO_PERIOD_HT_TIME		5000 /* 2.5ms */
/* Interleave value written to TSADCV2_USER_CON in user (non-auto) mode. */
#define TSADCV2_USER_INTER_PD_SOC		0x340 /* 13 clocks */
#define TSADCV5_USER_INTER_PD_SOC		0xfc0 /* 97us, at least 90us */
/* GRF (general register file) register offsets. */
#define GRF_SARADC_TESTBIT			0x0e644
#define GRF_TSADC_TESTBIT_L			0x0e648
#define GRF_TSADC_TESTBIT_H			0x0e64c
#define PX30_GRF_SOC_CON2			0x0408
#define RK3568_GRF_TSADC_CON			0x0600
/* GRF values: upper 16 bits are the write-enable mask for the low 16 bits. */
#define RK3568_GRF_TSADC_ANA_REG0		(0x10001 << 0)
#define RK3568_GRF_TSADC_ANA_REG1		(0x10001 << 1)
#define RK3568_GRF_TSADC_ANA_REG2		(0x10001 << 2)
#define RK3568_GRF_TSADC_TSEN			(0x10001 << 8)
#define RK3588_GRF0_TSADC_CON			0x0100
#define RK3588_GRF0_TSADC_TRM			(0xff0077 << 0)
#define RK3588_GRF0_TSADC_SHUT_2CRU		(0x30003 << 10)
#define RK3588_GRF0_TSADC_SHUT_2GPIO		(0x70007 << 12)
#define GRF_SARADC_TESTBIT_ON			(0x10001 << 2)
#define GRF_TSADC_TESTBIT_H_ON			(0x10001 << 2)
#define GRF_TSADC_VCM_EN_L			(0x10001 << 7)
#define GRF_TSADC_VCM_EN_H			(0x10001 << 7)
#define GRF_CON_TSADC_CH_INV			(0x10001 << 1)

/**
 * struct tsadc_table - code to temperature conversion table
 * @code: the value of adc channel
 * @temp: the temperature in millicelsius
 * Note:
 * code to temperature mapping of the temperature sensor is a piece wise linear
 * curve. Any temperature/code falling between two given table entries can be
 * linearly interpolated.
 * Code to Temperature mapping should be updated based on manufacturer results.
 */
struct tsadc_table {
	u32 code;
	int temp;
};

/* RV1108: ADC code increases with temperature (ADC_INCREMENT). */
static const struct tsadc_table rv1108_table[] = {
	{0, -40000},
	{374, -40000}, {382, -35000}, {389, -30000}, {397, -25000},
	{405, -20000}, {413, -15000}, {421, -10000}, {429, -5000},
	{436, 0}, {444, 5000}, {452, 10000}, {460, 15000},
	{468, 20000}, {476, 25000}, {483, 30000}, {491, 35000},
	{499, 40000}, {507, 45000}, {515, 50000}, {523, 55000},
	{531, 60000}, {539, 65000}, {547, 70000}, {555, 75000},
	{562, 80000}, {570, 85000}, {578, 90000}, {586, 95000},
	{594, 100000}, {602, 105000}, {610, 110000}, {618, 115000},
	{626, 120000}, {634, 125000},
	{TSADCV2_DATA_MASK, 125000},
};

/* RK3228 (also used by RK3366): increasing code. */
static const struct tsadc_table rk3228_code_table[] = {
	{0, -40000},
	{588, -40000}, {593, -35000}, {598, -30000}, {603, -25000},
	{608, -20000}, {613, -15000}, {618, -10000}, {623, -5000},
	{629, 0}, {634, 5000}, {639, 10000}, {644, 15000},
	{649, 20000}, {654, 25000}, {660, 30000}, {665, 35000},
	{670, 40000}, {675, 45000}, {681, 50000}, {686, 55000},
	{691, 60000}, {696, 65000}, {702, 70000}, {707, 75000},
	{712, 80000}, {717, 85000}, {723, 90000}, {728, 95000},
	{733, 100000}, {738, 105000}, {744, 110000}, {749, 115000},
	{754, 120000}, {760, 125000},
	{TSADCV2_DATA_MASK, 125000},
};

/* RK3288: code decreases with temperature (ADC_DECREMENT) — note reversed sentinels. */
static const struct tsadc_table rk3288_code_table[] = {
	{TSADCV2_DATA_MASK, -40000},
	{3800, -40000}, {3792, -35000}, {3783, -30000}, {3774, -25000},
	{3765, -20000}, {3756, -15000}, {3747, -10000}, {3737, -5000},
	{3728, 0}, {3718, 5000}, {3708, 10000}, {3698, 15000},
	{3688, 20000}, {3678, 25000}, {3667, 30000}, {3656, 35000},
	{3645, 40000}, {3634, 45000}, {3623, 50000}, {3611, 55000},
	{3600, 60000}, {3588, 65000}, {3575, 70000}, {3563, 75000},
	{3550, 80000}, {3537, 85000}, {3524, 90000}, {3510, 95000},
	{3496, 100000}, {3482, 105000}, {3467, 110000}, {3452, 115000},
	{3437, 120000}, {3421, 125000},
	{0, 125000},
};

/* RK3328 (also used by PX30): increasing code; no -25000 entry in this table. */
static const struct tsadc_table rk3328_code_table[] = {
	{0, -40000},
	{296, -40000}, {304, -35000}, {313, -30000}, {331, -20000},
	{340, -15000}, {349, -10000}, {359, -5000},
	{368, 0}, {378, 5000}, {388, 10000}, {398, 15000},
	{408, 20000}, {418, 25000}, {429, 30000}, {440, 35000},
	{451, 40000}, {462, 45000}, {473, 50000}, {485, 55000},
	{496, 60000}, {508, 65000}, {521, 70000}, {533, 75000},
	{546, 80000}, {559, 85000}, {572, 90000}, {586, 95000},
	{600, 100000}, {614, 105000}, {629, 110000}, {644, 115000},
	{659, 120000}, {675, 125000},
	{TSADCV2_DATA_MASK, 125000},
};

/* RK3368: increasing code. */
static const struct tsadc_table rk3368_code_table[] = {
	{0, -40000},
	{106, -40000}, {108, -35000}, {110, -30000}, {112, -25000},
	{114, -20000}, {116, -15000}, {118, -10000}, {120, -5000},
	{122, 0}, {124, 5000}, {126, 10000}, {128, 15000},
	{130, 20000}, {132, 25000}, {134, 30000}, {136, 35000},
	{138, 40000}, {140, 45000}, {142, 50000}, {144, 55000},
	{146, 60000}, {148, 65000}, {150, 70000}, {152, 75000},
	{154, 80000}, {156, 85000}, {158, 90000}, {160, 95000},
	{162, 100000}, {163, 105000}, {165, 110000}, {167, 115000},
	{169, 120000}, {171, 125000},
	{TSADCV3_DATA_MASK, 125000},
};

/* RK3399: increasing code. */
static const struct tsadc_table rk3399_code_table[] = {
	{0, -40000},
	{402, -40000}, {410, -35000}, {419, -30000}, {427, -25000},
	{436, -20000}, {444, -15000}, {453, -10000}, {461, -5000},
	{470, 0}, {478, 5000}, {487, 10000}, {496, 15000},
	{504, 20000}, {513, 25000}, {521, 30000}, {530, 35000},
	{538, 40000}, {547, 45000}, {555, 50000}, {564, 55000},
	{573, 60000}, {581, 65000}, {590, 70000}, {599, 75000},
	{607, 80000}, {616, 85000}, {624, 90000}, {633, 95000},
	{642, 100000}, {650, 105000}, {659, 110000}, {668, 115000},
	{677, 120000}, {685, 125000},
	{TSADCV3_DATA_MASK, 125000},
};

/* RK3568: increasing code. */
static const struct tsadc_table rk3568_code_table[] = {
	{0, -40000},
	{1584, -40000}, {1620, -35000}, {1652, -30000}, {1688, -25000},
	{1720, -20000}, {1756, -15000}, {1788, -10000}, {1824, -5000},
	{1856, 0}, {1892, 5000}, {1924, 10000}, {1956, 15000},
	{1992, 20000}, {2024, 25000}, {2060, 30000}, {2092, 35000},
	{2128, 40000}, {2160, 45000}, {2196, 50000}, {2228, 55000},
	{2264, 60000}, {2300, 65000}, {2332, 70000}, {2368,
75000}, {2400, 80000}, {2436, 85000}, {2468, 90000}, {2500, 95000},
	{2536, 100000}, {2572, 105000}, {2604, 110000}, {2636, 115000},
	{2672, 120000}, {2704, 125000},
	{TSADCV2_DATA_MASK, 125000},
};

/* RK3588: sparse table — relies on the linear interpolation below. */
static const struct tsadc_table rk3588_code_table[] = {
	{0, -40000},
	{215, -40000},
	{285, 25000},
	{350, 85000},
	{395, 125000},
	{TSADCV4_DATA_MASK, 125000},
};

/*
 * rk_tsadcv2_temp_to_code - convert a temperature (millicelsius) to a raw
 * ADC code via binary search plus linear interpolation in @table.
 * Returns table->data_mask on out-of-range temperature or unknown table mode.
 */
static u32 rk_tsadcv2_temp_to_code(const struct chip_tsadc_table *table,
				   int temp)
{
	int high, low, mid;
	unsigned long num;
	unsigned int denom;
	u32 error = table->data_mask;

	low = 0;
	high = (table->length - 1) - 1; /* ignore the last check for table */
	mid = (high + low) / 2;

	/* Return mask code data when the temp is over table range */
	if (temp < table->id[low].temp || temp > table->id[high].temp)
		goto exit;

	while (low <= high) {
		if (temp == table->id[mid].temp)
			return table->id[mid].code;
		else if (temp < table->id[mid].temp)
			high = mid - 1;
		else
			low = mid + 1;
		mid = (low + high) / 2;
	}

	/*
	 * The conversion code granularity provided by the table. Let's
	 * assume that the relationship between temperature and
	 * analog value between 2 table entries is linear and interpolate
	 * to produce less granular result.
	 */
	num = abs(table->id[mid + 1].code - table->id[mid].code);
	num *= temp - table->id[mid].temp;
	denom = table->id[mid + 1].temp - table->id[mid].temp;

	switch (table->mode) {
	case ADC_DECREMENT:
		return table->id[mid].code - (num / denom);
	case ADC_INCREMENT:
		return table->id[mid].code + (num / denom);
	default:
		pr_err("%s: unknown table mode: %d\n", __func__, table->mode);
		return error;
	}

exit:
	pr_err("%s: invalid temperature, temp=%d error=%d\n",
	       __func__, temp, error);
	return error;
}

/*
 * rk_tsadcv2_code_to_temp - convert a raw ADC code to a temperature
 * (millicelsius) in *temp. Handles both increasing (ADC_INCREMENT) and
 * decreasing (ADC_DECREMENT) tables; returns -EAGAIN for a reading outside
 * the valid code range and -EINVAL for an unknown table mode.
 */
static int rk_tsadcv2_code_to_temp(const struct chip_tsadc_table *table,
				   u32 code, int *temp)
{
	unsigned int low = 1;
	unsigned int high = table->length - 1;
	unsigned int mid = (low + high) / 2;
	unsigned int num;
	unsigned long denom;

	WARN_ON(table->length < 2);

	switch (table->mode) {
	case ADC_DECREMENT:
		code &= table->data_mask;
		if (code <= table->id[high].code)
			return -EAGAIN;		/* Incorrect reading */

		while (low <= high) {
			if (code >= table->id[mid].code &&
			    code < table->id[mid - 1].code)
				break;
			else if (code < table->id[mid].code)
				low = mid + 1;
			else
				high = mid - 1;

			mid = (low + high) / 2;
		}
		break;
	case ADC_INCREMENT:
		code &= table->data_mask;
		if (code < table->id[low].code)
			return -EAGAIN;		/* Incorrect reading */

		while (low <= high) {
			if (code <= table->id[mid].code &&
			    code > table->id[mid - 1].code)
				break;
			else if (code > table->id[mid].code)
				low = mid + 1;
			else
				high = mid - 1;

			mid = (low + high) / 2;
		}
		break;
	default:
		pr_err("%s: unknown table mode: %d\n", __func__, table->mode);
		return -EINVAL;
	}

	/*
	 * The 5C granularity provided by the table is too much. Let's
	 * assume that the relationship between sensor readings and
	 * temperature between 2 table entries is linear and interpolate
	 * to produce less granular result.
	 */
	num = table->id[mid].temp - table->id[mid - 1].temp;
	num *= abs(table->id[mid - 1].code - code);
	denom = abs(table->id[mid - 1].code - table->id[mid].code);
	*temp = table->id[mid - 1].temp + (num / denom);

	return 0;
}

/**
 * rk_tsadcv2_initialize - initialize TASDC Controller.
 * @grf: the general register file will be used to do static set by software
 * @regs: the base address of tsadc controller
 * @tshut_polarity: the hardware-controlled active polarity (0:LOW 1:HIGH)
 *
 * (1) Set TSADC_V2_AUTO_PERIOD:
 *     Configure the interleave between every two accessing of
 *     TSADC in normal operation.
 *
 * (2) Set TSADCV2_AUTO_PERIOD_HT:
 *     Configure the interleave between every two accessing of
 *     TSADC after the temperature is higher than COM_SHUT or COM_INT.
 *
 * (3) Set TSADCV2_HIGH_INT_DEBOUNCE and TSADC_HIGHT_TSHUT_DEBOUNCE:
 *     If the temperature is higher than COMP_INT or COMP_SHUT for
 *     "debounce" times, TSADC controller will generate interrupt or TSHUT.
 */
static void rk_tsadcv2_initialize(struct regmap *grf, void __iomem *regs,
				  enum tshut_polarity tshut_polarity)
{
	if (tshut_polarity == TSHUT_HIGH_ACTIVE)
		writel_relaxed(0U | TSADCV2_AUTO_TSHUT_POLARITY_HIGH,
			       regs + TSADCV2_AUTO_CON);
	else
		writel_relaxed(0U & ~TSADCV2_AUTO_TSHUT_POLARITY_HIGH,
			       regs + TSADCV2_AUTO_CON);

	writel_relaxed(TSADCV2_AUTO_PERIOD_TIME, regs + TSADCV2_AUTO_PERIOD);
	writel_relaxed(TSADCV2_HIGHT_INT_DEBOUNCE_COUNT,
		       regs + TSADCV2_HIGHT_INT_DEBOUNCE);
	writel_relaxed(TSADCV2_AUTO_PERIOD_HT_TIME,
		       regs + TSADCV2_AUTO_PERIOD_HT);
	writel_relaxed(TSADCV2_HIGHT_TSHUT_DEBOUNCE_COUNT,
		       regs + TSADCV2_HIGHT_TSHUT_DEBOUNCE);
}

/**
 * rk_tsadcv3_initialize - initialize TASDC Controller.
 * @grf: the general register file will be used to do static set by software
 * @regs: the base address of tsadc controller
 * @tshut_polarity: the hardware-controlled active polarity (0:LOW 1:HIGH)
 *
 * (1) The tsadc control power sequence.
 *
 * (2) Set TSADC_V2_AUTO_PERIOD:
 *     Configure the interleave between every two accessing of
 *     TSADC in normal operation.
 *
 * (3) Set TSADCV2_AUTO_PERIOD_HT:
 *     Configure the interleave between every two accessing of
 *     TSADC after the temperature is higher than COM_SHUT or COM_INT.
 *
 * (4) Set TSADCV2_HIGH_INT_DEBOUNCE and TSADC_HIGHT_TSHUT_DEBOUNCE:
 *     If the temperature is higher than COMP_INT or COMP_SHUT for
 *     "debounce" times, TSADC controller will generate interrupt or TSHUT.
 */
static void rk_tsadcv3_initialize(struct regmap *grf, void __iomem *regs,
				  enum tshut_polarity tshut_polarity)
{
	/* The tsadc control power sequence */
	if (IS_ERR(grf)) {
		/* Set interleave value to workround ic time sync issue */
		writel_relaxed(TSADCV2_USER_INTER_PD_SOC,
			       regs + TSADCV2_USER_CON);
		writel_relaxed(TSADCV2_AUTO_PERIOD_TIME,
			       regs + TSADCV2_AUTO_PERIOD);
		writel_relaxed(TSADCV2_HIGHT_INT_DEBOUNCE_COUNT,
			       regs + TSADCV2_HIGHT_INT_DEBOUNCE);
		writel_relaxed(TSADCV2_AUTO_PERIOD_HT_TIME,
			       regs + TSADCV2_AUTO_PERIOD_HT);
		writel_relaxed(TSADCV2_HIGHT_TSHUT_DEBOUNCE_COUNT,
			       regs + TSADCV2_HIGHT_TSHUT_DEBOUNCE);
	} else {
		/* Enable the voltage common mode feature */
		regmap_write(grf, GRF_TSADC_TESTBIT_L, GRF_TSADC_VCM_EN_L);
		regmap_write(grf, GRF_TSADC_TESTBIT_H, GRF_TSADC_VCM_EN_H);

		usleep_range(15, 100); /* The spec note says at least 15 us */
		regmap_write(grf, GRF_SARADC_TESTBIT, GRF_SARADC_TESTBIT_ON);
		regmap_write(grf, GRF_TSADC_TESTBIT_H, GRF_TSADC_TESTBIT_H_ON);
		usleep_range(90, 200); /* The spec note says at least 90 us */

		writel_relaxed(TSADCV3_AUTO_PERIOD_TIME,
			       regs + TSADCV2_AUTO_PERIOD);
		writel_relaxed(TSADCV2_HIGHT_INT_DEBOUNCE_COUNT,
			       regs + TSADCV2_HIGHT_INT_DEBOUNCE);
		writel_relaxed(TSADCV3_AUTO_PERIOD_HT_TIME,
			       regs + TSADCV2_AUTO_PERIOD_HT);
		writel_relaxed(TSADCV2_HIGHT_TSHUT_DEBOUNCE_COUNT,
			       regs + TSADCV2_HIGHT_TSHUT_DEBOUNCE);
	}

	if (tshut_polarity == TSHUT_HIGH_ACTIVE)
		writel_relaxed(0U | TSADCV2_AUTO_TSHUT_POLARITY_HIGH,
			       regs + TSADCV2_AUTO_CON);
	else
		writel_relaxed(0U & ~TSADCV2_AUTO_TSHUT_POLARITY_HIGH,
			       regs + TSADCV2_AUTO_CON);
}

/* PX30: v2 init plus a GRF write inverting the tsadc channel. */
static void rk_tsadcv4_initialize(struct regmap *grf, void __iomem *regs,
				  enum tshut_polarity tshut_polarity)
{
	rk_tsadcv2_initialize(grf, regs, tshut_polarity);
	regmap_write(grf, PX30_GRF_SOC_CON2, GRF_CON_TSADC_CH_INV);
}

/* RK3568: program periods/debounce, then the GRF analog power-up sequence. */
static void rk_tsadcv7_initialize(struct regmap *grf, void __iomem *regs,
				  enum tshut_polarity tshut_polarity)
{
	writel_relaxed(TSADCV5_USER_INTER_PD_SOC, regs + TSADCV2_USER_CON);
	writel_relaxed(TSADCV5_AUTO_PERIOD_TIME, regs + TSADCV2_AUTO_PERIOD);
	writel_relaxed(TSADCV2_HIGHT_INT_DEBOUNCE_COUNT,
		       regs + TSADCV2_HIGHT_INT_DEBOUNCE);
	writel_relaxed(TSADCV5_AUTO_PERIOD_HT_TIME,
		       regs + TSADCV2_AUTO_PERIOD_HT);
	writel_relaxed(TSADCV2_HIGHT_TSHUT_DEBOUNCE_COUNT,
		       regs + TSADCV2_HIGHT_TSHUT_DEBOUNCE);

	if (tshut_polarity == TSHUT_HIGH_ACTIVE)
		writel_relaxed(0U | TSADCV2_AUTO_TSHUT_POLARITY_HIGH,
			       regs + TSADCV2_AUTO_CON);
	else
		writel_relaxed(0U & ~TSADCV2_AUTO_TSHUT_POLARITY_HIGH,
			       regs + TSADCV2_AUTO_CON);

	/*
	 * The general register file is optional
	 * and might not be available.
	 */
	if (!IS_ERR(grf)) {
		regmap_write(grf, RK3568_GRF_TSADC_CON, RK3568_GRF_TSADC_TSEN);
		/*
		 * RK3568 TRM, section 18.5. requires a delay no less
		 * than 10us between the rising edge of tsadc_tsen_en
		 * and the rising edge of tsadc_ana_reg_0/1/2.
		 */
		udelay(15);
		regmap_write(grf, RK3568_GRF_TSADC_CON,
			     RK3568_GRF_TSADC_ANA_REG0);
		regmap_write(grf, RK3568_GRF_TSADC_CON,
			     RK3568_GRF_TSADC_ANA_REG1);
		regmap_write(grf, RK3568_GRF_TSADC_CON,
			     RK3568_GRF_TSADC_ANA_REG2);
		/*
		 * RK3568 TRM, section 18.5. requires a delay no less
		 * than 90us after the rising edge of tsadc_ana_reg_0/1/2.
		 */
		usleep_range(100, 200);
	}
}

/* RK3588: v3 register layout with write-enable-masked polarity bits. */
static void rk_tsadcv8_initialize(struct regmap *grf, void __iomem *regs,
				  enum tshut_polarity tshut_polarity)
{
	writel_relaxed(TSADCV6_AUTO_PERIOD_TIME, regs + TSADCV3_AUTO_PERIOD);
	writel_relaxed(TSADCV6_AUTO_PERIOD_HT_TIME,
		       regs + TSADCV3_AUTO_PERIOD_HT);
	writel_relaxed(TSADCV2_HIGHT_INT_DEBOUNCE_COUNT,
		       regs + TSADCV3_HIGHT_INT_DEBOUNCE);
	writel_relaxed(TSADCV2_HIGHT_TSHUT_DEBOUNCE_COUNT,
		       regs + TSADCV3_HIGHT_TSHUT_DEBOUNCE);

	if (tshut_polarity == TSHUT_HIGH_ACTIVE)
		writel_relaxed(TSADCV2_AUTO_TSHUT_POLARITY_HIGH |
			       TSADCV2_AUTO_TSHUT_POLARITY_MASK,
			       regs + TSADCV2_AUTO_CON);
	else
		writel_relaxed(TSADCV2_AUTO_TSHUT_POLARITY_MASK,
			       regs + TSADCV2_AUTO_CON);
}

/* Acknowledge (clear) pending TSADC interrupts. */
static void rk_tsadcv2_irq_ack(void __iomem *regs)
{
	u32 val;

	val = readl_relaxed(regs + TSADCV2_INT_PD);
	writel_relaxed(val & TSADCV2_INT_PD_CLEAR_MASK, regs + TSADCV2_INT_PD);
}

static void rk_tsadcv3_irq_ack(void __iomem *regs)
{
	u32 val;

	val = readl_relaxed(regs + TSADCV2_INT_PD);
	writel_relaxed(val & TSADCV3_INT_PD_CLEAR_MASK, regs + TSADCV2_INT_PD);
}

/* v4 (RK3588): clear both the normal and the hardware-shutdown pending bits. */
static void rk_tsadcv4_irq_ack(void __iomem *regs)
{
	u32 val;

	val = readl_relaxed(regs + TSADCV3_INT_PD);
	writel_relaxed(val & TSADCV4_INT_PD_CLEAR_MASK, regs + TSADCV3_INT_PD);
	val = readl_relaxed(regs + TSADCV3_HSHUT_PD);
	writel_relaxed(val & TSADCV3_INT_PD_CLEAR_MASK,
		       regs + TSADCV3_HSHUT_PD);
}

/* Enable or disable the TSADC auto-conversion engine. */
static void rk_tsadcv2_control(void __iomem *regs, bool enable)
{
	u32 val;

	val = readl_relaxed(regs + TSADCV2_AUTO_CON);
	if (enable)
		val |= TSADCV2_AUTO_EN;
	else
		val &= ~TSADCV2_AUTO_EN;

	writel_relaxed(val, regs + TSADCV2_AUTO_CON);
}

/**
 * rk_tsadcv3_control - the tsadc controller is enabled or disabled.
 * @regs: the base address of tsadc controller
 * @enable: boolean flag to enable the controller
 *
 * NOTE: TSADC controller works at auto mode, and some SoCs need set the
 * tsadc_q_sel bit on TSADCV2_AUTO_CON[1]. The (1024 - tsadc_q) as output
 * adc value if setting this bit to enable.
 */
static void rk_tsadcv3_control(void __iomem *regs, bool enable)
{
	u32 val;

	val = readl_relaxed(regs + TSADCV2_AUTO_CON);
	if (enable)
		val |= TSADCV2_AUTO_EN | TSADCV3_AUTO_Q_SEL_EN;
	else
		val &= ~TSADCV2_AUTO_EN;

	writel_relaxed(val, regs + TSADCV2_AUTO_CON);
}

/* v4 (RK3588): write-enable-masked enable bit, no read-modify-write needed. */
static void rk_tsadcv4_control(void __iomem *regs, bool enable)
{
	u32 val;

	if (enable)
		val = TSADCV2_AUTO_EN | TSADCV2_AUTO_EN_MASK;
	else
		val = TSADCV2_AUTO_EN_MASK;

	writel_relaxed(val, regs + TSADCV2_AUTO_CON);
}

/* Read channel @chn's data register and convert the code to millicelsius. */
static int rk_tsadcv2_get_temp(const struct chip_tsadc_table *table,
			       int chn, void __iomem *regs, int *temp)
{
	u32 val;

	val = readl_relaxed(regs + TSADCV2_DATA(chn));

	return rk_tsadcv2_code_to_temp(table, val, temp);
}

static int rk_tsadcv4_get_temp(const struct chip_tsadc_table *table,
			       int chn, void __iomem *regs, int *temp)
{
	u32 val;

	val = readl_relaxed(regs + TSADCV3_DATA(chn));

	return rk_tsadcv2_code_to_temp(table, val, temp);
}

/* Program the high-temperature alarm threshold for channel @chn. */
static int rk_tsadcv2_alarm_temp(const struct chip_tsadc_table *table,
				 int chn, void __iomem *regs, int temp)
{
	u32 alarm_value;
	u32 int_en, int_clr;

	/*
	 * In some cases, some sensors didn't need the trip points, the
	 * set_trips will pass {-INT_MAX, INT_MAX} to trigger tsadc alarm
	 * in the end, ignore this case and disable the high temperature
	 * interrupt.
	 */
	if (temp == INT_MAX) {
		int_clr = readl_relaxed(regs + TSADCV2_INT_EN);
		int_clr &= ~TSADCV2_INT_SRC_EN(chn);
		writel_relaxed(int_clr, regs + TSADCV2_INT_EN);
		return 0;
	}

	/* Make sure the value is valid */
	alarm_value = rk_tsadcv2_temp_to_code(table, temp);
	if (alarm_value == table->data_mask)
		return -ERANGE;

	writel_relaxed(alarm_value & table->data_mask,
		       regs + TSADCV2_COMP_INT(chn));

	int_en = readl_relaxed(regs + TSADCV2_INT_EN);
	int_en |= TSADCV2_INT_SRC_EN(chn);
	writel_relaxed(int_en, regs + TSADCV2_INT_EN);

	return 0;
}

static int rk_tsadcv3_alarm_temp(const struct chip_tsadc_table *table,
				 int chn, void __iomem *regs, int temp)
{
	u32 alarm_value;

	/*
	 * In some cases, some sensors didn't need the trip points, the
	 * set_trips will pass {-INT_MAX, INT_MAX} to trigger tsadc alarm
	 * in the end, ignore this case and disable the high temperature
	 * interrupt.
	 */
	if (temp == INT_MAX) {
		writel_relaxed(TSADCV2_INT_SRC_EN_MASK(chn),
			       regs + TSADCV3_HT_INT_EN);
		return 0;
	}

	/* Make sure the value is valid */
	alarm_value = rk_tsadcv2_temp_to_code(table, temp);
	if (alarm_value == table->data_mask)
		return -ERANGE;

	writel_relaxed(alarm_value & table->data_mask,
		       regs + TSADCV3_COMP_INT(chn));
	writel_relaxed(TSADCV2_INT_SRC_EN(chn) | TSADCV2_INT_SRC_EN_MASK(chn),
		       regs + TSADCV3_HT_INT_EN);

	return 0;
}

/* Program the hardware thermal-shutdown (TSHUT) threshold for channel @chn. */
static int rk_tsadcv2_tshut_temp(const struct chip_tsadc_table *table,
				 int chn, void __iomem *regs, int temp)
{
	u32 tshut_value, val;

	/* Make sure the value is valid */
	tshut_value = rk_tsadcv2_temp_to_code(table, temp);
	if (tshut_value == table->data_mask)
		return -ERANGE;

	writel_relaxed(tshut_value, regs + TSADCV2_COMP_SHUT(chn));

	/* TSHUT will be valid */
	val = readl_relaxed(regs + TSADCV2_AUTO_CON);
	writel_relaxed(val | TSADCV2_AUTO_SRC_EN(chn), regs + TSADCV2_AUTO_CON);

	return 0;
}

static int rk_tsadcv3_tshut_temp(const struct chip_tsadc_table *table,
				 int chn, void __iomem *regs, int temp)
{
	u32 tshut_value;

	/* Make sure the value is valid */
	tshut_value = rk_tsadcv2_temp_to_code(table, temp);
	if (tshut_value == table->data_mask)
		return -ERANGE;

	writel_relaxed(tshut_value, regs + TSADCV3_COMP_SHUT(chn));

	/* TSHUT will be valid */
	writel_relaxed(TSADCV3_AUTO_SRC_EN(chn) | TSADCV3_AUTO_SRC_EN_MASK(chn),
		       regs + TSADCV3_AUTO_SRC_CON);

	return 0;
}

/* Route TSHUT for channel @chn either to a GPIO (PMIC) or to the CRU (reset). */
static void rk_tsadcv2_tshut_mode(int chn, void __iomem *regs,
				  enum tshut_mode mode)
{
	u32 val;

	val = readl_relaxed(regs + TSADCV2_INT_EN);
	if (mode == TSHUT_MODE_GPIO) {
		val &= ~TSADCV2_SHUT_2CRU_SRC_EN(chn);
		val |= TSADCV2_SHUT_2GPIO_SRC_EN(chn);
	} else {
		val &= ~TSADCV2_SHUT_2GPIO_SRC_EN(chn);
		val |= TSADCV2_SHUT_2CRU_SRC_EN(chn);
	}

	writel_relaxed(val, regs + TSADCV2_INT_EN);
}

static void rk_tsadcv3_tshut_mode(int chn, void __iomem *regs,
				  enum tshut_mode mode)
{
	u32 val_gpio, val_cru;

	if (mode == TSHUT_MODE_GPIO) {
		val_gpio = TSADCV2_INT_SRC_EN(chn) | TSADCV2_INT_SRC_EN_MASK(chn);
		val_cru = TSADCV2_INT_SRC_EN_MASK(chn);
	} else {
		val_cru = TSADCV2_INT_SRC_EN(chn) | TSADCV2_INT_SRC_EN_MASK(chn);
		val_gpio = TSADCV2_INT_SRC_EN_MASK(chn);
	}

	writel_relaxed(val_gpio, regs + TSADCV3_HSHUT_GPIO_INT_EN);
	writel_relaxed(val_cru, regs + TSADCV3_HSHUT_CRU_INT_EN);
}

/* Per-SoC TSADC descriptions: channel layout, defaults and op callbacks. */
static const struct rockchip_tsadc_chip px30_tsadc_data = {
	/* cpu, gpu */
	.chn_offset = 0,
	.chn_num = 2, /* 2 channels for tsadc */
	.tshut_mode = TSHUT_MODE_CRU, /* default TSHUT via CRU */
	.tshut_temp = 95000,
	.initialize = rk_tsadcv4_initialize,
	.irq_ack = rk_tsadcv3_irq_ack,
	.control = rk_tsadcv3_control,
	.get_temp = rk_tsadcv2_get_temp,
	.set_alarm_temp = rk_tsadcv2_alarm_temp,
	.set_tshut_temp = rk_tsadcv2_tshut_temp,
	.set_tshut_mode = rk_tsadcv2_tshut_mode,
	.table = {
		.id = rk3328_code_table,
		.length = ARRAY_SIZE(rk3328_code_table),
		.data_mask = TSADCV2_DATA_MASK,
		.mode = ADC_INCREMENT,
	},
};

static const struct rockchip_tsadc_chip rv1108_tsadc_data = {
	/* cpu */
	.chn_offset = 0,
	.chn_num = 1, /* one channel for tsadc */
	.tshut_mode = TSHUT_MODE_GPIO, /* default TSHUT via GPIO give PMIC */
	.tshut_polarity =
TSHUT_LOW_ACTIVE, /* default TSHUT LOW ACTIVE */
	.tshut_temp = 95000,
	.initialize = rk_tsadcv2_initialize,
	.irq_ack = rk_tsadcv3_irq_ack,
	.control = rk_tsadcv3_control,
	.get_temp = rk_tsadcv2_get_temp,
	.set_alarm_temp = rk_tsadcv2_alarm_temp,
	.set_tshut_temp = rk_tsadcv2_tshut_temp,
	.set_tshut_mode = rk_tsadcv2_tshut_mode,
	.table = {
		.id = rv1108_table,
		.length = ARRAY_SIZE(rv1108_table),
		.data_mask = TSADCV2_DATA_MASK,
		.mode = ADC_INCREMENT,
	},
};

static const struct rockchip_tsadc_chip rk3228_tsadc_data = {
	/* cpu */
	.chn_offset = 0,
	.chn_num = 1, /* one channel for tsadc */
	.tshut_mode = TSHUT_MODE_GPIO, /* default TSHUT via GPIO give PMIC */
	.tshut_polarity = TSHUT_LOW_ACTIVE, /* default TSHUT LOW ACTIVE */
	.tshut_temp = 95000,
	.initialize = rk_tsadcv2_initialize,
	.irq_ack = rk_tsadcv3_irq_ack,
	.control = rk_tsadcv3_control,
	.get_temp = rk_tsadcv2_get_temp,
	.set_alarm_temp = rk_tsadcv2_alarm_temp,
	.set_tshut_temp = rk_tsadcv2_tshut_temp,
	.set_tshut_mode = rk_tsadcv2_tshut_mode,
	.table = {
		.id = rk3228_code_table,
		.length = ARRAY_SIZE(rk3228_code_table),
		.data_mask = TSADCV3_DATA_MASK,
		.mode = ADC_INCREMENT,
	},
};

static const struct rockchip_tsadc_chip rk3288_tsadc_data = {
	/* cpu, gpu */
	.chn_offset = 1,
	.chn_num = 2, /* two channels for tsadc */
	.tshut_mode = TSHUT_MODE_GPIO, /* default TSHUT via GPIO give PMIC */
	.tshut_polarity = TSHUT_LOW_ACTIVE, /* default TSHUT LOW ACTIVE */
	.tshut_temp = 95000,
	.initialize = rk_tsadcv2_initialize,
	.irq_ack = rk_tsadcv2_irq_ack,
	.control = rk_tsadcv2_control,
	.get_temp = rk_tsadcv2_get_temp,
	.set_alarm_temp = rk_tsadcv2_alarm_temp,
	.set_tshut_temp = rk_tsadcv2_tshut_temp,
	.set_tshut_mode = rk_tsadcv2_tshut_mode,
	.table = {
		.id = rk3288_code_table,
		.length = ARRAY_SIZE(rk3288_code_table),
		.data_mask = TSADCV2_DATA_MASK,
		.mode = ADC_DECREMENT,
	},
};

static const struct rockchip_tsadc_chip rk3328_tsadc_data = {
	/* cpu */
	.chn_offset = 0,
	.chn_num = 1, /* one channels for tsadc */
	.tshut_mode = TSHUT_MODE_CRU, /* default TSHUT via CRU */
	.tshut_temp = 95000,
	.initialize = rk_tsadcv2_initialize,
	.irq_ack = rk_tsadcv3_irq_ack,
	.control = rk_tsadcv3_control,
	.get_temp = rk_tsadcv2_get_temp,
	.set_alarm_temp = rk_tsadcv2_alarm_temp,
	.set_tshut_temp = rk_tsadcv2_tshut_temp,
	.set_tshut_mode = rk_tsadcv2_tshut_mode,
	.table = {
		.id = rk3328_code_table,
		.length = ARRAY_SIZE(rk3328_code_table),
		.data_mask = TSADCV2_DATA_MASK,
		.mode = ADC_INCREMENT,
	},
};

static const struct rockchip_tsadc_chip rk3366_tsadc_data = {
	/* cpu, gpu */
	.chn_offset = 0,
	.chn_num = 2, /* two channels for tsadc */
	.tshut_mode = TSHUT_MODE_GPIO, /* default TSHUT via GPIO give PMIC */
	.tshut_polarity = TSHUT_LOW_ACTIVE, /* default TSHUT LOW ACTIVE */
	.tshut_temp = 95000,
	.initialize = rk_tsadcv3_initialize,
	.irq_ack = rk_tsadcv3_irq_ack,
	.control = rk_tsadcv3_control,
	.get_temp = rk_tsadcv2_get_temp,
	.set_alarm_temp = rk_tsadcv2_alarm_temp,
	.set_tshut_temp = rk_tsadcv2_tshut_temp,
	.set_tshut_mode = rk_tsadcv2_tshut_mode,
	.table = {
		.id = rk3228_code_table,
		.length = ARRAY_SIZE(rk3228_code_table),
		.data_mask = TSADCV3_DATA_MASK,
		.mode = ADC_INCREMENT,
	},
};

static const struct rockchip_tsadc_chip rk3368_tsadc_data = {
	/* cpu, gpu */
	.chn_offset = 0,
	.chn_num = 2, /* two channels for tsadc */
	.tshut_mode = TSHUT_MODE_GPIO, /* default TSHUT via GPIO give PMIC */
	.tshut_polarity = TSHUT_LOW_ACTIVE, /* default TSHUT LOW ACTIVE */
	.tshut_temp = 95000,
	.initialize = rk_tsadcv2_initialize,
	.irq_ack = rk_tsadcv2_irq_ack,
	.control = rk_tsadcv2_control,
	.get_temp = rk_tsadcv2_get_temp,
	.set_alarm_temp = rk_tsadcv2_alarm_temp,
	.set_tshut_temp = rk_tsadcv2_tshut_temp,
	.set_tshut_mode = rk_tsadcv2_tshut_mode,
	.table = {
		.id = rk3368_code_table,
		.length = ARRAY_SIZE(rk3368_code_table),
		.data_mask = TSADCV3_DATA_MASK,
		.mode = ADC_INCREMENT,
	},
};

static const struct rockchip_tsadc_chip rk3399_tsadc_data = {
	/* cpu, gpu */
	.chn_offset = 0,
	.chn_num = 2, /* two channels for tsadc */
	.tshut_mode = TSHUT_MODE_GPIO, /* default TSHUT via GPIO give PMIC */
	.tshut_polarity = TSHUT_LOW_ACTIVE, /* default TSHUT LOW ACTIVE */
	.tshut_temp = 95000,
	.initialize = rk_tsadcv3_initialize,
	.irq_ack = rk_tsadcv3_irq_ack,
	.control = rk_tsadcv3_control,
	.get_temp = rk_tsadcv2_get_temp,
	.set_alarm_temp = rk_tsadcv2_alarm_temp,
	.set_tshut_temp = rk_tsadcv2_tshut_temp,
	.set_tshut_mode = rk_tsadcv2_tshut_mode,
	.table = {
		.id = rk3399_code_table,
		.length = ARRAY_SIZE(rk3399_code_table),
		.data_mask = TSADCV3_DATA_MASK,
		.mode = ADC_INCREMENT,
	},
};

static const struct rockchip_tsadc_chip rk3568_tsadc_data = {
	/* cpu, gpu */
	.chn_offset = 0,
	.chn_num = 2, /* two channels for tsadc */
	.tshut_mode = TSHUT_MODE_GPIO, /* default TSHUT via GPIO give PMIC */
	.tshut_polarity = TSHUT_LOW_ACTIVE, /* default TSHUT LOW ACTIVE */
	.tshut_temp = 95000,
	.initialize = rk_tsadcv7_initialize,
	.irq_ack = rk_tsadcv3_irq_ack,
	.control = rk_tsadcv3_control,
	.get_temp = rk_tsadcv2_get_temp,
	.set_alarm_temp = rk_tsadcv2_alarm_temp,
	.set_tshut_temp = rk_tsadcv2_tshut_temp,
	.set_tshut_mode = rk_tsadcv2_tshut_mode,
	.table = {
		.id = rk3568_code_table,
		.length = ARRAY_SIZE(rk3568_code_table),
		.data_mask = TSADCV2_DATA_MASK,
		.mode = ADC_INCREMENT,
	},
};

static const struct rockchip_tsadc_chip rk3588_tsadc_data = {
	/* top, big_core0, big_core1, little_core, center, gpu, npu */
	.chn_offset = 0,
	.chn_num = 7, /* seven channels for tsadc */
	.tshut_mode = TSHUT_MODE_GPIO, /* default TSHUT via GPIO give PMIC */
	.tshut_polarity = TSHUT_LOW_ACTIVE, /* default TSHUT LOW ACTIVE */
	.tshut_temp = 95000,
	.initialize = rk_tsadcv8_initialize,
	.irq_ack = rk_tsadcv4_irq_ack,
	.control = rk_tsadcv4_control,
	.get_temp = rk_tsadcv4_get_temp,
	.set_alarm_temp = rk_tsadcv3_alarm_temp,
	.set_tshut_temp = rk_tsadcv3_tshut_temp,
	.set_tshut_mode = rk_tsadcv3_tshut_mode,
	.table = {
		.id = rk3588_code_table,
		.length = ARRAY_SIZE(rk3588_code_table),
		.data_mask = TSADCV4_DATA_MASK,
		.mode = ADC_INCREMENT,
	},
};

static const struct of_device_id of_rockchip_thermal_match[] = {
	{
.compatible = "rockchip,px30-tsadc",
		.data = (void *)&px30_tsadc_data,
	},
	{
		.compatible = "rockchip,rv1108-tsadc",
		.data = (void *)&rv1108_tsadc_data,
	},
	{
		.compatible = "rockchip,rk3228-tsadc",
		.data = (void *)&rk3228_tsadc_data,
	},
	{
		.compatible = "rockchip,rk3288-tsadc",
		.data = (void *)&rk3288_tsadc_data,
	},
	{
		.compatible = "rockchip,rk3328-tsadc",
		.data = (void *)&rk3328_tsadc_data,
	},
	{
		.compatible = "rockchip,rk3366-tsadc",
		.data = (void *)&rk3366_tsadc_data,
	},
	{
		.compatible = "rockchip,rk3368-tsadc",
		.data = (void *)&rk3368_tsadc_data,
	},
	{
		.compatible = "rockchip,rk3399-tsadc",
		.data = (void *)&rk3399_tsadc_data,
	},
	{
		.compatible = "rockchip,rk3568-tsadc",
		.data = (void *)&rk3568_tsadc_data,
	},
	{
		.compatible = "rockchip,rk3588-tsadc",
		.data = (void *)&rk3588_tsadc_data,
	},
	{ /* end */ },
};
MODULE_DEVICE_TABLE(of, of_rockchip_thermal_match);

/* Enable or disable the thermal zone attached to @sensor. */
static void
rockchip_thermal_toggle_sensor(struct rockchip_thermal_sensor *sensor, bool on)
{
	struct thermal_zone_device *tzd = sensor->tzd;

	if (on)
		thermal_zone_device_enable(tzd);
	else
		thermal_zone_device_disable(tzd);
}

/* Threaded IRQ handler: ack the alarm and notify all thermal zones. */
static irqreturn_t rockchip_thermal_alarm_irq_thread(int irq, void *dev)
{
	struct rockchip_thermal_data *thermal = dev;
	int i;

	dev_dbg(&thermal->pdev->dev, "thermal alarm\n");

	thermal->chip->irq_ack(thermal->regs);

	for (i = 0; i < thermal->chip->chn_num; i++)
		thermal_zone_device_update(thermal->sensors[i].tzd,
					   THERMAL_EVENT_UNSPECIFIED);

	return IRQ_HANDLED;
}

/* thermal_zone ops: only the high trip is programmed into the hardware. */
static int rockchip_thermal_set_trips(struct thermal_zone_device *tz,
				      int low, int high)
{
	struct rockchip_thermal_sensor *sensor = thermal_zone_device_priv(tz);
	struct rockchip_thermal_data *thermal = sensor->thermal;
	const struct rockchip_tsadc_chip *tsadc = thermal->chip;

	dev_dbg(&thermal->pdev->dev, "%s: sensor %d: low: %d, high %d\n",
		__func__, sensor->id, low, high);

	return tsadc->set_alarm_temp(&tsadc->table, sensor->id, thermal->regs,
				     high);
}

/* thermal_zone ops: read the current temperature for this sensor's channel. */
static int rockchip_thermal_get_temp(struct thermal_zone_device *tz,
				     int *out_temp)
{
	struct rockchip_thermal_sensor *sensor = thermal_zone_device_priv(tz);
	struct rockchip_thermal_data *thermal = sensor->thermal;
	const struct rockchip_tsadc_chip *tsadc = sensor->thermal->chip;
	int retval;

	retval = tsadc->get_temp(&tsadc->table, sensor->id, thermal->regs,
				 out_temp);

	return retval;
}

static const struct thermal_zone_device_ops rockchip_of_thermal_ops = {
	.get_temp = rockchip_thermal_get_temp,
	.set_trips = rockchip_thermal_set_trips,
};

/*
 * Parse optional DT properties (tshut temperature, mode and polarity) into
 * @thermal, falling back to the per-chip defaults, and look up the optional
 * GRF syscon. Returns 0 or a negative errno on invalid property values.
 */
static int rockchip_configure_from_dt(struct device *dev,
				      struct device_node *np,
				      struct rockchip_thermal_data *thermal)
{
	u32 shut_temp, tshut_mode, tshut_polarity;

	if (of_property_read_u32(np, "rockchip,hw-tshut-temp", &shut_temp)) {
		dev_warn(dev,
			 "Missing tshut temp property, using default %d\n",
			 thermal->chip->tshut_temp);
		thermal->tshut_temp = thermal->chip->tshut_temp;
	} else {
		if (shut_temp > INT_MAX) {
			/*
			 * NOTE(review): shut_temp is u32 but printed with %d,
			 * so values above INT_MAX print negative — consider %u.
			 */
			dev_err(dev, "Invalid tshut temperature specified: %d\n",
				shut_temp);
			return -ERANGE;
		}
		thermal->tshut_temp = shut_temp;
	}

	if (of_property_read_u32(np, "rockchip,hw-tshut-mode", &tshut_mode)) {
		dev_warn(dev,
			 "Missing tshut mode property, using default (%s)\n",
			 thermal->chip->tshut_mode == TSHUT_MODE_GPIO ?
				"gpio" : "cru");
		thermal->tshut_mode = thermal->chip->tshut_mode;
	} else {
		thermal->tshut_mode = tshut_mode;
	}

	if (thermal->tshut_mode > 1) {
		dev_err(dev, "Invalid tshut mode specified: %d\n",
			thermal->tshut_mode);
		return -EINVAL;
	}

	if (of_property_read_u32(np, "rockchip,hw-tshut-polarity",
				 &tshut_polarity)) {
		dev_warn(dev,
			 "Missing tshut-polarity property, using default (%s)\n",
			 thermal->chip->tshut_polarity == TSHUT_LOW_ACTIVE ?
				"low" : "high");
		thermal->tshut_polarity = thermal->chip->tshut_polarity;
	} else {
		thermal->tshut_polarity = tshut_polarity;
	}

	if (thermal->tshut_polarity > 1) {
		dev_err(dev, "Invalid tshut-polarity specified: %d\n",
			thermal->tshut_polarity);
		return -EINVAL;
	}

	/* The tsadc won't handle the error here since some SoCs don't
	 * need this property.
	 */
	thermal->grf = syscon_regmap_lookup_by_phandle(np, "rockchip,grf");
	if (IS_ERR(thermal->grf))
		dev_warn(dev, "Missing rockchip,grf property\n");

	return 0;
}

/*
 * Program tshut mode/threshold for channel @id and register its thermal zone.
 * A failed tshut threshold write is only logged (probe continues); a failed
 * zone registration is fatal.
 */
static int
rockchip_thermal_register_sensor(struct platform_device *pdev,
				 struct rockchip_thermal_data *thermal,
				 struct rockchip_thermal_sensor *sensor,
				 int id)
{
	const struct rockchip_tsadc_chip *tsadc = thermal->chip;
	int error;

	tsadc->set_tshut_mode(id, thermal->regs, thermal->tshut_mode);

	error = tsadc->set_tshut_temp(&tsadc->table, id, thermal->regs,
			      thermal->tshut_temp);
	if (error)
		dev_err(&pdev->dev, "%s: invalid tshut=%d, error=%d\n",
			__func__, thermal->tshut_temp, error);

	sensor->thermal = thermal;
	sensor->id = id;
	sensor->tzd = devm_thermal_of_zone_register(&pdev->dev, id, sensor,
						    &rockchip_of_thermal_ops);
	if (IS_ERR(sensor->tzd)) {
		error = PTR_ERR(sensor->tzd);
		dev_err(&pdev->dev, "failed to register sensor %d: %d\n",
			id, error);
		return error;
	}

	return 0;
}

/**
 * rockchip_thermal_reset_controller - Reset TSADC Controller, reset all tsadc registers.
 * @reset: the reset controller of tsadc
 */
static void rockchip_thermal_reset_controller(struct reset_control *reset)
{
	reset_control_assert(reset);
	usleep_range(10, 20);
	reset_control_deassert(reset);
}

/*
 * Probe: map registers, acquire reset/clocks (all devm-managed), reset and
 * initialize the controller, register one thermal zone per channel, request
 * the alarm IRQ, then enable conversion and the zones.
 */
static int rockchip_thermal_probe(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	struct rockchip_thermal_data *thermal;
	int irq;
	int i;
	int error;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return -EINVAL;

	thermal = devm_kzalloc(&pdev->dev, sizeof(struct rockchip_thermal_data),
			       GFP_KERNEL);
	if (!thermal)
		return -ENOMEM;

	thermal->pdev = pdev;

	thermal->chip = device_get_match_data(&pdev->dev);
	if (!thermal->chip)
		return -EINVAL;

	thermal->sensors = devm_kcalloc(&pdev->dev, thermal->chip->chn_num,
					sizeof(*thermal->sensors), GFP_KERNEL);
	if (!thermal->sensors)
		return -ENOMEM;

	thermal->regs = devm_platform_get_and_ioremap_resource(pdev, 0, NULL);
	if (IS_ERR(thermal->regs))
		return PTR_ERR(thermal->regs);

	thermal->reset = devm_reset_control_array_get_exclusive(&pdev->dev);
	if (IS_ERR(thermal->reset))
		return dev_err_probe(&pdev->dev, PTR_ERR(thermal->reset),
				     "failed to get tsadc reset.\n");

	thermal->clk = devm_clk_get_enabled(&pdev->dev, "tsadc");
	if (IS_ERR(thermal->clk))
		return dev_err_probe(&pdev->dev, PTR_ERR(thermal->clk),
				     "failed to get tsadc clock.\n");

	thermal->pclk = devm_clk_get_enabled(&pdev->dev, "apb_pclk");
	if (IS_ERR(thermal->pclk))
		return dev_err_probe(&pdev->dev, PTR_ERR(thermal->pclk),
				     "failed to get apb_pclk clock.\n");

	rockchip_thermal_reset_controller(thermal->reset);

	error = rockchip_configure_from_dt(&pdev->dev, np, thermal);
	if (error)
		return dev_err_probe(&pdev->dev, error,
				     "failed to parse device tree data\n");

	thermal->chip->initialize(thermal->grf, thermal->regs,
				  thermal->tshut_polarity);

	for (i = 0; i < thermal->chip->chn_num; i++) {
		error = rockchip_thermal_register_sensor(pdev, thermal,
						&thermal->sensors[i],
						thermal->chip->chn_offset + i);
		if (error)
			return dev_err_probe(&pdev->dev, error,
					     "failed to register sensor[%d].\n",
					     i);
	}

	error = devm_request_threaded_irq(&pdev->dev, irq, NULL,
					  &rockchip_thermal_alarm_irq_thread,
					  IRQF_ONESHOT,
					  "rockchip_thermal", thermal);
	if (error)
		return dev_err_probe(&pdev->dev, error,
				     "failed to request tsadc irq.\n");

	thermal->chip->control(thermal->regs, true);

	for (i = 0; i < thermal->chip->chn_num; i++) {
		rockchip_thermal_toggle_sensor(&thermal->sensors[i], true);
		/* hwmon registration failure is non-fatal — only warn. */
		error = thermal_add_hwmon_sysfs(thermal->sensors[i].tzd);
		if (error)
			dev_warn(&pdev->dev,
				 "failed to register sensor %d with hwmon: %d\n",
				 i, error);
	}

	platform_set_drvdata(pdev, thermal);

	return 0;
}

/* Remove: unregister hwmon/zones and stop conversion; devm frees the rest. */
static int rockchip_thermal_remove(struct platform_device *pdev)
{
	struct rockchip_thermal_data *thermal = platform_get_drvdata(pdev);
	int i;

	for (i = 0; i < thermal->chip->chn_num; i++) {
		struct rockchip_thermal_sensor *sensor = &thermal->sensors[i];

		thermal_remove_hwmon_sysfs(sensor->tzd);
		rockchip_thermal_toggle_sensor(sensor, false);
	}

	thermal->chip->control(thermal->regs, false);

	return 0;
}

/* Suspend: disable zones and conversion, gate clocks, select sleep pinctrl. */
static int __maybe_unused rockchip_thermal_suspend(struct device *dev)
{
	struct rockchip_thermal_data *thermal = dev_get_drvdata(dev);
	int i;

	for (i = 0; i < thermal->chip->chn_num; i++)
		rockchip_thermal_toggle_sensor(&thermal->sensors[i], false);

	thermal->chip->control(thermal->regs, false);

	clk_disable(thermal->pclk);
	clk_disable(thermal->clk);

	pinctrl_pm_select_sleep_state(dev);

	return 0;
}

/* Resume: reverse of suspend — the controller loses state, so it is fully
 * reset, re-initialized and every channel's tshut is re-programmed. */
static int __maybe_unused rockchip_thermal_resume(struct device *dev)
{
	struct rockchip_thermal_data *thermal = dev_get_drvdata(dev);
	int i;
	int error;

	error = clk_enable(thermal->clk);
	if (error)
		return error;

	error = clk_enable(thermal->pclk);
	if (error) {
		clk_disable(thermal->clk);
		return error;
	}

	rockchip_thermal_reset_controller(thermal->reset);

	thermal->chip->initialize(thermal->grf, thermal->regs,
				  thermal->tshut_polarity);

	for (i = 0; i < thermal->chip->chn_num; i++) {
		int id = thermal->sensors[i].id;

		thermal->chip->set_tshut_mode(id, thermal->regs,
					      thermal->tshut_mode);

		error = thermal->chip->set_tshut_temp(&thermal->chip->table,
					      id, thermal->regs,
					      thermal->tshut_temp);
		if (error)
			dev_err(dev, "%s: invalid tshut=%d, error=%d\n",
				__func__, thermal->tshut_temp, error);
	}

	thermal->chip->control(thermal->regs, true);

	for (i = 0; i < thermal->chip->chn_num; i++)
		rockchip_thermal_toggle_sensor(&thermal->sensors[i], true);

	pinctrl_pm_select_default_state(dev);

	return 0;
}

static SIMPLE_DEV_PM_OPS(rockchip_thermal_pm_ops,
			 rockchip_thermal_suspend, rockchip_thermal_resume);

static struct platform_driver rockchip_thermal_driver = {
	.driver = {
		.name = "rockchip-thermal",
		.pm = &rockchip_thermal_pm_ops,
		.of_match_table = of_rockchip_thermal_match,
	},
	.probe = rockchip_thermal_probe,
	.remove = rockchip_thermal_remove,
};

module_platform_driver(rockchip_thermal_driver);

MODULE_DESCRIPTION("ROCKCHIP THERMAL Driver");
MODULE_AUTHOR("Rockchip, Inc.");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:rockchip-thermal");
linux-master
drivers/thermal/rockchip_thermal.c
// SPDX-License-Identifier: GPL-2.0 /* * of-thermal.c - Generic Thermal Management device tree support. * * Copyright (C) 2013 Texas Instruments * Copyright (C) 2013 Eduardo Valentin <[email protected]> */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/err.h> #include <linux/export.h> #include <linux/of.h> #include <linux/slab.h> #include <linux/thermal.h> #include <linux/types.h> #include <linux/string.h> #include "thermal_core.h" /*** functions parsing device tree nodes ***/ static int of_find_trip_id(struct device_node *np, struct device_node *trip) { struct device_node *trips; struct device_node *t; int i = 0; trips = of_get_child_by_name(np, "trips"); if (!trips) { pr_err("Failed to find 'trips' node\n"); return -EINVAL; } /* * Find the trip id point associated with the cooling device map */ for_each_child_of_node(trips, t) { if (t == trip) { of_node_put(t); goto out; } i++; } i = -ENXIO; out: of_node_put(trips); return i; } /* * It maps 'enum thermal_trip_type' found in include/linux/thermal.h * into the device tree binding of 'trip', property type. */ static const char * const trip_types[] = { [THERMAL_TRIP_ACTIVE] = "active", [THERMAL_TRIP_PASSIVE] = "passive", [THERMAL_TRIP_HOT] = "hot", [THERMAL_TRIP_CRITICAL] = "critical", }; /** * thermal_of_get_trip_type - Get phy mode for given device_node * @np: Pointer to the given device_node * @type: Pointer to resulting trip type * * The function gets trip type string from property 'type', * and store its index in trip_types table in @type, * * Return: 0 on success, or errno in error case. 
*/ static int thermal_of_get_trip_type(struct device_node *np, enum thermal_trip_type *type) { const char *t; int err, i; err = of_property_read_string(np, "type", &t); if (err < 0) return err; for (i = 0; i < ARRAY_SIZE(trip_types); i++) if (!strcasecmp(t, trip_types[i])) { *type = i; return 0; } return -ENODEV; } static int thermal_of_populate_trip(struct device_node *np, struct thermal_trip *trip) { int prop; int ret; ret = of_property_read_u32(np, "temperature", &prop); if (ret < 0) { pr_err("missing temperature property\n"); return ret; } trip->temperature = prop; ret = of_property_read_u32(np, "hysteresis", &prop); if (ret < 0) { pr_err("missing hysteresis property\n"); return ret; } trip->hysteresis = prop; ret = thermal_of_get_trip_type(np, &trip->type); if (ret < 0) { pr_err("wrong trip type property\n"); return ret; } return 0; } static struct thermal_trip *thermal_of_trips_init(struct device_node *np, int *ntrips) { struct thermal_trip *tt; struct device_node *trips, *trip; int ret, count; trips = of_get_child_by_name(np, "trips"); if (!trips) { pr_err("Failed to find 'trips' node\n"); return ERR_PTR(-EINVAL); } count = of_get_child_count(trips); if (!count) { pr_err("No trip point defined\n"); ret = -EINVAL; goto out_of_node_put; } tt = kzalloc(sizeof(*tt) * count, GFP_KERNEL); if (!tt) { ret = -ENOMEM; goto out_of_node_put; } *ntrips = count; count = 0; for_each_child_of_node(trips, trip) { ret = thermal_of_populate_trip(trip, &tt[count++]); if (ret) goto out_kfree; } of_node_put(trips); return tt; out_kfree: kfree(tt); *ntrips = 0; out_of_node_put: of_node_put(trips); return ERR_PTR(ret); } static struct device_node *of_thermal_zone_find(struct device_node *sensor, int id) { struct device_node *np, *tz; struct of_phandle_args sensor_specs; np = of_find_node_by_name(NULL, "thermal-zones"); if (!np) { pr_debug("No thermal zones description\n"); return ERR_PTR(-ENODEV); } /* * Search for each thermal zone, a defined sensor * corresponding to the one 
passed as parameter */ for_each_available_child_of_node(np, tz) { int count, i; count = of_count_phandle_with_args(tz, "thermal-sensors", "#thermal-sensor-cells"); if (count <= 0) { pr_err("%pOFn: missing thermal sensor\n", tz); tz = ERR_PTR(-EINVAL); goto out; } for (i = 0; i < count; i++) { int ret; ret = of_parse_phandle_with_args(tz, "thermal-sensors", "#thermal-sensor-cells", i, &sensor_specs); if (ret < 0) { pr_err("%pOFn: Failed to read thermal-sensors cells: %d\n", tz, ret); tz = ERR_PTR(ret); goto out; } if ((sensor == sensor_specs.np) && id == (sensor_specs.args_count ? sensor_specs.args[0] : 0)) { pr_debug("sensor %pOFn id=%d belongs to %pOFn\n", sensor, id, tz); goto out; } } } tz = ERR_PTR(-ENODEV); out: of_node_put(np); return tz; } static int thermal_of_monitor_init(struct device_node *np, int *delay, int *pdelay) { int ret; ret = of_property_read_u32(np, "polling-delay-passive", pdelay); if (ret < 0) { pr_err("%pOFn: missing polling-delay-passive property\n", np); return ret; } ret = of_property_read_u32(np, "polling-delay", delay); if (ret < 0) { pr_err("%pOFn: missing polling-delay property\n", np); return ret; } return 0; } static void thermal_of_parameters_init(struct device_node *np, struct thermal_zone_params *tzp) { int coef[2]; int ncoef = ARRAY_SIZE(coef); int prop, ret; tzp->no_hwmon = true; if (!of_property_read_u32(np, "sustainable-power", &prop)) tzp->sustainable_power = prop; /* * For now, the thermal framework supports only one sensor per * thermal zone. Thus, we are considering only the first two * values as slope and offset. 
*/ ret = of_property_read_u32_array(np, "coefficients", coef, ncoef); if (ret) { coef[0] = 1; coef[1] = 0; } tzp->slope = coef[0]; tzp->offset = coef[1]; } static struct device_node *thermal_of_zone_get_by_name(struct thermal_zone_device *tz) { struct device_node *np, *tz_np; np = of_find_node_by_name(NULL, "thermal-zones"); if (!np) return ERR_PTR(-ENODEV); tz_np = of_get_child_by_name(np, tz->type); of_node_put(np); if (!tz_np) return ERR_PTR(-ENODEV); return tz_np; } static int __thermal_of_unbind(struct device_node *map_np, int index, int trip_id, struct thermal_zone_device *tz, struct thermal_cooling_device *cdev) { struct of_phandle_args cooling_spec; int ret; ret = of_parse_phandle_with_args(map_np, "cooling-device", "#cooling-cells", index, &cooling_spec); if (ret < 0) { pr_err("Invalid cooling-device entry\n"); return ret; } of_node_put(cooling_spec.np); if (cooling_spec.args_count < 2) { pr_err("wrong reference to cooling device, missing limits\n"); return -EINVAL; } if (cooling_spec.np != cdev->np) return 0; ret = thermal_zone_unbind_cooling_device(tz, trip_id, cdev); if (ret) pr_err("Failed to unbind '%s' with '%s': %d\n", tz->type, cdev->type, ret); return ret; } static int __thermal_of_bind(struct device_node *map_np, int index, int trip_id, struct thermal_zone_device *tz, struct thermal_cooling_device *cdev) { struct of_phandle_args cooling_spec; int ret, weight = THERMAL_WEIGHT_DEFAULT; of_property_read_u32(map_np, "contribution", &weight); ret = of_parse_phandle_with_args(map_np, "cooling-device", "#cooling-cells", index, &cooling_spec); if (ret < 0) { pr_err("Invalid cooling-device entry\n"); return ret; } of_node_put(cooling_spec.np); if (cooling_spec.args_count < 2) { pr_err("wrong reference to cooling device, missing limits\n"); return -EINVAL; } if (cooling_spec.np != cdev->np) return 0; ret = thermal_zone_bind_cooling_device(tz, trip_id, cdev, cooling_spec.args[1], cooling_spec.args[0], weight); if (ret) pr_err("Failed to bind '%s' with '%s': 
%d\n", tz->type, cdev->type, ret); return ret; } static int thermal_of_for_each_cooling_device(struct device_node *tz_np, struct device_node *map_np, struct thermal_zone_device *tz, struct thermal_cooling_device *cdev, int (*action)(struct device_node *, int, int, struct thermal_zone_device *, struct thermal_cooling_device *)) { struct device_node *tr_np; int count, i, trip_id; tr_np = of_parse_phandle(map_np, "trip", 0); if (!tr_np) return -ENODEV; trip_id = of_find_trip_id(tz_np, tr_np); if (trip_id < 0) return trip_id; count = of_count_phandle_with_args(map_np, "cooling-device", "#cooling-cells"); if (count <= 0) { pr_err("Add a cooling_device property with at least one device\n"); return -ENOENT; } /* * At this point, we don't want to bail out when there is an * error, we will try to bind/unbind as many as possible * cooling devices */ for (i = 0; i < count; i++) action(map_np, i, trip_id, tz, cdev); return 0; } static int thermal_of_for_each_cooling_maps(struct thermal_zone_device *tz, struct thermal_cooling_device *cdev, int (*action)(struct device_node *, int, int, struct thermal_zone_device *, struct thermal_cooling_device *)) { struct device_node *tz_np, *cm_np, *child; int ret = 0; tz_np = thermal_of_zone_get_by_name(tz); if (IS_ERR(tz_np)) { pr_err("Failed to get node tz by name\n"); return PTR_ERR(tz_np); } cm_np = of_get_child_by_name(tz_np, "cooling-maps"); if (!cm_np) goto out; for_each_child_of_node(cm_np, child) { ret = thermal_of_for_each_cooling_device(tz_np, child, tz, cdev, action); if (ret) { of_node_put(child); break; } } of_node_put(cm_np); out: of_node_put(tz_np); return ret; } static int thermal_of_bind(struct thermal_zone_device *tz, struct thermal_cooling_device *cdev) { return thermal_of_for_each_cooling_maps(tz, cdev, __thermal_of_bind); } static int thermal_of_unbind(struct thermal_zone_device *tz, struct thermal_cooling_device *cdev) { return thermal_of_for_each_cooling_maps(tz, cdev, __thermal_of_unbind); } /** * 
thermal_of_zone_unregister - Cleanup the specific allocated ressources * * This function disables the thermal zone and frees the different * ressources allocated specific to the thermal OF. * * @tz: a pointer to the thermal zone structure */ static void thermal_of_zone_unregister(struct thermal_zone_device *tz) { struct thermal_trip *trips = tz->trips; struct thermal_zone_device_ops *ops = tz->ops; thermal_zone_device_disable(tz); thermal_zone_device_unregister(tz); kfree(trips); kfree(ops); } /** * thermal_of_zone_register - Register a thermal zone with device node * sensor * * The thermal_of_zone_register() parses a device tree given a device * node sensor and identifier. It searches for the thermal zone * associated to the couple sensor/id and retrieves all the thermal * zone properties and registers new thermal zone with those * properties. * * @sensor: A device node pointer corresponding to the sensor in the device tree * @id: An integer as sensor identifier * @data: A private data to be stored in the thermal zone dedicated private area * @ops: A set of thermal sensor ops * * Return: a valid thermal zone structure pointer on success. 
* - EINVAL: if the device tree thermal description is malformed * - ENOMEM: if one structure can not be allocated * - Other negative errors are returned by the underlying called functions */ static struct thermal_zone_device *thermal_of_zone_register(struct device_node *sensor, int id, void *data, const struct thermal_zone_device_ops *ops) { struct thermal_zone_device *tz; struct thermal_trip *trips; struct thermal_zone_params tzp = {}; struct thermal_zone_device_ops *of_ops; struct device_node *np; int delay, pdelay; int ntrips, mask; int ret; of_ops = kmemdup(ops, sizeof(*ops), GFP_KERNEL); if (!of_ops) return ERR_PTR(-ENOMEM); np = of_thermal_zone_find(sensor, id); if (IS_ERR(np)) { if (PTR_ERR(np) != -ENODEV) pr_err("Failed to find thermal zone for %pOFn id=%d\n", sensor, id); ret = PTR_ERR(np); goto out_kfree_of_ops; } trips = thermal_of_trips_init(np, &ntrips); if (IS_ERR(trips)) { pr_err("Failed to find trip points for %pOFn id=%d\n", sensor, id); ret = PTR_ERR(trips); goto out_kfree_of_ops; } ret = thermal_of_monitor_init(np, &delay, &pdelay); if (ret) { pr_err("Failed to initialize monitoring delays from %pOFn\n", np); goto out_kfree_trips; } thermal_of_parameters_init(np, &tzp); of_ops->bind = thermal_of_bind; of_ops->unbind = thermal_of_unbind; mask = GENMASK_ULL((ntrips) - 1, 0); tz = thermal_zone_device_register_with_trips(np->name, trips, ntrips, mask, data, of_ops, &tzp, pdelay, delay); if (IS_ERR(tz)) { ret = PTR_ERR(tz); pr_err("Failed to register thermal zone %pOFn: %d\n", np, ret); goto out_kfree_trips; } ret = thermal_zone_device_enable(tz); if (ret) { pr_err("Failed to enabled thermal zone '%s', id=%d: %d\n", tz->type, tz->id, ret); thermal_of_zone_unregister(tz); return ERR_PTR(ret); } return tz; out_kfree_trips: kfree(trips); out_kfree_of_ops: kfree(of_ops); return ERR_PTR(ret); } static void devm_thermal_of_zone_release(struct device *dev, void *res) { thermal_of_zone_unregister(*(struct thermal_zone_device **)res); } static int 
devm_thermal_of_zone_match(struct device *dev, void *res, void *data) { struct thermal_zone_device **r = res; if (WARN_ON(!r || !*r)) return 0; return *r == data; } /** * devm_thermal_of_zone_register - register a thermal tied with the sensor life cycle * * This function is the device version of the thermal_of_zone_register() function. * * @dev: a device structure pointer to sensor to be tied with the thermal zone OF life cycle * @sensor_id: the sensor identifier * @data: a pointer to a private data to be stored in the thermal zone 'devdata' field * @ops: a pointer to the ops structure associated with the sensor */ struct thermal_zone_device *devm_thermal_of_zone_register(struct device *dev, int sensor_id, void *data, const struct thermal_zone_device_ops *ops) { struct thermal_zone_device **ptr, *tzd; ptr = devres_alloc(devm_thermal_of_zone_release, sizeof(*ptr), GFP_KERNEL); if (!ptr) return ERR_PTR(-ENOMEM); tzd = thermal_of_zone_register(dev->of_node, sensor_id, data, ops); if (IS_ERR(tzd)) { devres_free(ptr); return tzd; } *ptr = tzd; devres_add(dev, ptr); return tzd; } EXPORT_SYMBOL_GPL(devm_thermal_of_zone_register); /** * devm_thermal_of_zone_unregister - Resource managed version of * thermal_of_zone_unregister(). * @dev: Device for which which resource was allocated. * @tz: a pointer to struct thermal_zone where the sensor is registered. * * This function removes the sensor callbacks and private data from the * thermal zone device registered with devm_thermal_zone_of_sensor_register() * API. It will also silent the zone by remove the .get_temp() and .get_trend() * thermal zone device callbacks. * Normally this function will not need to be called and the resource * management code will ensure that the resource is freed. */ void devm_thermal_of_zone_unregister(struct device *dev, struct thermal_zone_device *tz) { WARN_ON(devres_release(dev, devm_thermal_of_zone_release, devm_thermal_of_zone_match, tz)); } EXPORT_SYMBOL_GPL(devm_thermal_of_zone_unregister);
linux-master
drivers/thermal/thermal_of.c
// SPDX-License-Identifier: GPL-2.0-only /* * SPEAr thermal driver. * * Copyright (C) 2011-2012 ST Microelectronics * Author: Vincenzo Frascino <[email protected]> */ #include <linux/clk.h> #include <linux/device.h> #include <linux/err.h> #include <linux/io.h> #include <linux/kernel.h> #include <linux/of.h> #include <linux/module.h> #include <linux/platform_device.h> #include <linux/thermal.h> #define MD_FACTOR 1000 /* SPEAr Thermal Sensor Dev Structure */ struct spear_thermal_dev { /* pointer to base address of the thermal sensor */ void __iomem *thermal_base; /* clk structure */ struct clk *clk; /* pointer to thermal flags */ unsigned int flags; }; static inline int thermal_get_temp(struct thermal_zone_device *thermal, int *temp) { struct spear_thermal_dev *stdev = thermal_zone_device_priv(thermal); /* * Data are ready to be read after 628 usec from POWERDOWN signal * (PDN) = 1 */ *temp = (readl_relaxed(stdev->thermal_base) & 0x7F) * MD_FACTOR; return 0; } static struct thermal_zone_device_ops ops = { .get_temp = thermal_get_temp, }; static int __maybe_unused spear_thermal_suspend(struct device *dev) { struct thermal_zone_device *spear_thermal = dev_get_drvdata(dev); struct spear_thermal_dev *stdev = thermal_zone_device_priv(spear_thermal); unsigned int actual_mask = 0; /* Disable SPEAr Thermal Sensor */ actual_mask = readl_relaxed(stdev->thermal_base); writel_relaxed(actual_mask & ~stdev->flags, stdev->thermal_base); clk_disable(stdev->clk); dev_info(dev, "Suspended.\n"); return 0; } static int __maybe_unused spear_thermal_resume(struct device *dev) { struct thermal_zone_device *spear_thermal = dev_get_drvdata(dev); struct spear_thermal_dev *stdev = thermal_zone_device_priv(spear_thermal); unsigned int actual_mask = 0; int ret = 0; ret = clk_enable(stdev->clk); if (ret) { dev_err(dev, "Can't enable clock\n"); return ret; } /* Enable SPEAr Thermal Sensor */ actual_mask = readl_relaxed(stdev->thermal_base); writel_relaxed(actual_mask | stdev->flags, 
stdev->thermal_base); dev_info(dev, "Resumed.\n"); return 0; } static SIMPLE_DEV_PM_OPS(spear_thermal_pm_ops, spear_thermal_suspend, spear_thermal_resume); static int spear_thermal_probe(struct platform_device *pdev) { struct thermal_zone_device *spear_thermal = NULL; struct spear_thermal_dev *stdev; struct device_node *np = pdev->dev.of_node; int ret = 0, val; if (!np || !of_property_read_u32(np, "st,thermal-flags", &val)) { dev_err(&pdev->dev, "Failed: DT Pdata not passed\n"); return -EINVAL; } stdev = devm_kzalloc(&pdev->dev, sizeof(*stdev), GFP_KERNEL); if (!stdev) return -ENOMEM; /* Enable thermal sensor */ stdev->thermal_base = devm_platform_get_and_ioremap_resource(pdev, 0, NULL); if (IS_ERR(stdev->thermal_base)) return PTR_ERR(stdev->thermal_base); stdev->clk = devm_clk_get(&pdev->dev, NULL); if (IS_ERR(stdev->clk)) { dev_err(&pdev->dev, "Can't get clock\n"); return PTR_ERR(stdev->clk); } ret = clk_enable(stdev->clk); if (ret) { dev_err(&pdev->dev, "Can't enable clock\n"); return ret; } stdev->flags = val; writel_relaxed(stdev->flags, stdev->thermal_base); spear_thermal = thermal_tripless_zone_device_register("spear_thermal", stdev, &ops, NULL); if (IS_ERR(spear_thermal)) { dev_err(&pdev->dev, "thermal zone device is NULL\n"); ret = PTR_ERR(spear_thermal); goto disable_clk; } ret = thermal_zone_device_enable(spear_thermal); if (ret) { dev_err(&pdev->dev, "Cannot enable thermal zone\n"); goto unregister_tzd; } platform_set_drvdata(pdev, spear_thermal); dev_info(&pdev->dev, "Thermal Sensor Loaded at: 0x%p.\n", stdev->thermal_base); return 0; unregister_tzd: thermal_zone_device_unregister(spear_thermal); disable_clk: clk_disable(stdev->clk); return ret; } static int spear_thermal_exit(struct platform_device *pdev) { unsigned int actual_mask = 0; struct thermal_zone_device *spear_thermal = platform_get_drvdata(pdev); struct spear_thermal_dev *stdev = thermal_zone_device_priv(spear_thermal); thermal_zone_device_unregister(spear_thermal); /* Disable SPEAr Thermal 
Sensor */ actual_mask = readl_relaxed(stdev->thermal_base); writel_relaxed(actual_mask & ~stdev->flags, stdev->thermal_base); clk_disable(stdev->clk); return 0; } static const struct of_device_id spear_thermal_id_table[] = { { .compatible = "st,thermal-spear1340" }, {} }; MODULE_DEVICE_TABLE(of, spear_thermal_id_table); static struct platform_driver spear_thermal_driver = { .probe = spear_thermal_probe, .remove = spear_thermal_exit, .driver = { .name = "spear_thermal", .pm = &spear_thermal_pm_ops, .of_match_table = spear_thermal_id_table, }, }; module_platform_driver(spear_thermal_driver); MODULE_AUTHOR("Vincenzo Frascino <[email protected]>"); MODULE_DESCRIPTION("SPEAr thermal driver"); MODULE_LICENSE("GPL");
linux-master
drivers/thermal/spear_thermal.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * intel_tcc.c - Library for Intel TCC (thermal control circuitry) MSR access
 * Copyright (c) 2022, Intel Corporation.
 */

#include <linux/errno.h>
#include <linux/intel_tcc.h>
#include <asm/msr.h>

/**
 * intel_tcc_get_tjmax() - returns the default TCC activation Temperature
 * @cpu: cpu that the MSR should be run on, negative value means any cpu.
 *
 * Get the TjMax value, which is the default thermal throttling or TCC
 * activation temperature in degrees C.
 *
 * Return: Tjmax value in degrees C on success, negative error code otherwise.
 */
int intel_tcc_get_tjmax(int cpu)
{
	u32 low, high;
	int val, err;

	if (cpu < 0)
		err = rdmsr_safe(MSR_IA32_TEMPERATURE_TARGET, &low, &high);
	else
		err = rdmsr_safe_on_cpu(cpu, MSR_IA32_TEMPERATURE_TARGET, &low, &high);
	if (err)
		return err;

	/* TjMax lives in bits 23:16 of MSR_IA32_TEMPERATURE_TARGET */
	val = (low >> 16) & 0xff;

	/* a zero TjMax is meaningless — report it as missing data */
	return val ? val : -ENODATA;
}
EXPORT_SYMBOL_NS_GPL(intel_tcc_get_tjmax, INTEL_TCC);

/**
 * intel_tcc_get_offset() - returns the TCC Offset value to Tjmax
 * @cpu: cpu that the MSR should be run on, negative value means any cpu.
 *
 * Get the TCC offset value to Tjmax. The effective thermal throttling or TCC
 * activation temperature equals "Tjmax" - "TCC Offset", in degrees C.
 *
 * Return: Tcc offset value in degrees C on success, negative error code otherwise.
 */
int intel_tcc_get_offset(int cpu)
{
	u32 low, high;
	int err;

	if (cpu < 0)
		err = rdmsr_safe(MSR_IA32_TEMPERATURE_TARGET, &low, &high);
	else
		err = rdmsr_safe_on_cpu(cpu, MSR_IA32_TEMPERATURE_TARGET, &low, &high);
	if (err)
		return err;

	/* TCC offset lives in bits 29:24 */
	return (low >> 24) & 0x3f;
}
EXPORT_SYMBOL_NS_GPL(intel_tcc_get_offset, INTEL_TCC);

/**
 * intel_tcc_set_offset() - set the TCC offset value to Tjmax
 * @cpu: cpu that the MSR should be run on, negative value means any cpu.
 * @offset: TCC offset value in degree C
 *
 * Set the TCC Offset value to Tjmax. The effective thermal throttling or TCC
 * activation temperature equals "Tjmax" - "TCC Offset", in degree C.
 *
 * Return: On success returns 0, negative error code otherwise.
 */

int intel_tcc_set_offset(int cpu, int offset)
{
	u32 low, high;
	int err;

	/* offset field is 6 bits wide */
	if (offset < 0 || offset > 0x3f)
		return -EINVAL;

	if (cpu < 0)
		err = rdmsr_safe(MSR_IA32_TEMPERATURE_TARGET, &low, &high);
	else
		err = rdmsr_safe_on_cpu(cpu, MSR_IA32_TEMPERATURE_TARGET, &low, &high);
	if (err)
		return err;

	/* MSR Locked */
	if (low & BIT(31))
		return -EPERM;

	/* read-modify-write bits 29:24 only */
	low &= ~(0x3f << 24);
	low |= offset << 24;

	if (cpu < 0)
		return wrmsr_safe(MSR_IA32_TEMPERATURE_TARGET, low, high);
	else
		return wrmsr_safe_on_cpu(cpu, MSR_IA32_TEMPERATURE_TARGET, low, high);
}
EXPORT_SYMBOL_NS_GPL(intel_tcc_set_offset, INTEL_TCC);

/**
 * intel_tcc_get_temp() - returns the current temperature
 * @cpu: cpu that the MSR should be run on, negative value means any cpu.
 * @pkg: true: Package Thermal Sensor. false: Core Thermal Sensor.
 *
 * Get the current temperature returned by the CPU core/package level
 * thermal sensor, in degrees C.
 *
 * Return: Temperature in degrees C on success, negative error code otherwise.
 */
int intel_tcc_get_temp(int cpu, bool pkg)
{
	u32 low, high;
	u32 msr = pkg ? MSR_IA32_PACKAGE_THERM_STATUS : MSR_IA32_THERM_STATUS;
	int tjmax, temp, err;

	tjmax = intel_tcc_get_tjmax(cpu);
	if (tjmax < 0)
		return tjmax;

	if (cpu < 0)
		err = rdmsr_safe(msr, &low, &high);
	else
		err = rdmsr_safe_on_cpu(cpu, msr, &low, &high);
	if (err)
		return err;

	/* Temperature is beyond the valid thermal sensor range */
	if (!(low & BIT(31)))
		return -ENODATA;

	/* the MSR reports the delta below TjMax in bits 22:16 */
	temp = tjmax - ((low >> 16) & 0x7f);

	/* Do not allow negative CPU temperature */
	return temp >= 0 ? temp : -ENODATA;
}
EXPORT_SYMBOL_NS_GPL(intel_tcc_get_temp, INTEL_TCC);
linux-master
drivers/thermal/intel/intel_tcc.c
// SPDX-License-Identifier: GPL-2.0-only /* * intel_soc_dts_iosf.c * Copyright (c) 2015, Intel Corporation. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/bitops.h> #include <linux/intel_tcc.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/interrupt.h> #include <asm/iosf_mbi.h> #include "intel_soc_dts_iosf.h" #define SOC_DTS_OFFSET_ENABLE 0xB0 #define SOC_DTS_OFFSET_TEMP 0xB1 #define SOC_DTS_OFFSET_PTPS 0xB2 #define SOC_DTS_OFFSET_PTTS 0xB3 #define SOC_DTS_OFFSET_PTTSS 0xB4 #define SOC_DTS_OFFSET_PTMC 0x80 #define SOC_DTS_TE_AUX0 0xB5 #define SOC_DTS_TE_AUX1 0xB6 #define SOC_DTS_AUX0_ENABLE_BIT BIT(0) #define SOC_DTS_AUX1_ENABLE_BIT BIT(1) #define SOC_DTS_CPU_MODULE0_ENABLE_BIT BIT(16) #define SOC_DTS_CPU_MODULE1_ENABLE_BIT BIT(17) #define SOC_DTS_TE_SCI_ENABLE BIT(9) #define SOC_DTS_TE_SMI_ENABLE BIT(10) #define SOC_DTS_TE_MSI_ENABLE BIT(11) #define SOC_DTS_TE_APICA_ENABLE BIT(14) #define SOC_DTS_PTMC_APIC_DEASSERT_BIT BIT(4) /* DTS encoding for TJ MAX temperature */ #define SOC_DTS_TJMAX_ENCODING 0x7F /* Mask for two trips in status bits */ #define SOC_DTS_TRIP_MASK 0x03 static int update_trip_temp(struct intel_soc_dts_sensors *sensors, int thres_index, int temp) { int status; u32 temp_out; u32 out; unsigned long update_ptps; u32 store_ptps; u32 store_ptmc; u32 store_te_out; u32 te_out; u32 int_enable_bit = SOC_DTS_TE_APICA_ENABLE; if (sensors->intr_type == INTEL_SOC_DTS_INTERRUPT_MSI) int_enable_bit |= SOC_DTS_TE_MSI_ENABLE; temp_out = (sensors->tj_max - temp) / 1000; status = iosf_mbi_read(BT_MBI_UNIT_PMC, MBI_REG_READ, SOC_DTS_OFFSET_PTPS, &store_ptps); if (status) return status; update_ptps = store_ptps; bitmap_set_value8(&update_ptps, temp_out & 0xFF, thres_index * 8); out = update_ptps; status = iosf_mbi_write(BT_MBI_UNIT_PMC, MBI_REG_WRITE, SOC_DTS_OFFSET_PTPS, out); if (status) return status; pr_debug("update_trip_temp PTPS = %x\n", out); status = iosf_mbi_read(BT_MBI_UNIT_PMC, MBI_REG_READ, SOC_DTS_OFFSET_PTMC, &out); 
if (status) goto err_restore_ptps; store_ptmc = out; status = iosf_mbi_read(BT_MBI_UNIT_PMC, MBI_REG_READ, SOC_DTS_TE_AUX0 + thres_index, &te_out); if (status) goto err_restore_ptmc; store_te_out = te_out; /* Enable for CPU module 0 and module 1 */ out |= (SOC_DTS_CPU_MODULE0_ENABLE_BIT | SOC_DTS_CPU_MODULE1_ENABLE_BIT); if (temp) { if (thres_index) out |= SOC_DTS_AUX1_ENABLE_BIT; else out |= SOC_DTS_AUX0_ENABLE_BIT; te_out |= int_enable_bit; } else { if (thres_index) out &= ~SOC_DTS_AUX1_ENABLE_BIT; else out &= ~SOC_DTS_AUX0_ENABLE_BIT; te_out &= ~int_enable_bit; } status = iosf_mbi_write(BT_MBI_UNIT_PMC, MBI_REG_WRITE, SOC_DTS_OFFSET_PTMC, out); if (status) goto err_restore_te_out; status = iosf_mbi_write(BT_MBI_UNIT_PMC, MBI_REG_WRITE, SOC_DTS_TE_AUX0 + thres_index, te_out); if (status) goto err_restore_te_out; return 0; err_restore_te_out: iosf_mbi_write(BT_MBI_UNIT_PMC, MBI_REG_WRITE, SOC_DTS_OFFSET_PTMC, store_te_out); err_restore_ptmc: iosf_mbi_write(BT_MBI_UNIT_PMC, MBI_REG_WRITE, SOC_DTS_OFFSET_PTMC, store_ptmc); err_restore_ptps: iosf_mbi_write(BT_MBI_UNIT_PMC, MBI_REG_WRITE, SOC_DTS_OFFSET_PTPS, store_ptps); /* Nothing we can do if restore fails */ return status; } static int configure_trip(struct intel_soc_dts_sensor_entry *dts, int thres_index, enum thermal_trip_type trip_type, int temp) { int ret; ret = update_trip_temp(dts->sensors, thres_index, temp); if (ret) return ret; dts->trips[thres_index].temperature = temp; dts->trips[thres_index].type = trip_type; return 0; } static int sys_set_trip_temp(struct thermal_zone_device *tzd, int trip, int temp) { struct intel_soc_dts_sensor_entry *dts = thermal_zone_device_priv(tzd); struct intel_soc_dts_sensors *sensors = dts->sensors; int status; if (temp > sensors->tj_max) return -EINVAL; mutex_lock(&sensors->dts_update_lock); status = update_trip_temp(sensors, trip, temp); mutex_unlock(&sensors->dts_update_lock); return status; } static int sys_get_curr_temp(struct thermal_zone_device *tzd, int *temp) { int 
status; u32 out; struct intel_soc_dts_sensor_entry *dts = thermal_zone_device_priv(tzd); struct intel_soc_dts_sensors *sensors; unsigned long raw; sensors = dts->sensors; status = iosf_mbi_read(BT_MBI_UNIT_PMC, MBI_REG_READ, SOC_DTS_OFFSET_TEMP, &out); if (status) return status; raw = out; out = bitmap_get_value8(&raw, dts->id * 8) - SOC_DTS_TJMAX_ENCODING; *temp = sensors->tj_max - out * 1000; return 0; } static struct thermal_zone_device_ops tzone_ops = { .get_temp = sys_get_curr_temp, .set_trip_temp = sys_set_trip_temp, }; static int soc_dts_enable(int id) { u32 out; int ret; ret = iosf_mbi_read(BT_MBI_UNIT_PMC, MBI_REG_READ, SOC_DTS_OFFSET_ENABLE, &out); if (ret) return ret; if (!(out & BIT(id))) { out |= BIT(id); ret = iosf_mbi_write(BT_MBI_UNIT_PMC, MBI_REG_WRITE, SOC_DTS_OFFSET_ENABLE, out); if (ret) return ret; } return ret; } static void remove_dts_thermal_zone(struct intel_soc_dts_sensor_entry *dts) { iosf_mbi_write(BT_MBI_UNIT_PMC, MBI_REG_WRITE, SOC_DTS_OFFSET_ENABLE, dts->store_status); thermal_zone_device_unregister(dts->tzone); } static int add_dts_thermal_zone(int id, struct intel_soc_dts_sensor_entry *dts, bool critical_trip) { int writable_trip_cnt = SOC_MAX_DTS_TRIPS; char name[10]; unsigned long trip; int trip_mask; unsigned long ptps; u32 store_ptps; unsigned long i; int ret; /* Store status to restor on exit */ ret = iosf_mbi_read(BT_MBI_UNIT_PMC, MBI_REG_READ, SOC_DTS_OFFSET_ENABLE, &dts->store_status); if (ret) goto err_ret; dts->id = id; if (critical_trip) writable_trip_cnt--; trip_mask = GENMASK(writable_trip_cnt - 1, 0); /* Check if the writable trip we provide is not used by BIOS */ ret = iosf_mbi_read(BT_MBI_UNIT_PMC, MBI_REG_READ, SOC_DTS_OFFSET_PTPS, &store_ptps); if (ret) trip_mask = 0; else { ptps = store_ptps; for_each_set_clump8(i, trip, &ptps, writable_trip_cnt * 8) trip_mask &= ~BIT(i / 8); } dts->trip_mask = trip_mask; snprintf(name, sizeof(name), "soc_dts%d", id); dts->tzone = thermal_zone_device_register_with_trips(name, 
dts->trips, SOC_MAX_DTS_TRIPS, trip_mask, dts, &tzone_ops, NULL, 0, 0); if (IS_ERR(dts->tzone)) { ret = PTR_ERR(dts->tzone); goto err_ret; } ret = thermal_zone_device_enable(dts->tzone); if (ret) goto err_enable; ret = soc_dts_enable(id); if (ret) goto err_enable; return 0; err_enable: thermal_zone_device_unregister(dts->tzone); err_ret: return ret; } void intel_soc_dts_iosf_interrupt_handler(struct intel_soc_dts_sensors *sensors) { u32 sticky_out; int status; u32 ptmc_out; unsigned long flags; spin_lock_irqsave(&sensors->intr_notify_lock, flags); status = iosf_mbi_read(BT_MBI_UNIT_PMC, MBI_REG_READ, SOC_DTS_OFFSET_PTMC, &ptmc_out); ptmc_out |= SOC_DTS_PTMC_APIC_DEASSERT_BIT; status = iosf_mbi_write(BT_MBI_UNIT_PMC, MBI_REG_WRITE, SOC_DTS_OFFSET_PTMC, ptmc_out); status = iosf_mbi_read(BT_MBI_UNIT_PMC, MBI_REG_READ, SOC_DTS_OFFSET_PTTSS, &sticky_out); pr_debug("status %d PTTSS %x\n", status, sticky_out); if (sticky_out & SOC_DTS_TRIP_MASK) { int i; /* reset sticky bit */ status = iosf_mbi_write(BT_MBI_UNIT_PMC, MBI_REG_WRITE, SOC_DTS_OFFSET_PTTSS, sticky_out); spin_unlock_irqrestore(&sensors->intr_notify_lock, flags); for (i = 0; i < SOC_MAX_DTS_SENSORS; ++i) { pr_debug("TZD update for zone %d\n", i); thermal_zone_device_update(sensors->soc_dts[i].tzone, THERMAL_EVENT_UNSPECIFIED); } } else spin_unlock_irqrestore(&sensors->intr_notify_lock, flags); } EXPORT_SYMBOL_GPL(intel_soc_dts_iosf_interrupt_handler); static void dts_trips_reset(struct intel_soc_dts_sensors *sensors, int dts_index) { configure_trip(&sensors->soc_dts[dts_index], 0, 0, 0); configure_trip(&sensors->soc_dts[dts_index], 1, 0, 0); } struct intel_soc_dts_sensors * intel_soc_dts_iosf_init(enum intel_soc_dts_interrupt_type intr_type, bool critical_trip, int crit_offset) { struct intel_soc_dts_sensors *sensors; int tj_max; int ret; int i; if (!iosf_mbi_available()) return ERR_PTR(-ENODEV); tj_max = intel_tcc_get_tjmax(-1); if (tj_max < 0) return ERR_PTR(tj_max); sensors = kzalloc(sizeof(*sensors), 
GFP_KERNEL); if (!sensors) return ERR_PTR(-ENOMEM); spin_lock_init(&sensors->intr_notify_lock); mutex_init(&sensors->dts_update_lock); sensors->intr_type = intr_type; sensors->tj_max = tj_max * 1000; for (i = 0; i < SOC_MAX_DTS_SENSORS; ++i) { enum thermal_trip_type trip_type; int temp; sensors->soc_dts[i].sensors = sensors; ret = configure_trip(&sensors->soc_dts[i], 0, THERMAL_TRIP_PASSIVE, 0); if (ret) goto err_reset_trips; if (critical_trip) { trip_type = THERMAL_TRIP_CRITICAL; temp = sensors->tj_max - crit_offset; } else { trip_type = THERMAL_TRIP_PASSIVE; temp = 0; } ret = configure_trip(&sensors->soc_dts[i], 1, trip_type, temp); if (ret) goto err_reset_trips; } for (i = 0; i < SOC_MAX_DTS_SENSORS; ++i) { ret = add_dts_thermal_zone(i, &sensors->soc_dts[i], critical_trip); if (ret) goto err_remove_zone; } return sensors; err_remove_zone: for (i = 0; i < SOC_MAX_DTS_SENSORS; ++i) remove_dts_thermal_zone(&sensors->soc_dts[i]); err_reset_trips: for (i = 0; i < SOC_MAX_DTS_SENSORS; i++) dts_trips_reset(sensors, i); kfree(sensors); return ERR_PTR(ret); } EXPORT_SYMBOL_GPL(intel_soc_dts_iosf_init); void intel_soc_dts_iosf_exit(struct intel_soc_dts_sensors *sensors) { int i; for (i = 0; i < SOC_MAX_DTS_SENSORS; ++i) { remove_dts_thermal_zone(&sensors->soc_dts[i]); dts_trips_reset(sensors, i); } kfree(sensors); } EXPORT_SYMBOL_GPL(intel_soc_dts_iosf_exit); MODULE_IMPORT_NS(INTEL_TCC); MODULE_LICENSE("GPL v2");
linux-master
drivers/thermal/intel/intel_soc_dts_iosf.c
/* * intel_quark_dts_thermal.c * * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * * GPL LICENSE SUMMARY * * Copyright(c) 2015 Intel Corporation. * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * Contact Information: * Ong Boon Leong <[email protected]> * Intel Malaysia, Penang * * BSD LICENSE * * Copyright(c) 2015 Intel Corporation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * Neither the name of Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * Quark DTS thermal driver is implemented by referencing * intel_soc_dts_thermal.c. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/module.h> #include <linux/slab.h> #include <linux/interrupt.h> #include <linux/thermal.h> #include <asm/cpu_device_id.h> #include <asm/iosf_mbi.h> /* DTS reset is programmed via QRK_MBI_UNIT_SOC */ #define QRK_DTS_REG_OFFSET_RESET 0x34 #define QRK_DTS_RESET_BIT BIT(0) /* DTS enable is programmed via QRK_MBI_UNIT_RMU */ #define QRK_DTS_REG_OFFSET_ENABLE 0xB0 #define QRK_DTS_ENABLE_BIT BIT(15) /* Temperature Register is read via QRK_MBI_UNIT_RMU */ #define QRK_DTS_REG_OFFSET_TEMP 0xB1 #define QRK_DTS_MASK_TEMP 0xFF #define QRK_DTS_OFFSET_TEMP 0 #define QRK_DTS_OFFSET_REL_TEMP 16 #define QRK_DTS_TEMP_BASE 50 /* Programmable Trip Point Register is configured via QRK_MBI_UNIT_RMU */ #define QRK_DTS_REG_OFFSET_PTPS 0xB2 #define QRK_DTS_MASK_TP_THRES 0xFF #define QRK_DTS_SHIFT_TP 8 #define QRK_DTS_ID_TP_CRITICAL 0 #define QRK_DTS_ID_TP_HOT 1 #define QRK_DTS_SAFE_TP_THRES 105 /* Thermal Sensor Register Lock */ #define QRK_DTS_REG_OFFSET_LOCK 0x71 #define QRK_DTS_LOCK_BIT BIT(5) /* Quark DTS has 2 trip points: hot & catastrophic */ #define QRK_MAX_DTS_TRIPS 2 /* If DTS not locked, all trip points are configurable */ #define QRK_DTS_WR_MASK_SET 0x3 /* If DTS locked, all trip points are not configurable */ #define QRK_DTS_WR_MASK_CLR 0 #define DEFAULT_POLL_DELAY 2000 struct soc_sensor_entry { bool locked; u32 
store_ptps; u32 store_dts_enable; struct thermal_zone_device *tzone; struct thermal_trip trips[QRK_MAX_DTS_TRIPS]; }; static struct soc_sensor_entry *soc_dts; static int polling_delay = DEFAULT_POLL_DELAY; module_param(polling_delay, int, 0644); MODULE_PARM_DESC(polling_delay, "Polling interval for checking trip points (in milliseconds)"); static DEFINE_MUTEX(dts_update_mutex); static int soc_dts_enable(struct thermal_zone_device *tzd) { u32 out; struct soc_sensor_entry *aux_entry = thermal_zone_device_priv(tzd); int ret; ret = iosf_mbi_read(QRK_MBI_UNIT_RMU, MBI_REG_READ, QRK_DTS_REG_OFFSET_ENABLE, &out); if (ret) return ret; if (out & QRK_DTS_ENABLE_BIT) return 0; if (!aux_entry->locked) { out |= QRK_DTS_ENABLE_BIT; ret = iosf_mbi_write(QRK_MBI_UNIT_RMU, MBI_REG_WRITE, QRK_DTS_REG_OFFSET_ENABLE, out); if (ret) return ret; } else { pr_info("DTS is locked. Cannot enable DTS\n"); ret = -EPERM; } return ret; } static int soc_dts_disable(struct thermal_zone_device *tzd) { u32 out; struct soc_sensor_entry *aux_entry = thermal_zone_device_priv(tzd); int ret; ret = iosf_mbi_read(QRK_MBI_UNIT_RMU, MBI_REG_READ, QRK_DTS_REG_OFFSET_ENABLE, &out); if (ret) return ret; if (!(out & QRK_DTS_ENABLE_BIT)) return 0; if (!aux_entry->locked) { out &= ~QRK_DTS_ENABLE_BIT; ret = iosf_mbi_write(QRK_MBI_UNIT_RMU, MBI_REG_WRITE, QRK_DTS_REG_OFFSET_ENABLE, out); if (ret) return ret; } else { pr_info("DTS is locked. Cannot disable DTS\n"); ret = -EPERM; } return ret; } static int get_trip_temp(int trip) { int status, temp; u32 out; mutex_lock(&dts_update_mutex); status = iosf_mbi_read(QRK_MBI_UNIT_RMU, MBI_REG_READ, QRK_DTS_REG_OFFSET_PTPS, &out); mutex_unlock(&dts_update_mutex); if (status) return THERMAL_TEMP_INVALID; /* * Thermal Sensor Programmable Trip Point Register has 8-bit * fields for critical (catastrophic) and hot set trip point * thresholds. The threshold value is always offset by its * temperature base (50 degree Celsius). 
*/ temp = (out >> (trip * QRK_DTS_SHIFT_TP)) & QRK_DTS_MASK_TP_THRES; temp -= QRK_DTS_TEMP_BASE; return temp; } static int update_trip_temp(struct soc_sensor_entry *aux_entry, int trip, int temp) { u32 out; u32 temp_out; u32 store_ptps; int ret; mutex_lock(&dts_update_mutex); if (aux_entry->locked) { ret = -EPERM; goto failed; } ret = iosf_mbi_read(QRK_MBI_UNIT_RMU, MBI_REG_READ, QRK_DTS_REG_OFFSET_PTPS, &store_ptps); if (ret) goto failed; /* * Protection against unsafe trip point thresdhold value. * As Quark X1000 data-sheet does not provide any recommendation * regarding the safe trip point threshold value to use, we choose * the safe value according to the threshold value set by UEFI BIOS. */ if (temp > QRK_DTS_SAFE_TP_THRES) temp = QRK_DTS_SAFE_TP_THRES; /* * Thermal Sensor Programmable Trip Point Register has 8-bit * fields for critical (catastrophic) and hot set trip point * thresholds. The threshold value is always offset by its * temperature base (50 degree Celsius). */ temp_out = temp + QRK_DTS_TEMP_BASE; out = (store_ptps & ~(QRK_DTS_MASK_TP_THRES << (trip * QRK_DTS_SHIFT_TP))); out |= (temp_out & QRK_DTS_MASK_TP_THRES) << (trip * QRK_DTS_SHIFT_TP); ret = iosf_mbi_write(QRK_MBI_UNIT_RMU, MBI_REG_WRITE, QRK_DTS_REG_OFFSET_PTPS, out); failed: mutex_unlock(&dts_update_mutex); return ret; } static inline int sys_set_trip_temp(struct thermal_zone_device *tzd, int trip, int temp) { return update_trip_temp(thermal_zone_device_priv(tzd), trip, temp); } static int sys_get_curr_temp(struct thermal_zone_device *tzd, int *temp) { u32 out; int ret; mutex_lock(&dts_update_mutex); ret = iosf_mbi_read(QRK_MBI_UNIT_RMU, MBI_REG_READ, QRK_DTS_REG_OFFSET_TEMP, &out); mutex_unlock(&dts_update_mutex); if (ret) return ret; /* * Thermal Sensor Temperature Register has 8-bit field * for temperature value (offset by temperature base * 50 degree Celsius). 
*/ out = (out >> QRK_DTS_OFFSET_TEMP) & QRK_DTS_MASK_TEMP; *temp = out - QRK_DTS_TEMP_BASE; return 0; } static int sys_change_mode(struct thermal_zone_device *tzd, enum thermal_device_mode mode) { int ret; mutex_lock(&dts_update_mutex); if (mode == THERMAL_DEVICE_ENABLED) ret = soc_dts_enable(tzd); else ret = soc_dts_disable(tzd); mutex_unlock(&dts_update_mutex); return ret; } static struct thermal_zone_device_ops tzone_ops = { .get_temp = sys_get_curr_temp, .set_trip_temp = sys_set_trip_temp, .change_mode = sys_change_mode, }; static void free_soc_dts(struct soc_sensor_entry *aux_entry) { if (aux_entry) { if (!aux_entry->locked) { mutex_lock(&dts_update_mutex); iosf_mbi_write(QRK_MBI_UNIT_RMU, MBI_REG_WRITE, QRK_DTS_REG_OFFSET_ENABLE, aux_entry->store_dts_enable); iosf_mbi_write(QRK_MBI_UNIT_RMU, MBI_REG_WRITE, QRK_DTS_REG_OFFSET_PTPS, aux_entry->store_ptps); mutex_unlock(&dts_update_mutex); } thermal_zone_device_unregister(aux_entry->tzone); kfree(aux_entry); } } static struct soc_sensor_entry *alloc_soc_dts(void) { struct soc_sensor_entry *aux_entry; int err; u32 out; int wr_mask; aux_entry = kzalloc(sizeof(*aux_entry), GFP_KERNEL); if (!aux_entry) { err = -ENOMEM; return ERR_PTR(-ENOMEM); } /* Check if DTS register is locked */ err = iosf_mbi_read(QRK_MBI_UNIT_RMU, MBI_REG_READ, QRK_DTS_REG_OFFSET_LOCK, &out); if (err) goto err_ret; if (out & QRK_DTS_LOCK_BIT) { aux_entry->locked = true; wr_mask = QRK_DTS_WR_MASK_CLR; } else { aux_entry->locked = false; wr_mask = QRK_DTS_WR_MASK_SET; } /* Store DTS default state if DTS registers are not locked */ if (!aux_entry->locked) { /* Store DTS default enable for restore on exit */ err = iosf_mbi_read(QRK_MBI_UNIT_RMU, MBI_REG_READ, QRK_DTS_REG_OFFSET_ENABLE, &aux_entry->store_dts_enable); if (err) goto err_ret; /* Store DTS default PTPS register for restore on exit */ err = iosf_mbi_read(QRK_MBI_UNIT_RMU, MBI_REG_READ, QRK_DTS_REG_OFFSET_PTPS, &aux_entry->store_ptps); if (err) goto err_ret; } 
aux_entry->trips[QRK_DTS_ID_TP_CRITICAL].temperature = get_trip_temp(QRK_DTS_ID_TP_CRITICAL); aux_entry->trips[QRK_DTS_ID_TP_CRITICAL].type = THERMAL_TRIP_CRITICAL; aux_entry->trips[QRK_DTS_ID_TP_HOT].temperature = get_trip_temp(QRK_DTS_ID_TP_HOT); aux_entry->trips[QRK_DTS_ID_TP_HOT].type = THERMAL_TRIP_HOT; aux_entry->tzone = thermal_zone_device_register_with_trips("quark_dts", aux_entry->trips, QRK_MAX_DTS_TRIPS, wr_mask, aux_entry, &tzone_ops, NULL, 0, polling_delay); if (IS_ERR(aux_entry->tzone)) { err = PTR_ERR(aux_entry->tzone); goto err_ret; } err = thermal_zone_device_enable(aux_entry->tzone); if (err) goto err_aux_status; return aux_entry; err_aux_status: thermal_zone_device_unregister(aux_entry->tzone); err_ret: kfree(aux_entry); return ERR_PTR(err); } static const struct x86_cpu_id qrk_thermal_ids[] __initconst = { X86_MATCH_VENDOR_FAM_MODEL(INTEL, 5, INTEL_FAM5_QUARK_X1000, NULL), {} }; MODULE_DEVICE_TABLE(x86cpu, qrk_thermal_ids); static int __init intel_quark_thermal_init(void) { if (!x86_match_cpu(qrk_thermal_ids) || !iosf_mbi_available()) return -ENODEV; soc_dts = alloc_soc_dts(); if (IS_ERR(soc_dts)) return PTR_ERR(soc_dts); return 0; } static void __exit intel_quark_thermal_exit(void) { free_soc_dts(soc_dts); } module_init(intel_quark_thermal_init) module_exit(intel_quark_thermal_exit) MODULE_DESCRIPTION("Intel Quark DTS Thermal Driver"); MODULE_AUTHOR("Ong Boon Leong <[email protected]>"); MODULE_LICENSE("Dual BSD/GPL");
linux-master
drivers/thermal/intel/intel_quark_dts_thermal.c
// SPDX-License-Identifier: GPL-2.0-only /* * intel_soc_dts_thermal.c * Copyright (c) 2014, Intel Corporation. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/acpi.h> #include <linux/module.h> #include <linux/interrupt.h> #include <asm/cpu_device_id.h> #include <asm/intel-family.h> #include "intel_soc_dts_iosf.h" #define CRITICAL_OFFSET_FROM_TJ_MAX 5000 static int crit_offset = CRITICAL_OFFSET_FROM_TJ_MAX; module_param(crit_offset, int, 0644); MODULE_PARM_DESC(crit_offset, "Critical Temperature offset from tj max in millidegree Celsius."); /* IRQ 86 is a fixed APIC interrupt for BYT DTS Aux threshold notifications */ #define BYT_SOC_DTS_APIC_IRQ 86 static int soc_dts_thres_gsi; static int soc_dts_thres_irq; static struct intel_soc_dts_sensors *soc_dts; static irqreturn_t soc_irq_thread_fn(int irq, void *dev_data) { pr_debug("proc_thermal_interrupt\n"); intel_soc_dts_iosf_interrupt_handler(soc_dts); return IRQ_HANDLED; } static const struct x86_cpu_id soc_thermal_ids[] = { X86_MATCH_INTEL_FAM6_MODEL(ATOM_SILVERMONT, BYT_SOC_DTS_APIC_IRQ), {} }; MODULE_DEVICE_TABLE(x86cpu, soc_thermal_ids); static int __init intel_soc_thermal_init(void) { int err = 0; const struct x86_cpu_id *match_cpu; match_cpu = x86_match_cpu(soc_thermal_ids); if (!match_cpu) return -ENODEV; /* Create a zone with 2 trips with marked as read only */ soc_dts = intel_soc_dts_iosf_init(INTEL_SOC_DTS_INTERRUPT_APIC, true, crit_offset); if (IS_ERR(soc_dts)) { err = PTR_ERR(soc_dts); return err; } soc_dts_thres_gsi = (int)match_cpu->driver_data; if (soc_dts_thres_gsi) { /* * Note the flags here MUST match the firmware defaults, rather * then the request_irq flags, otherwise we get an EBUSY error. 
*/ soc_dts_thres_irq = acpi_register_gsi(NULL, soc_dts_thres_gsi, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_LOW); if (soc_dts_thres_irq < 0) { pr_warn("intel_soc_dts: Could not get IRQ for GSI %d, err %d\n", soc_dts_thres_gsi, soc_dts_thres_irq); soc_dts_thres_irq = 0; } } if (soc_dts_thres_irq) { err = request_threaded_irq(soc_dts_thres_irq, NULL, soc_irq_thread_fn, IRQF_TRIGGER_RISING | IRQF_ONESHOT, "soc_dts", soc_dts); if (err) { /* * Do not just error out because the user space thermal * daemon such as DPTF may use polling instead of being * interrupt driven. */ pr_warn("request_threaded_irq ret %d\n", err); } } return 0; } static void __exit intel_soc_thermal_exit(void) { if (soc_dts_thres_irq) { free_irq(soc_dts_thres_irq, soc_dts); acpi_unregister_gsi(soc_dts_thres_gsi); } intel_soc_dts_iosf_exit(soc_dts); } module_init(intel_soc_thermal_init) module_exit(intel_soc_thermal_exit) MODULE_DESCRIPTION("Intel SoC DTS Thermal Driver"); MODULE_AUTHOR("Srinivas Pandruvada <[email protected]>"); MODULE_LICENSE("GPL v2");
linux-master
drivers/thermal/intel/intel_soc_dts_thermal.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Thermal throttle event support code (such as syslog messaging and rate
 * limiting) that was factored out from x86_64 (mce_intel.c) and i386 (p4.c).
 *
 * This allows consistent reporting of CPU thermal throttle events.
 *
 * Maintains a counter in /sys that keeps track of the number of thermal
 * events, such that the user knows how bad the thermal problem might be
 * (since the logging to syslog is rate limited).
 *
 * Author: Dmitriy Zavin ([email protected])
 *
 * Credits: Adapted from Zwane Mwaikambo's original code in mce_intel.c.
 * Inspired by Ross Biro's and Al Borchers' counter code.
 */
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/percpu.h>
#include <linux/export.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <linux/cpu.h>

#include <asm/processor.h>
#include <asm/thermal.h>
#include <asm/traps.h>
#include <asm/apic.h>
#include <asm/irq.h>
#include <asm/msr.h>

#include "intel_hfi.h"
#include "thermal_interrupt.h"

/* How long to wait between reporting thermal events */
#define CHECK_INTERVAL		(300 * HZ)

/* Event selectors passed to therm_throt_process() */
#define THERMAL_THROTTLING_EVENT	0
#define POWER_LIMIT_EVENT		1

/**
 * struct _thermal_state - Represent the current thermal event state
 * @next_check:			Stores the next timestamp, when it is allowed
 *				to log the next warning message.
 * @last_interrupt_time:	Stores the timestamp for the last threshold
 *				high event.
 * @therm_work:			Delayed workqueue structure
 * @count:			Stores the current running count for thermal
 *				or power threshold interrupts.
 * @last_count:			Stores the previous running count for thermal
 *				or power threshold interrupts.
 * @max_time_ms:		This shows the maximum amount of time CPU was
 *				in throttled state for a single thermal
 *				threshold high to low state.
 * @total_time_ms:		This is a cumulative time during which CPU was
 *				in the throttled state.
 * @rate_control_active:	Set when a throttling message is logged.
 *				This is used for the purpose of rate-control.
 * @new_event:			Stores the last high/low status of the
 *				THERM_STATUS_PROCHOT or
 *				THERM_STATUS_POWER_LIMIT.
 * @level:			Stores whether this _thermal_state instance is
 *				for a CORE level or for PACKAGE level.
 * @sample_index:		Index for storing the next sample in the buffer
 *				temp_samples[].
 * @sample_count:		Total number of samples collected in the buffer
 *				temp_samples[].
 * @average:			The last moving average of temperature samples
 * @baseline_temp:		Temperature at which thermal threshold high
 *				interrupt was generated.
 * @temp_samples:		Storage for temperature samples to calculate
 *				moving average.
 *
 * This structure is used to represent data related to thermal state for a CPU.
 * There is a separate storage for core and package level for each CPU.
 */
struct _thermal_state {
	u64			next_check;
	u64			last_interrupt_time;
	struct delayed_work	therm_work;
	unsigned long		count;
	unsigned long		last_count;
	unsigned long		max_time_ms;
	unsigned long		total_time_ms;
	bool			rate_control_active;
	bool			new_event;
	u8			level;
	u8			sample_index;
	u8			sample_count;
	u8			average;
	u8			baseline_temp;
	u8			temp_samples[3];
};

/* Per-CPU container: one _thermal_state per (level, event/threshold) pair */
struct thermal_state {
	struct _thermal_state core_throttle;
	struct _thermal_state core_power_limit;
	struct _thermal_state package_throttle;
	struct _thermal_state package_power_limit;
	struct _thermal_state core_thresh0;
	struct _thermal_state core_thresh1;
	struct _thermal_state pkg_thresh0;
	struct _thermal_state pkg_thresh1;
};

/* Callback to handle core threshold interrupts */
int (*platform_thermal_notify)(__u64 msr_val);
EXPORT_SYMBOL(platform_thermal_notify);

/* Callback to handle core package threshold_interrupts */
int (*platform_thermal_package_notify)(__u64 msr_val);
EXPORT_SYMBOL_GPL(platform_thermal_package_notify);

/*
 * Callback support of rate control: return true if the registered
 * callback implements its own rate control (this file's rate limiting
 * is then bypassed for package threshold notifications).
 */
bool (*platform_thermal_package_rate_control)(void);
EXPORT_SYMBOL_GPL(platform_thermal_package_rate_control);

static DEFINE_PER_CPU(struct thermal_state, thermal_state);

/* Set once intel_init_thermal() has fully enabled thermal processing */
static atomic_t therm_throt_en = ATOMIC_INIT(0);

/* Thermal LVT value saved on the boot CPU, used to restore APs (see below) */
static u32 lvtthmr_init __read_mostly;

#ifdef CONFIG_SYSFS
/* Declare a read-only sysfs device attribute backed by the show helper */
#define define_therm_throt_device_one_ro(_name)				\
	static DEVICE_ATTR(_name, 0444,					\
			   therm_throt_device_show_##_name,		\
			   NULL)					\

/*
 * Generate a sysfs show function that prints one counter field of the
 * per-CPU thermal_state. Preemption is disabled only to stabilize the
 * cpu_online() check against hotplug.
 */
#define define_therm_throt_device_show_func(event, name)		\
									\
static ssize_t therm_throt_device_show_##event##_##name(		\
			struct device *dev,				\
			struct device_attribute *attr,			\
			char *buf)					\
{									\
	unsigned int cpu = dev->id;					\
	ssize_t ret;							\
									\
	preempt_disable();	/* CPU hotplug */			\
	if (cpu_online(cpu)) {						\
		ret = sprintf(buf, "%lu\n",				\
			      per_cpu(thermal_state, cpu).event.name);	\
	} else								\
		ret = 0;						\
	preempt_enable();						\
									\
	return ret;							\
}

define_therm_throt_device_show_func(core_throttle, count);
define_therm_throt_device_one_ro(core_throttle_count);

define_therm_throt_device_show_func(core_power_limit, count);
define_therm_throt_device_one_ro(core_power_limit_count);

define_therm_throt_device_show_func(package_throttle, count);
define_therm_throt_device_one_ro(package_throttle_count);

define_therm_throt_device_show_func(package_power_limit, count);
define_therm_throt_device_one_ro(package_power_limit_count);

define_therm_throt_device_show_func(core_throttle, max_time_ms);
define_therm_throt_device_one_ro(core_throttle_max_time_ms);

define_therm_throt_device_show_func(package_throttle, max_time_ms);
define_therm_throt_device_one_ro(package_throttle_max_time_ms);

define_therm_throt_device_show_func(core_throttle, total_time_ms);
define_therm_throt_device_one_ro(core_throttle_total_time_ms);

define_therm_throt_device_show_func(package_throttle, total_time_ms);
define_therm_throt_device_one_ro(package_throttle_total_time_ms);

/*
 * Base attribute set: core attributes only. Package and power-limit
 * attributes are added conditionally in thermal_throttle_add_dev()
 * depending on CPU features.
 */
static struct attribute *thermal_throttle_attrs[] = {
	&dev_attr_core_throttle_count.attr,
	&dev_attr_core_throttle_max_time_ms.attr,
	&dev_attr_core_throttle_total_time_ms.attr,
	NULL
};

static const struct attribute_group thermal_attr_group = {
	.attrs	= thermal_throttle_attrs,
	.name	= "thermal_throttle"
};
#endif /* CONFIG_SYSFS */

#define THERM_THROT_POLL_INTERVAL	HZ
#define THERM_STATUS_PROCHOT_LOG	BIT(1)

/* Lazily-built masks of the writable "log" bits in the status MSRs */
static u64 therm_intr_core_clear_mask;
static u64 therm_intr_pkg_clear_mask;

/*
 * Build the mask of IA32_THERM_STATUS log bits this CPU supports, so
 * that thermal_clear_package_intr_status() only writes valid bits.
 */
static void thermal_intr_init_core_clear_mask(void)
{
	if (therm_intr_core_clear_mask)
		return;

	/*
	 * Reference: Intel SDM  Volume 4
	 * "Table 2-2. IA-32 Architectural MSRs", MSR 0x19C
	 * IA32_THERM_STATUS.
	 */

	/*
	 * Bit 1, 3, 5: CPUID.01H:EDX[22] = 1. This driver will not
	 * enable interrupts, when 0 as it checks for X86_FEATURE_ACPI.
	 */
	therm_intr_core_clear_mask = (BIT(1) | BIT(3) | BIT(5));

	/*
	 * Bit 7 and 9: Thermal Threshold #1 and #2 log
	 * If CPUID.01H:ECX[8] = 1
	 */
	if (boot_cpu_has(X86_FEATURE_TM2))
		therm_intr_core_clear_mask |= (BIT(7) | BIT(9));

	/* Bit 11: Power Limitation log (R/WC0) If CPUID.06H:EAX[4] = 1 */
	if (boot_cpu_has(X86_FEATURE_PLN))
		therm_intr_core_clear_mask |= BIT(11);

	/*
	 * Bit 13: Current Limit log (R/WC0) If CPUID.06H:EAX[7] = 1
	 * Bit 15: Cross Domain Limit log (R/WC0) If CPUID.06H:EAX[7] = 1
	 */
	if (boot_cpu_has(X86_FEATURE_HWP))
		therm_intr_core_clear_mask |= (BIT(13) | BIT(15));
}

/* Same as above, for IA32_PACKAGE_THERM_STATUS (MSR 0x1B1) */
static void thermal_intr_init_pkg_clear_mask(void)
{
	if (therm_intr_pkg_clear_mask)
		return;

	/*
	 * Reference: Intel SDM  Volume 4
	 * "Table 2-2. IA-32 Architectural MSRs", MSR 0x1B1
	 * IA32_PACKAGE_THERM_STATUS.
	 */

	/* All bits except BIT 26 depend on CPUID.06H: EAX[6] = 1 */
	if (boot_cpu_has(X86_FEATURE_PTS))
		therm_intr_pkg_clear_mask = (BIT(1) | BIT(3) | BIT(5) | BIT(7) | BIT(9) | BIT(11));

	/*
	 * Intel SDM Volume 2A: Thermal and Power Management Leaf
	 * Bit 26: CPUID.06H: EAX[19] = 1
	 */
	if (boot_cpu_has(X86_FEATURE_HFI))
		therm_intr_pkg_clear_mask |= BIT(26);
}

/*
 * Clear the bits in package thermal status register for bit = 1
 * in bitmask. The status log bits are write-0-to-clear, so writing the
 * supported-bits mask with the requested bits zeroed clears exactly those.
 */
void thermal_clear_package_intr_status(int level, u64 bit_mask)
{
	u64 msr_val;
	int msr;

	if (level == CORE_LEVEL) {
		msr  = MSR_IA32_THERM_STATUS;
		msr_val = therm_intr_core_clear_mask;
	} else {
		msr  = MSR_IA32_PACKAGE_THERM_STATUS;
		msr_val = therm_intr_pkg_clear_mask;
	}

	msr_val &= ~bit_mask;
	wrmsrl(msr, msr_val);
}
EXPORT_SYMBOL_GPL(thermal_clear_package_intr_status);

/*
 * Read the thermal status MSR for @level: *proc_hot reports the PROCHOT
 * log bit; *temp is the digital readout (distance below TjMax in degrees
 * C, so a smaller value means hotter).
 */
static void get_therm_status(int level, bool *proc_hot, u8 *temp)
{
	int msr;
	u64 msr_val;

	if (level == CORE_LEVEL)
		msr = MSR_IA32_THERM_STATUS;
	else
		msr = MSR_IA32_PACKAGE_THERM_STATUS;

	rdmsrl(msr, msr_val);
	if (msr_val & THERM_STATUS_PROCHOT_LOG)
		*proc_hot = true;
	else
		*proc_hot = false;

	*temp = (msr_val >> 16) & 0x7F;
}

/*
 * Delayed work armed by therm_throt_process() while a throttling episode
 * is active: samples the temperature each poll interval, logs a
 * rate-limited warning when a 3-sample moving average shows the CPU
 * getting hotter, and re-arms itself until the episode ends.
 */
static void __maybe_unused throttle_active_work(struct work_struct *work)
{
	struct _thermal_state *state = container_of(to_delayed_work(work),
						struct _thermal_state, therm_work);
	unsigned int i, avg, this_cpu = smp_processor_id();
	u64 now = get_jiffies_64();
	bool hot;
	u8 temp;

	get_therm_status(state->level, &hot, &temp);
	/* temperature value is offset from the max so lesser means hotter */
	if (!hot && temp > state->baseline_temp) {
		if (state->rate_control_active)
			pr_info("CPU%d: %s temperature/speed normal (total events = %lu)\n",
				this_cpu,
				state->level == CORE_LEVEL ? "Core" : "Package",
				state->count);

		state->rate_control_active = false;
		return;
	}

	if (time_before64(now, state->next_check) &&
			  state->rate_control_active)
		goto re_arm;

	state->next_check = now + CHECK_INTERVAL;

	if (state->count != state->last_count) {
		/* There was one new thermal interrupt */
		state->last_count = state->count;
		state->average = 0;
		state->sample_count = 0;
		state->sample_index = 0;
	}

	state->temp_samples[state->sample_index] = temp;
	state->sample_count++;
	state->sample_index = (state->sample_index + 1) % ARRAY_SIZE(state->temp_samples);
	if (state->sample_count < ARRAY_SIZE(state->temp_samples))
		goto re_arm;

	avg = 0;
	for (i = 0; i < ARRAY_SIZE(state->temp_samples); ++i)
		avg += state->temp_samples[i];

	avg /= ARRAY_SIZE(state->temp_samples);

	/* readout shrinks as the CPU heats up: average > avg means hotter */
	if (state->average > avg) {
		pr_warn("CPU%d: %s temperature is above threshold, cpu clock is throttled (total events = %lu)\n",
			this_cpu,
			state->level == CORE_LEVEL ? "Core" : "Package",
			state->count);
		state->rate_control_active = true;
	}

	state->average = avg;

re_arm:
	thermal_clear_package_intr_status(state->level, THERM_STATUS_PROCHOT_LOG);
	schedule_delayed_work_on(this_cpu, &state->therm_work, THERM_THROT_POLL_INTERVAL);
}

/**
 * therm_throt_process - Process thermal throttling event from interrupt
 * @new_event: Whether the condition is currently asserted (the thermal
 *             interrupt normally fires both when the thermal event
 *             begins and once the event has ended).
 * @event: THERMAL_THROTTLING_EVENT or POWER_LIMIT_EVENT.
 * @level: CORE_LEVEL or PACKAGE_LEVEL.
 *
 * This function is called by the thermal interrupt after the
 * IRQ has been acknowledged.
 *
 * It will take care of rate limiting and printing messages to the syslog.
 */
static void therm_throt_process(bool new_event, int event, int level)
{
	struct _thermal_state *state;
	unsigned int this_cpu = smp_processor_id();
	bool old_event;
	u64 now;
	struct thermal_state *pstate = &per_cpu(thermal_state, this_cpu);

	now = get_jiffies_64();
	if (level == CORE_LEVEL) {
		if (event == THERMAL_THROTTLING_EVENT)
			state = &pstate->core_throttle;
		else if (event == POWER_LIMIT_EVENT)
			state = &pstate->core_power_limit;
		else
			return;
	} else if (level == PACKAGE_LEVEL) {
		if (event == THERMAL_THROTTLING_EVENT)
			state = &pstate->package_throttle;
		else if (event == POWER_LIMIT_EVENT)
			state = &pstate->package_power_limit;
		else
			return;
	} else
		return;

	old_event = state->new_event;
	state->new_event = new_event;

	if (new_event)
		state->count++;

	if (event != THERMAL_THROTTLING_EVENT)
		return;

	/* Throttling episode start: record baseline and start polling */
	if (new_event && !state->last_interrupt_time) {
		bool hot;
		u8 temp;

		get_therm_status(state->level, &hot, &temp);
		/*
		 * Ignore short temperature spike as the system is not close
		 * to PROCHOT. 10C offset is large enough to ignore. It is
		 * already dropped from the high threshold temperature.
		 */
		if (temp > 10)
			return;

		state->baseline_temp = temp;
		state->last_interrupt_time = now;
		schedule_delayed_work_on(this_cpu, &state->therm_work, THERM_THROT_POLL_INTERVAL);
	} else if (old_event && state->last_interrupt_time) {
		/* Episode end: account the throttled duration */
		unsigned long throttle_time;

		throttle_time = jiffies_delta_to_msecs(now - state->last_interrupt_time);
		if (throttle_time > state->max_time_ms)
			state->max_time_ms = throttle_time;
		state->total_time_ms += throttle_time;
		state->last_interrupt_time = 0;
	}
}

/*
 * Rate-limit gate for platform threshold callbacks: returns 1 at most
 * once per CHECK_INTERVAL for each (level, threshold) pair.
 */
static int thresh_event_valid(int level, int event)
{
	struct _thermal_state *state;
	unsigned int this_cpu = smp_processor_id();
	struct thermal_state *pstate = &per_cpu(thermal_state, this_cpu);
	u64 now = get_jiffies_64();

	if (level == PACKAGE_LEVEL)
		state = (event == 0) ? &pstate->pkg_thresh0 :
						&pstate->pkg_thresh1;
	else
		state = (event == 0) ? &pstate->core_thresh0 :
						&pstate->core_thresh1;

	if (time_before64(now, state->next_check))
		return 0;

	state->next_check = now + CHECK_INTERVAL;

	return 1;
}

/* Power-limit notification is opt-in via the "int_pln_enable" boot param */
static bool int_pln_enable;
static int __init int_pln_enable_setup(char *s)
{
	int_pln_enable = true;

	return 1;
}
__setup("int_pln_enable", int_pln_enable_setup);

#ifdef CONFIG_SYSFS
/* Add/Remove thermal_throttle interface for CPU device: */
static int thermal_throttle_add_dev(struct device *dev, unsigned int cpu)
{
	int err;
	struct cpuinfo_x86 *c = &cpu_data(cpu);

	err = sysfs_create_group(&dev->kobj, &thermal_attr_group);
	if (err)
		return err;

	if (cpu_has(c, X86_FEATURE_PLN) && int_pln_enable) {
		err = sysfs_add_file_to_group(&dev->kobj,
					      &dev_attr_core_power_limit_count.attr,
					      thermal_attr_group.name);
		if (err)
			goto del_group;
	}

	if (cpu_has(c, X86_FEATURE_PTS)) {
		err = sysfs_add_file_to_group(&dev->kobj,
					      &dev_attr_package_throttle_count.attr,
					      thermal_attr_group.name);
		if (err)
			goto del_group;

		err = sysfs_add_file_to_group(&dev->kobj,
					      &dev_attr_package_throttle_max_time_ms.attr,
					      thermal_attr_group.name);
		if (err)
			goto del_group;

		err = sysfs_add_file_to_group(&dev->kobj,
					      &dev_attr_package_throttle_total_time_ms.attr,
					      thermal_attr_group.name);
		if (err)
			goto del_group;

		if (cpu_has(c, X86_FEATURE_PLN) && int_pln_enable) {
			err = sysfs_add_file_to_group(&dev->kobj,
					&dev_attr_package_power_limit_count.attr,
					thermal_attr_group.name);
			if (err)
				goto del_group;
		}
	}

	return 0;

del_group:
	/* Removing the group also drops any files added to it above */
	sysfs_remove_group(&dev->kobj, &thermal_attr_group);

	return err;
}

static void thermal_throttle_remove_dev(struct device *dev)
{
	sysfs_remove_group(&dev->kobj, &thermal_attr_group);
}

/* Get notified when a cpu comes on/off. Be hotplug friendly. */
static int thermal_throttle_online(unsigned int cpu)
{
	struct thermal_state *state = &per_cpu(thermal_state, cpu);
	struct device *dev = get_cpu_device(cpu);
	u32 l;

	state->package_throttle.level = PACKAGE_LEVEL;
	state->core_throttle.level = CORE_LEVEL;

	INIT_DELAYED_WORK(&state->package_throttle.therm_work, throttle_active_work);
	INIT_DELAYED_WORK(&state->core_throttle.therm_work, throttle_active_work);

	/*
	 * The first CPU coming online will enable the HFI. Usually this causes
	 * hardware to issue an HFI thermal interrupt. Such interrupt will reach
	 * the CPU once we enable the thermal vector in the local APIC.
	 */
	intel_hfi_online(cpu);

	/* Unmask the thermal vector after the above workqueues are initialized. */
	l = apic_read(APIC_LVTTHMR);
	apic_write(APIC_LVTTHMR, l & ~APIC_LVT_MASKED);

	return thermal_throttle_add_dev(dev, cpu);
}

static int thermal_throttle_offline(unsigned int cpu)
{
	struct thermal_state *state = &per_cpu(thermal_state, cpu);
	struct device *dev = get_cpu_device(cpu);
	u32 l;

	/* Mask the thermal vector before draining evtl. pending work */
	l = apic_read(APIC_LVTTHMR);
	apic_write(APIC_LVTTHMR, l | APIC_LVT_MASKED);

	intel_hfi_offline(cpu);

	cancel_delayed_work_sync(&state->package_throttle.therm_work);
	cancel_delayed_work_sync(&state->core_throttle.therm_work);

	state->package_throttle.rate_control_active = false;
	state->core_throttle.rate_control_active = false;

	thermal_throttle_remove_dev(dev);
	return 0;
}

static __init int thermal_throttle_init_device(void)
{
	int ret;

	/* Nothing to do unless intel_init_thermal() enabled processing */
	if (!atomic_read(&therm_throt_en))
		return 0;

	intel_hfi_init();

	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "x86/therm:online",
				thermal_throttle_online,
				thermal_throttle_offline);
	return ret < 0 ? ret : 0;
}
device_initcall(thermal_throttle_init_device);

#endif /* CONFIG_SYSFS */

/* Forward package threshold events to the registered platform callback */
static void notify_package_thresholds(__u64 msr_val)
{
	bool notify_thres_0 = false;
	bool notify_thres_1 = false;

	if (!platform_thermal_package_notify)
		return;

	/* lower threshold check */
	if (msr_val & THERM_LOG_THRESHOLD0)
		notify_thres_0 = true;
	/* higher threshold check */
	if (msr_val & THERM_LOG_THRESHOLD1)
		notify_thres_1 = true;

	if (!notify_thres_0 && !notify_thres_1)
		return;

	if (platform_thermal_package_rate_control &&
		platform_thermal_package_rate_control()) {
		/* Rate control is implemented in callback */
		platform_thermal_package_notify(msr_val);
		return;
	}

	/* lower threshold reached */
	if (notify_thres_0 && thresh_event_valid(PACKAGE_LEVEL, 0))
		platform_thermal_package_notify(msr_val);
	/* higher threshold reached */
	if (notify_thres_1 && thresh_event_valid(PACKAGE_LEVEL, 1))
		platform_thermal_package_notify(msr_val);
}

/* Forward core threshold events to the registered platform callback */
static void notify_thresholds(__u64 msr_val)
{
	/* check whether the interrupt handler is defined;
	 * otherwise simply return
	 */
	if (!platform_thermal_notify)
		return;

	/* lower threshold reached */
	if ((msr_val & THERM_LOG_THRESHOLD0) &&
			thresh_event_valid(CORE_LEVEL, 0))
		platform_thermal_notify(msr_val);
	/* higher threshold reached */
	if ((msr_val & THERM_LOG_THRESHOLD1) &&
			thresh_event_valid(CORE_LEVEL, 1))
		platform_thermal_notify(msr_val);
}

/* Weak default: just acknowledge the HWP interrupt status */
void __weak notify_hwp_interrupt(void)
{
	wrmsrl_safe(MSR_HWP_STATUS, 0);
}

/* Thermal transition interrupt handler */
void intel_thermal_interrupt(void)
{
	__u64 msr_val;

	if (static_cpu_has(X86_FEATURE_HWP))
		notify_hwp_interrupt();

	rdmsrl(MSR_IA32_THERM_STATUS, msr_val);

	/* Check for violation of core thermal thresholds*/
	notify_thresholds(msr_val);

	therm_throt_process(msr_val & THERM_STATUS_PROCHOT,
			    THERMAL_THROTTLING_EVENT,
			    CORE_LEVEL);

	if (this_cpu_has(X86_FEATURE_PLN) && int_pln_enable)
		therm_throt_process(msr_val & THERM_STATUS_POWER_LIMIT,
					POWER_LIMIT_EVENT,
					CORE_LEVEL);

	if (this_cpu_has(X86_FEATURE_PTS)) {
		rdmsrl(MSR_IA32_PACKAGE_THERM_STATUS, msr_val);
		/* check violations of package thermal thresholds */
		notify_package_thresholds(msr_val);
		therm_throt_process(msr_val & PACKAGE_THERM_STATUS_PROCHOT,
					THERMAL_THROTTLING_EVENT,
					PACKAGE_LEVEL);
		if (this_cpu_has(X86_FEATURE_PLN) && int_pln_enable)
			therm_throt_process(msr_val &
					PACKAGE_THERM_STATUS_POWER_LIMIT,
					POWER_LIMIT_EVENT,
					PACKAGE_LEVEL);

		if (this_cpu_has(X86_FEATURE_HFI))
			intel_hfi_process_event(msr_val &
						PACKAGE_THERM_STATUS_HFI_UPDATED);
	}
}

/* Thermal monitoring depends on APIC, ACPI and clock modulation */
static int intel_thermal_supported(struct cpuinfo_x86 *c)
{
	if (!boot_cpu_has(X86_FEATURE_APIC))
		return 0;
	if (!cpu_has(c, X86_FEATURE_ACPI) || !cpu_has(c, X86_FEATURE_ACC))
		return 0;
	return 1;
}

bool x86_thermal_enabled(void)
{
	return atomic_read(&therm_throt_en);
}

void __init therm_lvt_init(void)
{
	/*
	 * This function is only called on boot CPU. Save the init thermal
	 * LVT value on BSP and use that value to restore APs' thermal LVT
	 * entry BIOS programmed later
	 */
	if (intel_thermal_supported(&boot_cpu_data))
		lvtthmr_init = apic_read(APIC_LVTTHMR);
}

/*
 * Per-CPU thermal setup: program the thermal LVT entry and the thermal
 * interrupt enable MSRs, then enable therm_throt processing. Bails out
 * if firmware/SMM already owns the thermal interrupt.
 */
void intel_init_thermal(struct cpuinfo_x86 *c)
{
	unsigned int cpu = smp_processor_id();
	int tm2 = 0;
	u32 l, h;

	if (!intel_thermal_supported(c))
		return;

	/*
	 * First check if its enabled already, in which case there might
	 * be some SMM goo which handles it, so we can't even put a handler
	 * since it might be delivered via SMI already:
	 */
	rdmsr(MSR_IA32_MISC_ENABLE, l, h);

	h = lvtthmr_init;
	/*
	 * The initial value of thermal LVT entries on all APs always reads
	 * 0x10000 because APs are woken up by BSP issuing INIT-SIPI-SIPI
	 * sequence to them and LVT registers are reset to 0s except for
	 * the mask bits which are set to 1s when APs receive INIT IPI.
	 * If BIOS takes over the thermal interrupt and sets its interrupt
	 * delivery mode to SMI (not fixed), it restores the value that the
	 * BIOS has programmed on AP based on BSP's info we saved since BIOS
	 * is always setting the same value for all threads/cores.
	 */
	if ((h & APIC_DM_FIXED_MASK) != APIC_DM_FIXED)
		apic_write(APIC_LVTTHMR, lvtthmr_init);


	if ((l & MSR_IA32_MISC_ENABLE_TM1) && (h & APIC_DM_SMI)) {
		if (system_state == SYSTEM_BOOTING)
			pr_debug("CPU%d: Thermal monitoring handled by SMI\n", cpu);
		return;
	}

	/* early Pentium M models use different method for enabling TM2 */
	if (cpu_has(c, X86_FEATURE_TM2)) {
		if (c->x86 == 6 && (c->x86_model == 9 || c->x86_model == 13)) {
			rdmsr(MSR_THERM2_CTL, l, h);
			if (l & MSR_THERM2_CTL_TM_SELECT)
				tm2 = 1;
		} else if (l & MSR_IA32_MISC_ENABLE_TM2)
			tm2 = 1;
	}

	/* We'll mask the thermal vector in the lapic till we're ready: */
	h = THERMAL_APIC_VECTOR | APIC_DM_FIXED | APIC_LVT_MASKED;
	apic_write(APIC_LVTTHMR, h);

	thermal_intr_init_core_clear_mask();
	thermal_intr_init_pkg_clear_mask();

	rdmsr(MSR_IA32_THERM_INTERRUPT, l, h);
	if (cpu_has(c, X86_FEATURE_PLN) && !int_pln_enable)
		wrmsr(MSR_IA32_THERM_INTERRUPT,
			(l | (THERM_INT_LOW_ENABLE
			| THERM_INT_HIGH_ENABLE)) & ~THERM_INT_PLN_ENABLE, h);
	else if (cpu_has(c, X86_FEATURE_PLN) && int_pln_enable)
		wrmsr(MSR_IA32_THERM_INTERRUPT,
			l | (THERM_INT_LOW_ENABLE
			| THERM_INT_HIGH_ENABLE | THERM_INT_PLN_ENABLE), h);
	else
		wrmsr(MSR_IA32_THERM_INTERRUPT,
		      l | (THERM_INT_LOW_ENABLE | THERM_INT_HIGH_ENABLE), h);

	if (cpu_has(c, X86_FEATURE_PTS)) {
		rdmsr(MSR_IA32_PACKAGE_THERM_INTERRUPT, l, h);
		if (cpu_has(c, X86_FEATURE_PLN) && !int_pln_enable)
			wrmsr(MSR_IA32_PACKAGE_THERM_INTERRUPT,
			      (l | (PACKAGE_THERM_INT_LOW_ENABLE
				| PACKAGE_THERM_INT_HIGH_ENABLE))
				& ~PACKAGE_THERM_INT_PLN_ENABLE, h);
		else if (cpu_has(c, X86_FEATURE_PLN) && int_pln_enable)
			wrmsr(MSR_IA32_PACKAGE_THERM_INTERRUPT,
			      l | (PACKAGE_THERM_INT_LOW_ENABLE
				| PACKAGE_THERM_INT_HIGH_ENABLE
				| PACKAGE_THERM_INT_PLN_ENABLE), h);
		else
			wrmsr(MSR_IA32_PACKAGE_THERM_INTERRUPT,
			      l | (PACKAGE_THERM_INT_LOW_ENABLE
				| PACKAGE_THERM_INT_HIGH_ENABLE), h);

		if (cpu_has(c, X86_FEATURE_HFI)) {
			rdmsr(MSR_IA32_PACKAGE_THERM_INTERRUPT, l, h);
			wrmsr(MSR_IA32_PACKAGE_THERM_INTERRUPT,
			      l | PACKAGE_THERM_INT_HFI_ENABLE, h);
		}
	}

	rdmsr(MSR_IA32_MISC_ENABLE, l, h);
	wrmsr(MSR_IA32_MISC_ENABLE, l | MSR_IA32_MISC_ENABLE_TM1, h);

	pr_info_once("CPU0: Thermal monitoring enabled (%s)\n",
		      tm2 ? "TM2" : "TM1");

	/* enable thermal throttle processing */
	atomic_set(&therm_throt_en, 1);
}
linux-master
drivers/thermal/intel/therm_throt.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * x86_pkg_temp_thermal driver
 * Copyright (c) 2013, Intel Corporation.
 *
 * Registers one thermal zone per physical package/die, backed by the
 * package thermal status/interrupt MSRs, and hooks into the x86 package
 * thermal interrupt via the platform_thermal_package_* callbacks.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/init.h>
#include <linux/intel_tcc.h>
#include <linux/err.h>
#include <linux/param.h>
#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/slab.h>
#include <linux/pm.h>
#include <linux/thermal.h>
#include <linux/debugfs.h>

#include <asm/cpu_device_id.h>

#include "thermal_interrupt.h"

/*
* Rate control delay: The idea is to introduce a debounce effect.
* This should be long enough to avoid repeated events when a
* threshold is set to a temperature which is constantly
* violated, but short enough to still take timely action.
* The action can be to remove the threshold or change it to the next
* interesting setting. Based on experiments, around
* every 5 seconds under load gives a significant
* temperature change.
*/
#define PKG_TEMP_THERMAL_NOTIFY_DELAY	5000
static int notify_delay_ms = PKG_TEMP_THERMAL_NOTIFY_DELAY;
module_param(notify_delay_ms, int, 0644);
MODULE_PARM_DESC(notify_delay_ms,
	"User space notification delay in milli seconds.");

/* Number of trip points in thermal zone. Currently it can't
* be more than 2. MSR can allow setting and getting notifications
* for only 2 thresholds. This define enforces this, if there
* is some wrong values returned by cpuid for number of thresholds.
*/
#define MAX_NUMBER_OF_TRIPS	2

/* Per-package state; one instance per logical die in the zones[] array */
struct zone_device {
	int				cpu;	/* CPU currently servicing this zone */
	bool				work_scheduled;
	u32				msr_pkg_therm_low;	/* saved MSR to restore on removal */
	u32				msr_pkg_therm_high;
	struct delayed_work		work;
	struct thermal_zone_device	*tzone;
	struct thermal_trip		*trips;
	struct cpumask			cpumask;	/* online CPUs in this package */
};

static struct thermal_zone_params pkg_temp_tz_params = {
	.no_hwmon	= true,
};

/* Keep track of how many zone pointers we allocated in init() */
static int max_id __read_mostly;
/* Array of zone pointers */
static struct zone_device **zones;
/* Serializes interrupt notification, work and hotplug */
static DEFINE_RAW_SPINLOCK(pkg_temp_lock);
/* Protects zone operation in the work function against hotplug removal */
static DEFINE_MUTEX(thermal_zone_mutex);

/* The dynamically assigned cpu hotplug state for module_exit() */
static enum cpuhp_state pkg_thermal_hp_state __read_mostly;

/* Debug counters to show using debugfs */
static struct dentry *debugfs;
static unsigned int pkg_interrupt_cnt;
static unsigned int pkg_work_cnt;

static void pkg_temp_debugfs_init(void)
{
	debugfs = debugfs_create_dir("pkg_temp_thermal", NULL);

	debugfs_create_u32("pkg_thres_interrupt", S_IRUGO, debugfs,
			   &pkg_interrupt_cnt);
	debugfs_create_u32("pkg_thres_work", S_IRUGO, debugfs,
			   &pkg_work_cnt);
}

/*
 * Protection:
 *
 * - cpu hotplug: Read serialized by cpu hotplug lock
 *		  Write must hold pkg_temp_lock
 *
 * - Other callsites: Must hold pkg_temp_lock
 */
static struct zone_device *pkg_temp_thermal_get_dev(unsigned int cpu)
{
	int id = topology_logical_die_id(cpu);

	if (id >= 0 && id < max_id)
		return zones[id];
	return NULL;
}

/* thermal_zone get_temp callback: reads the package temp via intel_tcc */
static int sys_get_curr_temp(struct thermal_zone_device *tzd, int *temp)
{
	struct zone_device *zonedev = thermal_zone_device_priv(tzd);
	int val;

	val = intel_tcc_get_temp(zonedev->cpu, true);
	if (val < 0)
		return val;

	/* intel_tcc reports degrees C; thermal core expects millidegrees */
	*temp = val * 1000;
	pr_debug("sys_get_curr_temp %d\n", *temp);
	return 0;
}

/*
 * thermal_zone set_trip_temp callback: programs one of the two package
 * threshold fields in MSR_IA32_PACKAGE_THERM_INTERRUPT. The hardware
 * stores the threshold as an offset below TjMax (0..0x7f degrees C).
 */
static int sys_set_trip_temp(struct thermal_zone_device *tzd, int trip,
			     int temp)
{
	struct zone_device *zonedev = thermal_zone_device_priv(tzd);
	u32 l, h, mask, shift, intr;
	int tj_max, val, ret;

	tj_max = intel_tcc_get_tjmax(zonedev->cpu);
	if (tj_max < 0)
		return tj_max;
	tj_max *= 1000;

	val = (tj_max - temp)/1000;

	if (trip >= MAX_NUMBER_OF_TRIPS || val < 0 || val > 0x7f)
		return -EINVAL;

	ret = rdmsr_on_cpu(zonedev->cpu, MSR_IA32_PACKAGE_THERM_INTERRUPT,
			   &l, &h);
	if (ret < 0)
		return ret;

	if (trip) {
		mask = THERM_MASK_THRESHOLD1;
		shift = THERM_SHIFT_THRESHOLD1;
		intr = THERM_INT_THRESHOLD1_ENABLE;
	} else {
		mask = THERM_MASK_THRESHOLD0;
		shift = THERM_SHIFT_THRESHOLD0;
		intr = THERM_INT_THRESHOLD0_ENABLE;
	}
	l &= ~mask;
	/*
	* When user space sets a trip temperature == 0, that is an indication
	* that it is no longer interested in receiving notifications.
	*/
	if (!temp) {
		l &= ~intr;
	} else {
		l |= val << shift;
		l |= intr;
	}

	return wrmsr_on_cpu(zonedev->cpu, MSR_IA32_PACKAGE_THERM_INTERRUPT,
			l, h);
}

/* Thermal zone callback registry */
static struct thermal_zone_device_ops tzone_ops = {
	.get_temp = sys_get_curr_temp,
	.set_trip_temp = sys_set_trip_temp,
};

/* Tell therm_throt that this driver does its own rate control */
static bool pkg_thermal_rate_control(void)
{
	return true;
}

/* Enable threshold interrupt on local package/cpu */
static inline void enable_pkg_thres_interrupt(void)
{
	u8 thres_0, thres_1;
	u32 l, h;

	rdmsr(MSR_IA32_PACKAGE_THERM_INTERRUPT, l, h);
	/* only enable/disable if it had valid threshold value */
	thres_0 = (l & THERM_MASK_THRESHOLD0) >> THERM_SHIFT_THRESHOLD0;
	thres_1 = (l & THERM_MASK_THRESHOLD1) >> THERM_SHIFT_THRESHOLD1;
	if (thres_0)
		l |= THERM_INT_THRESHOLD0_ENABLE;
	if (thres_1)
		l |= THERM_INT_THRESHOLD1_ENABLE;
	wrmsr(MSR_IA32_PACKAGE_THERM_INTERRUPT, l, h);
}

/* Disable threshold interrupt on local package/cpu */
static inline void disable_pkg_thres_interrupt(void)
{
	u32 l, h;

	rdmsr(MSR_IA32_PACKAGE_THERM_INTERRUPT, l, h);

	l &= ~(THERM_INT_THRESHOLD0_ENABLE | THERM_INT_THRESHOLD1_ENABLE);
	wrmsr(MSR_IA32_PACKAGE_THERM_INTERRUPT, l, h);
}

/*
 * Delayed work scheduled by pkg_thermal_notify(): acknowledges the
 * threshold log bits, re-enables the interrupt and notifies the thermal
 * core. Runs on a CPU of the affected package.
 */
static void pkg_temp_thermal_threshold_work_fn(struct work_struct *work)
{
	struct thermal_zone_device *tzone = NULL;
	int cpu = smp_processor_id();
	struct zone_device *zonedev;

	mutex_lock(&thermal_zone_mutex);
	raw_spin_lock_irq(&pkg_temp_lock);
	++pkg_work_cnt;

	zonedev = pkg_temp_thermal_get_dev(cpu);
	if (!zonedev) {
		raw_spin_unlock_irq(&pkg_temp_lock);
		mutex_unlock(&thermal_zone_mutex);
		return;
	}
	zonedev->work_scheduled = false;

	thermal_clear_package_intr_status(PACKAGE_LEVEL, THERM_LOG_THRESHOLD0 | THERM_LOG_THRESHOLD1);
	tzone = zonedev->tzone;

	enable_pkg_thres_interrupt();
	raw_spin_unlock_irq(&pkg_temp_lock);

	/*
	 * If tzone is not NULL, then thermal_zone_mutex will prevent the
	 * concurrent removal in the cpu offline callback.
	 */
	if (tzone)
		thermal_zone_device_update(tzone, THERMAL_EVENT_UNSPECIFIED);

	mutex_unlock(&thermal_zone_mutex);
}

static void pkg_thermal_schedule_work(int cpu, struct delayed_work *work)
{
	unsigned long ms = msecs_to_jiffies(notify_delay_ms);

	schedule_delayed_work_on(cpu, work, ms);
}

/*
 * platform_thermal_package_notify callback, called from the thermal
 * interrupt. Disables the threshold interrupt (debounce) and schedules
 * the delayed work to handle the event.
 */
static int pkg_thermal_notify(u64 msr_val)
{
	int cpu = smp_processor_id();
	struct zone_device *zonedev;
	unsigned long flags;

	raw_spin_lock_irqsave(&pkg_temp_lock, flags);
	++pkg_interrupt_cnt;

	disable_pkg_thres_interrupt();

	/* Work is per package, so scheduling it once is enough. */
	zonedev = pkg_temp_thermal_get_dev(cpu);
	if (zonedev && !zonedev->work_scheduled) {
		zonedev->work_scheduled = true;
		pkg_thermal_schedule_work(zonedev->cpu, &zonedev->work);
	}

	raw_spin_unlock_irqrestore(&pkg_temp_lock, flags);
	return 0;
}

/*
 * Allocate and populate the trip point table from the threshold fields
 * currently programmed in MSR_IA32_PACKAGE_THERM_INTERRUPT. A zero
 * threshold is reported as THERMAL_TEMP_INVALID.
 */
static struct thermal_trip *pkg_temp_thermal_trips_init(int cpu, int tj_max, int num_trips)
{
	struct thermal_trip *trips;
	unsigned long thres_reg_value;
	u32 mask, shift, eax, edx;
	int ret, i;

	trips = kzalloc(sizeof(*trips) * num_trips, GFP_KERNEL);
	if (!trips)
		return ERR_PTR(-ENOMEM);

	for (i = 0; i < num_trips; i++) {

		if (i) {
			mask = THERM_MASK_THRESHOLD1;
			shift = THERM_SHIFT_THRESHOLD1;
		} else {
			mask = THERM_MASK_THRESHOLD0;
			shift = THERM_SHIFT_THRESHOLD0;
		}

		ret = rdmsr_on_cpu(cpu, MSR_IA32_PACKAGE_THERM_INTERRUPT,
				   &eax, &edx);
		if (ret < 0) {
			kfree(trips);
			return ERR_PTR(ret);
		}

		thres_reg_value = (eax & mask) >> shift;

		trips[i].temperature = thres_reg_value ?
			tj_max - thres_reg_value * 1000 : THERMAL_TEMP_INVALID;

		trips[i].type = THERMAL_TRIP_PASSIVE;

		pr_debug("%s: cpu=%d, trip=%d, temp=%d\n",
			 __func__, cpu, i, trips[i].temperature);
	}

	return trips;
}

/* Create and register the thermal zone for the package @cpu belongs to */
static int pkg_temp_thermal_device_add(unsigned int cpu)
{
	int id = topology_logical_die_id(cpu);
	u32 eax, ebx, ecx, edx;
	struct zone_device *zonedev;
	int thres_count, err;
	int tj_max;

	if (id >= max_id)
		return -ENOMEM;

	/* CPUID leaf 6, EBX[2:0]: number of programmable thresholds */
	cpuid(6, &eax, &ebx, &ecx, &edx);
	thres_count = ebx & 0x07;
	if (!thres_count)
		return -ENODEV;

	thres_count = clamp_val(thres_count, 0, MAX_NUMBER_OF_TRIPS);

	tj_max = intel_tcc_get_tjmax(cpu);
	if (tj_max < 0)
		return tj_max;

	zonedev = kzalloc(sizeof(*zonedev), GFP_KERNEL);
	if (!zonedev)
		return -ENOMEM;

	zonedev->trips = pkg_temp_thermal_trips_init(cpu, tj_max, thres_count);
	if (IS_ERR(zonedev->trips)) {
		err = PTR_ERR(zonedev->trips);
		goto out_kfree_zonedev;
	}

	INIT_DELAYED_WORK(&zonedev->work, pkg_temp_thermal_threshold_work_fn);
	zonedev->cpu = cpu;
	zonedev->tzone = thermal_zone_device_register_with_trips("x86_pkg_temp",
			zonedev->trips, thres_count,
			(thres_count == MAX_NUMBER_OF_TRIPS) ? 0x03 : 0x01,
			zonedev, &tzone_ops, &pkg_temp_tz_params, 0, 0);
	if (IS_ERR(zonedev->tzone)) {
		err = PTR_ERR(zonedev->tzone);
		goto out_kfree_trips;
	}

	err = thermal_zone_device_enable(zonedev->tzone);
	if (err)
		goto out_unregister_tz;

	/* Store MSR value for package thermal interrupt, to restore at exit */
	rdmsr(MSR_IA32_PACKAGE_THERM_INTERRUPT, zonedev->msr_pkg_therm_low,
	      zonedev->msr_pkg_therm_high);

	cpumask_set_cpu(cpu, &zonedev->cpumask);
	raw_spin_lock_irq(&pkg_temp_lock);
	zones[id] = zonedev;
	raw_spin_unlock_irq(&pkg_temp_lock);

	return 0;

out_unregister_tz:
	thermal_zone_device_unregister(zonedev->tzone);
out_kfree_trips:
	kfree(zonedev->trips);
out_kfree_zonedev:
	kfree(zonedev);
	return err;
}

static int pkg_thermal_cpu_offline(unsigned int cpu)
{
	struct zone_device *zonedev = pkg_temp_thermal_get_dev(cpu);
	bool lastcpu, was_target;
	int target;

	if (!zonedev)
		return 0;

	target = cpumask_any_but(&zonedev->cpumask, cpu);
	cpumask_clear_cpu(cpu, &zonedev->cpumask);
	lastcpu = target >= nr_cpu_ids;
	/*
	 * Remove the sysfs files, if this is the last cpu in the package
	 * before doing further cleanups.
	 */
	if (lastcpu) {
		struct thermal_zone_device *tzone = zonedev->tzone;

		/*
		 * We must protect against a work function calling
		 * thermal_zone_update, after/while unregister. We null out
		 * the pointer under the zone mutex, so the worker function
		 * won't try to call.
		 */
		mutex_lock(&thermal_zone_mutex);
		zonedev->tzone = NULL;
		mutex_unlock(&thermal_zone_mutex);

		thermal_zone_device_unregister(tzone);
	}

	/* Protect against work and interrupts */
	raw_spin_lock_irq(&pkg_temp_lock);

	/*
	 * Check whether this cpu was the current target and store the new
	 * one. When we drop the lock, then the interrupt notify function
	 * will see the new target.
	 */
	was_target = zonedev->cpu == cpu;
	zonedev->cpu = target;

	/*
	 * If this is the last CPU in the package remove the package
	 * reference from the array and restore the interrupt MSR. When we
	 * drop the lock neither the interrupt notify function nor the
	 * worker will see the package anymore.
	 */
	if (lastcpu) {
		zones[topology_logical_die_id(cpu)] = NULL;
		/* After this point nothing touches the MSR anymore. */
		wrmsr(MSR_IA32_PACKAGE_THERM_INTERRUPT,
		      zonedev->msr_pkg_therm_low, zonedev->msr_pkg_therm_high);
	}

	/*
	 * Check whether there is work scheduled and whether the work is
	 * targeted at the outgoing CPU.
	 */
	if (zonedev->work_scheduled && was_target) {
		/*
		 * To cancel the work we need to drop the lock, otherwise
		 * we might deadlock if the work needs to be flushed.
		 */
		raw_spin_unlock_irq(&pkg_temp_lock);
		cancel_delayed_work_sync(&zonedev->work);
		raw_spin_lock_irq(&pkg_temp_lock);
		/*
		 * If this is not the last cpu in the package and the work
		 * did not run after we dropped the lock above, then we
		 * need to reschedule the work, otherwise the interrupt
		 * stays disabled forever.
		 */
		if (!lastcpu && zonedev->work_scheduled)
			pkg_thermal_schedule_work(target, &zonedev->work);
	}

	raw_spin_unlock_irq(&pkg_temp_lock);

	/* Final cleanup if this is the last cpu */
	if (lastcpu) {
		kfree(zonedev->trips);
		kfree(zonedev);
	}
	return 0;
}

static int pkg_thermal_cpu_online(unsigned int cpu)
{
	struct zone_device *zonedev = pkg_temp_thermal_get_dev(cpu);
	struct cpuinfo_x86 *c = &cpu_data(cpu);

	/* Paranoia check */
	if (!cpu_has(c, X86_FEATURE_DTHERM) || !cpu_has(c, X86_FEATURE_PTS))
		return -ENODEV;

	/* If the package exists, nothing to do */
	if (zonedev) {
		cpumask_set_cpu(cpu, &zonedev->cpumask);
		return 0;
	}
	return pkg_temp_thermal_device_add(cpu);
}

static const struct x86_cpu_id __initconst pkg_temp_thermal_ids[] = {
	X86_MATCH_VENDOR_FEATURE(INTEL, X86_FEATURE_PTS, NULL),
	{}
};
MODULE_DEVICE_TABLE(x86cpu, pkg_temp_thermal_ids);

static int __init pkg_temp_thermal_init(void)
{
	int ret;

	if (!x86_match_cpu(pkg_temp_thermal_ids))
		return -ENODEV;

	max_id = topology_max_packages() * topology_max_die_per_package();
	zones = kcalloc(max_id, sizeof(struct zone_device *),
			GFP_KERNEL);
	if (!zones)
		return -ENOMEM;

	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "thermal/x86_pkg:online",
				pkg_thermal_cpu_online,	pkg_thermal_cpu_offline);
	if (ret < 0)
		goto err;

	/* Store the state for module exit */
	pkg_thermal_hp_state = ret;

	/* Hook into the package thermal interrupt (see therm_throt.c) */
	platform_thermal_package_notify = pkg_thermal_notify;
	platform_thermal_package_rate_control = pkg_thermal_rate_control;

	 /* Don't care if it fails */
	pkg_temp_debugfs_init();

	return 0;

err:
	kfree(zones);
	return ret;
}
module_init(pkg_temp_thermal_init)

static void __exit pkg_temp_thermal_exit(void)
{
	/* Unhook the interrupt callbacks before tearing down the zones */
	platform_thermal_package_notify = NULL;
	platform_thermal_package_rate_control = NULL;

	cpuhp_remove_state(pkg_thermal_hp_state);
	debugfs_remove_recursive(debugfs);
	kfree(zones);
}
module_exit(pkg_temp_thermal_exit)

MODULE_IMPORT_NS(INTEL_TCC);
MODULE_DESCRIPTION("X86 PKG TEMP Thermal Driver");
MODULE_AUTHOR("Srinivas Pandruvada <[email protected]>");
MODULE_LICENSE("GPL v2");
linux-master
drivers/thermal/intel/x86_pkg_temp_thermal.c
// SPDX-License-Identifier: GPL-2.0-only /* * Intel Broxton PMIC thermal driver * * Copyright (C) 2016 Intel Corporation. All rights reserved. */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/slab.h> #include <linux/delay.h> #include <linux/interrupt.h> #include <linux/device.h> #include <linux/thermal.h> #include <linux/platform_device.h> #include <linux/sched.h> #include <linux/mfd/intel_soc_pmic.h> #define BXTWC_THRM0IRQ 0x4E04 #define BXTWC_THRM1IRQ 0x4E05 #define BXTWC_THRM2IRQ 0x4E06 #define BXTWC_MTHRM0IRQ 0x4E12 #define BXTWC_MTHRM1IRQ 0x4E13 #define BXTWC_MTHRM2IRQ 0x4E14 #define BXTWC_STHRM0IRQ 0x4F19 #define BXTWC_STHRM1IRQ 0x4F1A #define BXTWC_STHRM2IRQ 0x4F1B struct trip_config_map { u16 irq_reg; u16 irq_en; u16 evt_stat; u8 irq_mask; u8 irq_en_mask; u8 evt_mask; u8 trip_num; }; struct thermal_irq_map { char handle[20]; int num_trips; const struct trip_config_map *trip_config; }; struct pmic_thermal_data { const struct thermal_irq_map *maps; int num_maps; }; static const struct trip_config_map bxtwc_str0_trip_config[] = { { .irq_reg = BXTWC_THRM0IRQ, .irq_mask = 0x01, .irq_en = BXTWC_MTHRM0IRQ, .irq_en_mask = 0x01, .evt_stat = BXTWC_STHRM0IRQ, .evt_mask = 0x01, .trip_num = 0 }, { .irq_reg = BXTWC_THRM0IRQ, .irq_mask = 0x10, .irq_en = BXTWC_MTHRM0IRQ, .irq_en_mask = 0x10, .evt_stat = BXTWC_STHRM0IRQ, .evt_mask = 0x10, .trip_num = 1 } }; static const struct trip_config_map bxtwc_str1_trip_config[] = { { .irq_reg = BXTWC_THRM0IRQ, .irq_mask = 0x02, .irq_en = BXTWC_MTHRM0IRQ, .irq_en_mask = 0x02, .evt_stat = BXTWC_STHRM0IRQ, .evt_mask = 0x02, .trip_num = 0 }, { .irq_reg = BXTWC_THRM0IRQ, .irq_mask = 0x20, .irq_en = BXTWC_MTHRM0IRQ, .irq_en_mask = 0x20, .evt_stat = BXTWC_STHRM0IRQ, .evt_mask = 0x20, .trip_num = 1 }, }; static const struct trip_config_map bxtwc_str2_trip_config[] = { { .irq_reg = BXTWC_THRM0IRQ, .irq_mask = 0x04, .irq_en = BXTWC_MTHRM0IRQ, .irq_en_mask = 0x04, .evt_stat = BXTWC_STHRM0IRQ, .evt_mask = 0x04, .trip_num = 0 
}, { .irq_reg = BXTWC_THRM0IRQ, .irq_mask = 0x40, .irq_en = BXTWC_MTHRM0IRQ, .irq_en_mask = 0x40, .evt_stat = BXTWC_STHRM0IRQ, .evt_mask = 0x40, .trip_num = 1 }, }; static const struct trip_config_map bxtwc_str3_trip_config[] = { { .irq_reg = BXTWC_THRM2IRQ, .irq_mask = 0x10, .irq_en = BXTWC_MTHRM2IRQ, .irq_en_mask = 0x10, .evt_stat = BXTWC_STHRM2IRQ, .evt_mask = 0x10, .trip_num = 0 }, }; static const struct thermal_irq_map bxtwc_thermal_irq_map[] = { { .handle = "STR0", .trip_config = bxtwc_str0_trip_config, .num_trips = ARRAY_SIZE(bxtwc_str0_trip_config), }, { .handle = "STR1", .trip_config = bxtwc_str1_trip_config, .num_trips = ARRAY_SIZE(bxtwc_str1_trip_config), }, { .handle = "STR2", .trip_config = bxtwc_str2_trip_config, .num_trips = ARRAY_SIZE(bxtwc_str2_trip_config), }, { .handle = "STR3", .trip_config = bxtwc_str3_trip_config, .num_trips = ARRAY_SIZE(bxtwc_str3_trip_config), }, }; static const struct pmic_thermal_data bxtwc_thermal_data = { .maps = bxtwc_thermal_irq_map, .num_maps = ARRAY_SIZE(bxtwc_thermal_irq_map), }; static irqreturn_t pmic_thermal_irq_handler(int irq, void *data) { struct platform_device *pdev = data; struct thermal_zone_device *tzd; struct pmic_thermal_data *td; struct intel_soc_pmic *pmic; struct regmap *regmap; u8 reg_val, mask, irq_stat; u16 reg, evt_stat_reg; int i, j, ret; pmic = dev_get_drvdata(pdev->dev.parent); regmap = pmic->regmap; td = (struct pmic_thermal_data *) platform_get_device_id(pdev)->driver_data; /* Resolve thermal irqs */ for (i = 0; i < td->num_maps; i++) { for (j = 0; j < td->maps[i].num_trips; j++) { reg = td->maps[i].trip_config[j].irq_reg; mask = td->maps[i].trip_config[j].irq_mask; /* * Read the irq register to resolve whether the * interrupt was triggered for this sensor */ if (regmap_read(regmap, reg, &ret)) return IRQ_HANDLED; reg_val = (u8)ret; irq_stat = ((u8)ret & mask); if (!irq_stat) continue; /* * Read the status register to find out what * event occurred i.e a high or a low */ evt_stat_reg = 
td->maps[i].trip_config[j].evt_stat; if (regmap_read(regmap, evt_stat_reg, &ret)) return IRQ_HANDLED; tzd = thermal_zone_get_zone_by_name(td->maps[i].handle); if (!IS_ERR(tzd)) thermal_zone_device_update(tzd, THERMAL_EVENT_UNSPECIFIED); /* Clear the appropriate irq */ regmap_write(regmap, reg, reg_val & mask); } } return IRQ_HANDLED; } static int pmic_thermal_probe(struct platform_device *pdev) { struct regmap_irq_chip_data *regmap_irq_chip; struct pmic_thermal_data *thermal_data; int ret, irq, virq, i, j, pmic_irq_count; struct intel_soc_pmic *pmic; struct regmap *regmap; struct device *dev; u16 reg; u8 mask; dev = &pdev->dev; pmic = dev_get_drvdata(pdev->dev.parent); if (!pmic) { dev_err(dev, "Failed to get struct intel_soc_pmic pointer\n"); return -ENODEV; } thermal_data = (struct pmic_thermal_data *) platform_get_device_id(pdev)->driver_data; if (!thermal_data) { dev_err(dev, "No thermal data initialized!!\n"); return -ENODEV; } regmap = pmic->regmap; regmap_irq_chip = pmic->irq_chip_data; pmic_irq_count = 0; while ((irq = platform_get_irq(pdev, pmic_irq_count)) != -ENXIO) { virq = regmap_irq_get_virq(regmap_irq_chip, irq); if (virq < 0) { dev_err(dev, "failed to get virq by irq %d\n", irq); return virq; } ret = devm_request_threaded_irq(&pdev->dev, virq, NULL, pmic_thermal_irq_handler, IRQF_ONESHOT, "pmic_thermal", pdev); if (ret) { dev_err(dev, "request irq(%d) failed: %d\n", virq, ret); return ret; } pmic_irq_count++; } /* Enable thermal interrupts */ for (i = 0; i < thermal_data->num_maps; i++) { for (j = 0; j < thermal_data->maps[i].num_trips; j++) { reg = thermal_data->maps[i].trip_config[j].irq_en; mask = thermal_data->maps[i].trip_config[j].irq_en_mask; ret = regmap_update_bits(regmap, reg, mask, 0x00); if (ret) return ret; } } return 0; } static const struct platform_device_id pmic_thermal_id_table[] = { { .name = "bxt_wcove_thermal", .driver_data = (kernel_ulong_t)&bxtwc_thermal_data, }, {}, }; static struct platform_driver pmic_thermal_driver = { 
.probe = pmic_thermal_probe, .driver = { .name = "pmic_thermal", }, .id_table = pmic_thermal_id_table, }; MODULE_DEVICE_TABLE(platform, pmic_thermal_id_table); module_platform_driver(pmic_thermal_driver); MODULE_AUTHOR("Yegnesh S Iyer <[email protected]>"); MODULE_DESCRIPTION("Intel Broxton PMIC Thermal Driver"); MODULE_LICENSE("GPL v2");
linux-master
drivers/thermal/intel/intel_bxt_pmic_thermal.c
// SPDX-License-Identifier: GPL-2.0-only /* intel_pch_thermal.c - Intel PCH Thermal driver * * Copyright (c) 2015, Intel Corporation. * * Authors: * Tushar Dave <[email protected]> */ #include <linux/acpi.h> #include <linux/delay.h> #include <linux/module.h> #include <linux/init.h> #include <linux/pci.h> #include <linux/pm.h> #include <linux/suspend.h> #include <linux/thermal.h> #include <linux/types.h> #include <linux/units.h> /* Intel PCH thermal Device IDs */ #define PCH_THERMAL_DID_HSW_1 0x9C24 /* Haswell PCH */ #define PCH_THERMAL_DID_HSW_2 0x8C24 /* Haswell PCH */ #define PCH_THERMAL_DID_WPT 0x9CA4 /* Wildcat Point */ #define PCH_THERMAL_DID_SKL 0x9D31 /* Skylake PCH */ #define PCH_THERMAL_DID_SKL_H 0xA131 /* Skylake PCH 100 series */ #define PCH_THERMAL_DID_CNL 0x9Df9 /* CNL PCH */ #define PCH_THERMAL_DID_CNL_H 0xA379 /* CNL-H PCH */ #define PCH_THERMAL_DID_CNL_LP 0x02F9 /* CNL-LP PCH */ #define PCH_THERMAL_DID_CML_H 0X06F9 /* CML-H PCH */ #define PCH_THERMAL_DID_LWB 0xA1B1 /* Lewisburg PCH */ #define PCH_THERMAL_DID_WBG 0x8D24 /* Wellsburg PCH */ /* Wildcat Point-LP PCH Thermal registers */ #define WPT_TEMP 0x0000 /* Temperature */ #define WPT_TSC 0x04 /* Thermal Sensor Control */ #define WPT_TSS 0x06 /* Thermal Sensor Status */ #define WPT_TSEL 0x08 /* Thermal Sensor Enable and Lock */ #define WPT_TSREL 0x0A /* Thermal Sensor Report Enable and Lock */ #define WPT_TSMIC 0x0C /* Thermal Sensor SMI Control */ #define WPT_CTT 0x0010 /* Catastrophic Trip Point */ #define WPT_TSPM 0x001C /* Thermal Sensor Power Management */ #define WPT_TAHV 0x0014 /* Thermal Alert High Value */ #define WPT_TALV 0x0018 /* Thermal Alert Low Value */ #define WPT_TL 0x00000040 /* Throttle Value */ #define WPT_PHL 0x0060 /* PCH Hot Level */ #define WPT_PHLC 0x62 /* PHL Control */ #define WPT_TAS 0x80 /* Thermal Alert Status */ #define WPT_TSPIEN 0x82 /* PCI Interrupt Event Enables */ #define WPT_TSGPEN 0x84 /* General Purpose Event Enables */ /* Wildcat Point-LP PCH Thermal Register 
bit definitions */ #define WPT_TEMP_TSR 0x01ff /* Temp TS Reading */ #define WPT_TSC_CPDE 0x01 /* Catastrophic Power-Down Enable */ #define WPT_TSS_TSDSS 0x10 /* Thermal Sensor Dynamic Shutdown Status */ #define WPT_TSS_GPES 0x08 /* GPE status */ #define WPT_TSEL_ETS 0x01 /* Enable TS */ #define WPT_TSEL_PLDB 0x80 /* TSEL Policy Lock-Down Bit */ #define WPT_TL_TOL 0x000001FF /* T0 Level */ #define WPT_TL_T1L 0x1ff00000 /* T1 Level */ #define WPT_TL_TTEN 0x20000000 /* TT Enable */ /* Resolution of 1/2 degree C and an offset of -50C */ #define PCH_TEMP_OFFSET (-50) #define GET_WPT_TEMP(x) ((x) * MILLIDEGREE_PER_DEGREE / 2 + WPT_TEMP_OFFSET) #define WPT_TEMP_OFFSET (PCH_TEMP_OFFSET * MILLIDEGREE_PER_DEGREE) #define GET_PCH_TEMP(x) (((x) / 2) + PCH_TEMP_OFFSET) #define PCH_MAX_TRIPS 3 /* critical, hot, passive */ /* Amount of time for each cooling delay, 100ms by default for now */ static unsigned int delay_timeout = 100; module_param(delay_timeout, int, 0644); MODULE_PARM_DESC(delay_timeout, "amount of time delay for each iteration."); /* Number of iterations for cooling delay, 600 counts by default for now */ static unsigned int delay_cnt = 600; module_param(delay_cnt, int, 0644); MODULE_PARM_DESC(delay_cnt, "total number of iterations for time delay."); static char driver_name[] = "Intel PCH thermal driver"; struct pch_thermal_device { void __iomem *hw_base; struct pci_dev *pdev; struct thermal_zone_device *tzd; struct thermal_trip trips[PCH_MAX_TRIPS]; bool bios_enabled; }; #ifdef CONFIG_ACPI /* * On some platforms, there is a companion ACPI device, which adds * passive trip temperature using _PSV method. There is no specific * passive temperature setting in MMIO interface of this PCI device. 
*/ static int pch_wpt_add_acpi_psv_trip(struct pch_thermal_device *ptd, int trip) { struct acpi_device *adev; int temp; adev = ACPI_COMPANION(&ptd->pdev->dev); if (!adev) return 0; if (thermal_acpi_passive_trip_temp(adev, &temp) || temp <= 0) return 0; ptd->trips[trip].type = THERMAL_TRIP_PASSIVE; ptd->trips[trip].temperature = temp; return 1; } #else static int pch_wpt_add_acpi_psv_trip(struct pch_thermal_device *ptd, int trip) { return 0; } #endif static int pch_thermal_get_temp(struct thermal_zone_device *tzd, int *temp) { struct pch_thermal_device *ptd = thermal_zone_device_priv(tzd); *temp = GET_WPT_TEMP(WPT_TEMP_TSR & readw(ptd->hw_base + WPT_TEMP)); return 0; } static void pch_critical(struct thermal_zone_device *tzd) { dev_dbg(thermal_zone_device(tzd), "%s: critical temperature reached\n", thermal_zone_device_type(tzd)); } static struct thermal_zone_device_ops tzd_ops = { .get_temp = pch_thermal_get_temp, .critical = pch_critical, }; enum pch_board_ids { PCH_BOARD_HSW = 0, PCH_BOARD_WPT, PCH_BOARD_SKL, PCH_BOARD_CNL, PCH_BOARD_CML, PCH_BOARD_LWB, PCH_BOARD_WBG, }; static const char *board_names[] = { [PCH_BOARD_HSW] = "pch_haswell", [PCH_BOARD_WPT] = "pch_wildcat_point", [PCH_BOARD_SKL] = "pch_skylake", [PCH_BOARD_CNL] = "pch_cannonlake", [PCH_BOARD_CML] = "pch_cometlake", [PCH_BOARD_LWB] = "pch_lewisburg", [PCH_BOARD_WBG] = "pch_wellsburg", }; static int intel_pch_thermal_probe(struct pci_dev *pdev, const struct pci_device_id *id) { enum pch_board_ids board_id = id->driver_data; struct pch_thermal_device *ptd; int nr_trips = 0; u16 trip_temp; u8 tsel; int err; ptd = devm_kzalloc(&pdev->dev, sizeof(*ptd), GFP_KERNEL); if (!ptd) return -ENOMEM; pci_set_drvdata(pdev, ptd); ptd->pdev = pdev; err = pci_enable_device(pdev); if (err) { dev_err(&pdev->dev, "failed to enable pci device\n"); return err; } err = pci_request_regions(pdev, driver_name); if (err) { dev_err(&pdev->dev, "failed to request pci region\n"); goto error_disable; } ptd->hw_base = 
pci_ioremap_bar(pdev, 0); if (!ptd->hw_base) { err = -ENOMEM; dev_err(&pdev->dev, "failed to map mem base\n"); goto error_release; } /* Check if BIOS has already enabled thermal sensor */ if (WPT_TSEL_ETS & readb(ptd->hw_base + WPT_TSEL)) { ptd->bios_enabled = true; goto read_trips; } tsel = readb(ptd->hw_base + WPT_TSEL); /* * When TSEL's Policy Lock-Down bit is 1, TSEL become RO. * If so, thermal sensor cannot enable. Bail out. */ if (tsel & WPT_TSEL_PLDB) { dev_err(&ptd->pdev->dev, "Sensor can't be enabled\n"); err = -ENODEV; goto error_cleanup; } writeb(tsel|WPT_TSEL_ETS, ptd->hw_base + WPT_TSEL); if (!(WPT_TSEL_ETS & readb(ptd->hw_base + WPT_TSEL))) { dev_err(&ptd->pdev->dev, "Sensor can't be enabled\n"); err = -ENODEV; goto error_cleanup; } read_trips: trip_temp = readw(ptd->hw_base + WPT_CTT); trip_temp &= 0x1FF; if (trip_temp) { ptd->trips[nr_trips].temperature = GET_WPT_TEMP(trip_temp); ptd->trips[nr_trips++].type = THERMAL_TRIP_CRITICAL; } trip_temp = readw(ptd->hw_base + WPT_PHL); trip_temp &= 0x1FF; if (trip_temp) { ptd->trips[nr_trips].temperature = GET_WPT_TEMP(trip_temp); ptd->trips[nr_trips++].type = THERMAL_TRIP_HOT; } nr_trips += pch_wpt_add_acpi_psv_trip(ptd, nr_trips); ptd->tzd = thermal_zone_device_register_with_trips(board_names[board_id], ptd->trips, nr_trips, 0, ptd, &tzd_ops, NULL, 0, 0); if (IS_ERR(ptd->tzd)) { dev_err(&pdev->dev, "Failed to register thermal zone %s\n", board_names[board_id]); err = PTR_ERR(ptd->tzd); goto error_cleanup; } err = thermal_zone_device_enable(ptd->tzd); if (err) goto err_unregister; return 0; err_unregister: thermal_zone_device_unregister(ptd->tzd); error_cleanup: iounmap(ptd->hw_base); error_release: pci_release_regions(pdev); error_disable: pci_disable_device(pdev); dev_err(&pdev->dev, "pci device failed to probe\n"); return err; } static void intel_pch_thermal_remove(struct pci_dev *pdev) { struct pch_thermal_device *ptd = pci_get_drvdata(pdev); thermal_zone_device_unregister(ptd->tzd); 
iounmap(ptd->hw_base); pci_set_drvdata(pdev, NULL); pci_release_regions(pdev); pci_disable_device(pdev); } static int intel_pch_thermal_suspend_noirq(struct device *device) { struct pch_thermal_device *ptd = dev_get_drvdata(device); u16 pch_thr_temp, pch_cur_temp; int pch_delay_cnt = 0; u8 tsel; /* Shutdown the thermal sensor if it is not enabled by BIOS */ if (!ptd->bios_enabled) { tsel = readb(ptd->hw_base + WPT_TSEL); writeb(tsel & 0xFE, ptd->hw_base + WPT_TSEL); return 0; } /* Do not check temperature if it is not s2idle */ if (pm_suspend_via_firmware()) return 0; /* Get the PCH temperature threshold value */ pch_thr_temp = GET_PCH_TEMP(WPT_TEMP_TSR & readw(ptd->hw_base + WPT_TSPM)); /* Get the PCH current temperature value */ pch_cur_temp = GET_PCH_TEMP(WPT_TEMP_TSR & readw(ptd->hw_base + WPT_TEMP)); /* * If current PCH temperature is higher than configured PCH threshold * value, run some delay loop with sleep to let the current temperature * go down below the threshold value which helps to allow system enter * lower power S0ix suspend state. Even after delay loop if PCH current * temperature stays above threshold, notify the warning message * which helps to indentify the reason why S0ix entry was rejected. */ while (pch_delay_cnt < delay_cnt) { if (pch_cur_temp < pch_thr_temp) break; if (pm_wakeup_pending()) { dev_warn(&ptd->pdev->dev, "Wakeup event detected, abort cooling\n"); return 0; } pch_delay_cnt++; dev_dbg(&ptd->pdev->dev, "CPU-PCH current temp [%dC] higher than the threshold temp [%dC], sleep %d times for %d ms duration\n", pch_cur_temp, pch_thr_temp, pch_delay_cnt, delay_timeout); msleep(delay_timeout); /* Read the PCH current temperature for next cycle. */ pch_cur_temp = GET_PCH_TEMP(WPT_TEMP_TSR & readw(ptd->hw_base + WPT_TEMP)); } if (pch_cur_temp >= pch_thr_temp) dev_warn(&ptd->pdev->dev, "CPU-PCH is hot [%dC] after %d ms delay. 
S0ix might fail\n", pch_cur_temp, pch_delay_cnt * delay_timeout); else { if (pch_delay_cnt) dev_info(&ptd->pdev->dev, "CPU-PCH is cool [%dC] after %d ms delay\n", pch_cur_temp, pch_delay_cnt * delay_timeout); else dev_info(&ptd->pdev->dev, "CPU-PCH is cool [%dC]\n", pch_cur_temp); } return 0; } static int intel_pch_thermal_resume(struct device *device) { struct pch_thermal_device *ptd = dev_get_drvdata(device); u8 tsel; if (ptd->bios_enabled) return 0; tsel = readb(ptd->hw_base + WPT_TSEL); writeb(tsel | WPT_TSEL_ETS, ptd->hw_base + WPT_TSEL); return 0; } static const struct pci_device_id intel_pch_thermal_id[] = { { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCH_THERMAL_DID_HSW_1), .driver_data = PCH_BOARD_HSW, }, { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCH_THERMAL_DID_HSW_2), .driver_data = PCH_BOARD_HSW, }, { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCH_THERMAL_DID_WPT), .driver_data = PCH_BOARD_WPT, }, { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCH_THERMAL_DID_SKL), .driver_data = PCH_BOARD_SKL, }, { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCH_THERMAL_DID_SKL_H), .driver_data = PCH_BOARD_SKL, }, { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCH_THERMAL_DID_CNL), .driver_data = PCH_BOARD_CNL, }, { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCH_THERMAL_DID_CNL_H), .driver_data = PCH_BOARD_CNL, }, { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCH_THERMAL_DID_CNL_LP), .driver_data = PCH_BOARD_CNL, }, { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCH_THERMAL_DID_CML_H), .driver_data = PCH_BOARD_CML, }, { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCH_THERMAL_DID_LWB), .driver_data = PCH_BOARD_LWB, }, { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCH_THERMAL_DID_WBG), .driver_data = PCH_BOARD_WBG, }, { 0, }, }; MODULE_DEVICE_TABLE(pci, intel_pch_thermal_id); static const struct dev_pm_ops intel_pch_pm_ops = { .suspend_noirq = intel_pch_thermal_suspend_noirq, .resume = intel_pch_thermal_resume, }; static struct pci_driver intel_pch_thermal_driver = { .name = "intel_pch_thermal", .id_table = intel_pch_thermal_id, .probe = intel_pch_thermal_probe, .remove = 
intel_pch_thermal_remove, .driver.pm = &intel_pch_pm_ops, }; module_pci_driver(intel_pch_thermal_driver); MODULE_LICENSE("GPL v2"); MODULE_DESCRIPTION("Intel PCH Thermal driver");
linux-master
drivers/thermal/intel/intel_pch_thermal.c
// SPDX-License-Identifier: GPL-2.0-only /* * cooling device driver that activates the processor throttling by * programming the TCC Offset register. * Copyright (c) 2021, Intel Corporation. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/device.h> #include <linux/intel_tcc.h> #include <linux/module.h> #include <linux/thermal.h> #include <asm/cpu_device_id.h> #define TCC_PROGRAMMABLE BIT(30) #define TCC_LOCKED BIT(31) static struct thermal_cooling_device *tcc_cdev; static int tcc_get_max_state(struct thermal_cooling_device *cdev, unsigned long *state) { *state = 0x3f; return 0; } static int tcc_get_cur_state(struct thermal_cooling_device *cdev, unsigned long *state) { int offset = intel_tcc_get_offset(-1); if (offset < 0) return offset; *state = offset; return 0; } static int tcc_set_cur_state(struct thermal_cooling_device *cdev, unsigned long state) { return intel_tcc_set_offset(-1, (int)state); } static const struct thermal_cooling_device_ops tcc_cooling_ops = { .get_max_state = tcc_get_max_state, .get_cur_state = tcc_get_cur_state, .set_cur_state = tcc_set_cur_state, }; static const struct x86_cpu_id tcc_ids[] __initconst = { X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE, NULL), X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE_L, NULL), X86_MATCH_INTEL_FAM6_MODEL(KABYLAKE, NULL), X86_MATCH_INTEL_FAM6_MODEL(KABYLAKE_L, NULL), X86_MATCH_INTEL_FAM6_MODEL(ICELAKE, NULL), X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_L, NULL), X86_MATCH_INTEL_FAM6_MODEL(TIGERLAKE, NULL), X86_MATCH_INTEL_FAM6_MODEL(TIGERLAKE_L, NULL), X86_MATCH_INTEL_FAM6_MODEL(COMETLAKE, NULL), X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE, NULL), X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_L, NULL), X86_MATCH_INTEL_FAM6_MODEL(ATOM_GRACEMONT, NULL), X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE, NULL), X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE_P, NULL), X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE_S, NULL), {} }; MODULE_DEVICE_TABLE(x86cpu, tcc_ids); static int __init tcc_cooling_init(void) { int ret; u64 val; const struct x86_cpu_id *id; int err; id = 
x86_match_cpu(tcc_ids); if (!id) return -ENODEV; err = rdmsrl_safe(MSR_PLATFORM_INFO, &val); if (err) return err; if (!(val & TCC_PROGRAMMABLE)) return -ENODEV; err = rdmsrl_safe(MSR_IA32_TEMPERATURE_TARGET, &val); if (err) return err; if (val & TCC_LOCKED) { pr_info("TCC Offset locked\n"); return -ENODEV; } pr_info("Programmable TCC Offset detected\n"); tcc_cdev = thermal_cooling_device_register("TCC Offset", NULL, &tcc_cooling_ops); if (IS_ERR(tcc_cdev)) { ret = PTR_ERR(tcc_cdev); return ret; } return 0; } module_init(tcc_cooling_init) static void __exit tcc_cooling_exit(void) { thermal_cooling_device_unregister(tcc_cdev); } module_exit(tcc_cooling_exit) MODULE_IMPORT_NS(INTEL_TCC); MODULE_DESCRIPTION("TCC offset cooling device Driver"); MODULE_AUTHOR("Zhang Rui <[email protected]>"); MODULE_LICENSE("GPL v2");
linux-master
drivers/thermal/intel/intel_tcc_cooling.c
// SPDX-License-Identifier: GPL-2.0-only /* * intel_powerclamp.c - package c-state idle injection * * Copyright (c) 2012-2023, Intel Corporation. * * Authors: * Arjan van de Ven <[email protected]> * Jacob Pan <[email protected]> * * TODO: * 1. better handle wakeup from external interrupts, currently a fixed * compensation is added to clamping duration when excessive amount * of wakeups are observed during idle time. the reason is that in * case of external interrupts without need for ack, clamping down * cpu in non-irq context does not reduce irq. for majority of the * cases, clamping down cpu does help reduce irq as well, we should * be able to differentiate the two cases and give a quantitative * solution for the irqs that we can control. perhaps based on * get_cpu_iowait_time_us() * * 2. synchronization with other hw blocks */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/module.h> #include <linux/kernel.h> #include <linux/delay.h> #include <linux/cpu.h> #include <linux/thermal.h> #include <linux/debugfs.h> #include <linux/seq_file.h> #include <linux/idle_inject.h> #include <asm/msr.h> #include <asm/mwait.h> #include <asm/cpu_device_id.h> #define MAX_TARGET_RATIO (100U) /* For each undisturbed clamping period (no extra wake ups during idle time), * we increment the confidence counter for the given target ratio. * CONFIDENCE_OK defines the level where runtime calibration results are * valid. */ #define CONFIDENCE_OK (3) /* Default idle injection duration, driver adjust sleep time to meet target * idle ratio. Similar to frequency modulation. 
*/ #define DEFAULT_DURATION_JIFFIES (6) static unsigned int target_mwait; static struct dentry *debug_dir; static bool poll_pkg_cstate_enable; /* Idle ratio observed using package C-state counters */ static unsigned int current_ratio; /* Skip the idle injection till set to true */ static bool should_skip; struct powerclamp_data { unsigned int cpu; unsigned int count; unsigned int guard; unsigned int window_size_now; unsigned int target_ratio; bool clamping; }; static struct powerclamp_data powerclamp_data; static struct thermal_cooling_device *cooling_dev; static DEFINE_MUTEX(powerclamp_lock); /* This duration is in microseconds */ static unsigned int duration; static unsigned int pkg_cstate_ratio_cur; static unsigned int window_size; static int duration_set(const char *arg, const struct kernel_param *kp) { int ret = 0; unsigned long new_duration; ret = kstrtoul(arg, 10, &new_duration); if (ret) goto exit; if (new_duration > 25 || new_duration < 6) { pr_err("Out of recommended range %lu, between 6-25ms\n", new_duration); ret = -EINVAL; goto exit; } mutex_lock(&powerclamp_lock); duration = clamp(new_duration, 6ul, 25ul) * 1000; mutex_unlock(&powerclamp_lock); exit: return ret; } static int duration_get(char *buf, const struct kernel_param *kp) { int ret; mutex_lock(&powerclamp_lock); ret = sysfs_emit(buf, "%d\n", duration / 1000); mutex_unlock(&powerclamp_lock); return ret; } static const struct kernel_param_ops duration_ops = { .set = duration_set, .get = duration_get, }; module_param_cb(duration, &duration_ops, NULL, 0644); MODULE_PARM_DESC(duration, "forced idle time for each attempt in msec."); #define DEFAULT_MAX_IDLE 50 #define MAX_ALL_CPU_IDLE 75 static u8 max_idle = DEFAULT_MAX_IDLE; static cpumask_var_t idle_injection_cpu_mask; static int allocate_copy_idle_injection_mask(const struct cpumask *copy_mask) { if (cpumask_available(idle_injection_cpu_mask)) goto copy_mask; /* This mask is allocated only one time and freed during module exit */ if 
(!alloc_cpumask_var(&idle_injection_cpu_mask, GFP_KERNEL)) return -ENOMEM; copy_mask: cpumask_copy(idle_injection_cpu_mask, copy_mask); return 0; } /* Return true if the cpumask and idle percent combination is invalid */ static bool check_invalid(cpumask_var_t mask, u8 idle) { if (cpumask_equal(cpu_present_mask, mask) && idle > MAX_ALL_CPU_IDLE) return true; return false; } static int cpumask_set(const char *arg, const struct kernel_param *kp) { cpumask_var_t new_mask; int ret; mutex_lock(&powerclamp_lock); /* Can't set mask when cooling device is in use */ if (powerclamp_data.clamping) { ret = -EAGAIN; goto skip_cpumask_set; } ret = alloc_cpumask_var(&new_mask, GFP_KERNEL); if (!ret) goto skip_cpumask_set; ret = bitmap_parse(arg, strlen(arg), cpumask_bits(new_mask), nr_cpumask_bits); if (ret) goto free_cpumask_set; if (cpumask_empty(new_mask) || check_invalid(new_mask, max_idle)) { ret = -EINVAL; goto free_cpumask_set; } /* * When module parameters are passed from kernel command line * during insmod, the module parameter callback is called * before powerclamp_init(), so we can't assume that some * cpumask can be allocated and copied before here. Also * in this case this cpumask is used as the default mask. 
*/ ret = allocate_copy_idle_injection_mask(new_mask); free_cpumask_set: free_cpumask_var(new_mask); skip_cpumask_set: mutex_unlock(&powerclamp_lock); return ret; } static int cpumask_get(char *buf, const struct kernel_param *kp) { if (!cpumask_available(idle_injection_cpu_mask)) return -ENODEV; return bitmap_print_to_pagebuf(false, buf, cpumask_bits(idle_injection_cpu_mask), nr_cpumask_bits); } static const struct kernel_param_ops cpumask_ops = { .set = cpumask_set, .get = cpumask_get, }; module_param_cb(cpumask, &cpumask_ops, NULL, 0644); MODULE_PARM_DESC(cpumask, "Mask of CPUs to use for idle injection."); static int max_idle_set(const char *arg, const struct kernel_param *kp) { u8 new_max_idle; int ret = 0; mutex_lock(&powerclamp_lock); /* Can't set mask when cooling device is in use */ if (powerclamp_data.clamping) { ret = -EAGAIN; goto skip_limit_set; } ret = kstrtou8(arg, 10, &new_max_idle); if (ret) goto skip_limit_set; if (new_max_idle > MAX_TARGET_RATIO) { ret = -EINVAL; goto skip_limit_set; } if (!cpumask_available(idle_injection_cpu_mask)) { ret = allocate_copy_idle_injection_mask(cpu_present_mask); if (ret) goto skip_limit_set; } if (check_invalid(idle_injection_cpu_mask, new_max_idle)) { ret = -EINVAL; goto skip_limit_set; } max_idle = new_max_idle; skip_limit_set: mutex_unlock(&powerclamp_lock); return ret; } static const struct kernel_param_ops max_idle_ops = { .set = max_idle_set, .get = param_get_int, }; module_param_cb(max_idle, &max_idle_ops, &max_idle, 0644); MODULE_PARM_DESC(max_idle, "maximum injected idle time to the total CPU time ratio in percent range:1-100"); struct powerclamp_calibration_data { unsigned long confidence; /* used for calibration, basically a counter * gets incremented each time a clamping * period is completed without extra wakeups * once that counter is reached given level, * compensation is deemed usable. */ unsigned long steady_comp; /* steady state compensation used when * no extra wakeups occurred. 
*/ unsigned long dynamic_comp; /* compensate excessive wakeup from idle * mostly from external interrupts. */ }; static struct powerclamp_calibration_data cal_data[MAX_TARGET_RATIO]; static int window_size_set(const char *arg, const struct kernel_param *kp) { int ret = 0; unsigned long new_window_size; ret = kstrtoul(arg, 10, &new_window_size); if (ret) goto exit_win; if (new_window_size > 10 || new_window_size < 2) { pr_err("Out of recommended window size %lu, between 2-10\n", new_window_size); ret = -EINVAL; } window_size = clamp(new_window_size, 2ul, 10ul); smp_mb(); exit_win: return ret; } static const struct kernel_param_ops window_size_ops = { .set = window_size_set, .get = param_get_int, }; module_param_cb(window_size, &window_size_ops, &window_size, 0644); MODULE_PARM_DESC(window_size, "sliding window in number of clamping cycles\n" "\tpowerclamp controls idle ratio within this window. larger\n" "\twindow size results in slower response time but more smooth\n" "\tclamping results. 
default to 2."); static void find_target_mwait(void) { unsigned int eax, ebx, ecx, edx; unsigned int highest_cstate = 0; unsigned int highest_subcstate = 0; int i; if (boot_cpu_data.cpuid_level < CPUID_MWAIT_LEAF) return; cpuid(CPUID_MWAIT_LEAF, &eax, &ebx, &ecx, &edx); if (!(ecx & CPUID5_ECX_EXTENSIONS_SUPPORTED) || !(ecx & CPUID5_ECX_INTERRUPT_BREAK)) return; edx >>= MWAIT_SUBSTATE_SIZE; for (i = 0; i < 7 && edx; i++, edx >>= MWAIT_SUBSTATE_SIZE) { if (edx & MWAIT_SUBSTATE_MASK) { highest_cstate = i; highest_subcstate = edx & MWAIT_SUBSTATE_MASK; } } target_mwait = (highest_cstate << MWAIT_SUBSTATE_SIZE) | (highest_subcstate - 1); } struct pkg_cstate_info { bool skip; int msr_index; int cstate_id; }; #define PKG_CSTATE_INIT(id) { \ .msr_index = MSR_PKG_C##id##_RESIDENCY, \ .cstate_id = id \ } static struct pkg_cstate_info pkg_cstates[] = { PKG_CSTATE_INIT(2), PKG_CSTATE_INIT(3), PKG_CSTATE_INIT(6), PKG_CSTATE_INIT(7), PKG_CSTATE_INIT(8), PKG_CSTATE_INIT(9), PKG_CSTATE_INIT(10), {NULL}, }; static bool has_pkg_state_counter(void) { u64 val; struct pkg_cstate_info *info = pkg_cstates; /* check if any one of the counter msrs exists */ while (info->msr_index) { if (!rdmsrl_safe(info->msr_index, &val)) return true; info++; } return false; } static u64 pkg_state_counter(void) { u64 val; u64 count = 0; struct pkg_cstate_info *info = pkg_cstates; while (info->msr_index) { if (!info->skip) { if (!rdmsrl_safe(info->msr_index, &val)) count += val; else info->skip = true; } info++; } return count; } static unsigned int get_compensation(int ratio) { unsigned int comp = 0; if (!poll_pkg_cstate_enable) return 0; /* we only use compensation if all adjacent ones are good */ if (ratio == 1 && cal_data[ratio].confidence >= CONFIDENCE_OK && cal_data[ratio + 1].confidence >= CONFIDENCE_OK && cal_data[ratio + 2].confidence >= CONFIDENCE_OK) { comp = (cal_data[ratio].steady_comp + cal_data[ratio + 1].steady_comp + cal_data[ratio + 2].steady_comp) / 3; } else if (ratio == 
MAX_TARGET_RATIO - 1 && cal_data[ratio].confidence >= CONFIDENCE_OK && cal_data[ratio - 1].confidence >= CONFIDENCE_OK && cal_data[ratio - 2].confidence >= CONFIDENCE_OK) { comp = (cal_data[ratio].steady_comp + cal_data[ratio - 1].steady_comp + cal_data[ratio - 2].steady_comp) / 3; } else if (cal_data[ratio].confidence >= CONFIDENCE_OK && cal_data[ratio - 1].confidence >= CONFIDENCE_OK && cal_data[ratio + 1].confidence >= CONFIDENCE_OK) { comp = (cal_data[ratio].steady_comp + cal_data[ratio - 1].steady_comp + cal_data[ratio + 1].steady_comp) / 3; } /* do not exceed limit */ if (comp + ratio >= MAX_TARGET_RATIO) comp = MAX_TARGET_RATIO - ratio - 1; return comp; } static void adjust_compensation(int target_ratio, unsigned int win) { int delta; struct powerclamp_calibration_data *d = &cal_data[target_ratio]; /* * adjust compensations if confidence level has not been reached. */ if (d->confidence >= CONFIDENCE_OK) return; delta = powerclamp_data.target_ratio - current_ratio; /* filter out bad data */ if (delta >= 0 && delta <= (1+target_ratio/10)) { if (d->steady_comp) d->steady_comp = roundup(delta+d->steady_comp, 2)/2; else d->steady_comp = delta; d->confidence++; } } static bool powerclamp_adjust_controls(unsigned int target_ratio, unsigned int guard, unsigned int win) { static u64 msr_last, tsc_last; u64 msr_now, tsc_now; u64 val64; /* check result for the last window */ msr_now = pkg_state_counter(); tsc_now = rdtsc(); /* calculate pkg cstate vs tsc ratio */ if (!msr_last || !tsc_last) current_ratio = 1; else if (tsc_now-tsc_last) { val64 = 100*(msr_now-msr_last); do_div(val64, (tsc_now-tsc_last)); current_ratio = val64; } /* update record */ msr_last = msr_now; tsc_last = tsc_now; adjust_compensation(target_ratio, win); /* if we are above target+guard, skip */ return powerclamp_data.target_ratio + guard <= current_ratio; } /* * This function calculates runtime from the current target ratio. * This function gets called under powerclamp_lock. 
*/ static unsigned int get_run_time(void) { unsigned int compensated_ratio; unsigned int runtime; /* * make sure user selected ratio does not take effect until * the next round. adjust target_ratio if user has changed * target such that we can converge quickly. */ powerclamp_data.guard = 1 + powerclamp_data.target_ratio / 20; powerclamp_data.window_size_now = window_size; /* * systems may have different ability to enter package level * c-states, thus we need to compensate the injected idle ratio * to achieve the actual target reported by the HW. */ compensated_ratio = powerclamp_data.target_ratio + get_compensation(powerclamp_data.target_ratio); if (compensated_ratio <= 0) compensated_ratio = 1; runtime = duration * 100 / compensated_ratio - duration; return runtime; } /* * 1 HZ polling while clamping is active, useful for userspace * to monitor actual idle ratio. */ static void poll_pkg_cstate(struct work_struct *dummy); static DECLARE_DELAYED_WORK(poll_pkg_cstate_work, poll_pkg_cstate); static void poll_pkg_cstate(struct work_struct *dummy) { static u64 msr_last; static u64 tsc_last; u64 msr_now; u64 tsc_now; u64 val64; msr_now = pkg_state_counter(); tsc_now = rdtsc(); /* calculate pkg cstate vs tsc ratio */ if (!msr_last || !tsc_last) pkg_cstate_ratio_cur = 1; else { if (tsc_now - tsc_last) { val64 = 100 * (msr_now - msr_last); do_div(val64, (tsc_now - tsc_last)); pkg_cstate_ratio_cur = val64; } } /* update record */ msr_last = msr_now; tsc_last = tsc_now; mutex_lock(&powerclamp_lock); if (powerclamp_data.clamping) schedule_delayed_work(&poll_pkg_cstate_work, HZ); mutex_unlock(&powerclamp_lock); } static struct idle_inject_device *ii_dev; /* * This function is called from idle injection core on timer expiry * for the run duration. This allows powerclamp to readjust or skip * injecting idle for this cycle. 
*/ static bool idle_inject_update(void) { bool update = false; /* We can't sleep in this callback */ if (!mutex_trylock(&powerclamp_lock)) return true; if (!(powerclamp_data.count % powerclamp_data.window_size_now)) { should_skip = powerclamp_adjust_controls(powerclamp_data.target_ratio, powerclamp_data.guard, powerclamp_data.window_size_now); update = true; } if (update) { unsigned int runtime = get_run_time(); idle_inject_set_duration(ii_dev, runtime, duration); } powerclamp_data.count++; mutex_unlock(&powerclamp_lock); if (should_skip) return false; return true; } /* This function starts idle injection by calling idle_inject_start() */ static void trigger_idle_injection(void) { unsigned int runtime = get_run_time(); idle_inject_set_duration(ii_dev, runtime, duration); idle_inject_start(ii_dev); powerclamp_data.clamping = true; } /* * This function is called from start_power_clamp() to register * CPUS with powercap idle injection register and set default * idle duration and latency. */ static int powerclamp_idle_injection_register(void) { poll_pkg_cstate_enable = false; if (cpumask_equal(cpu_present_mask, idle_injection_cpu_mask)) { ii_dev = idle_inject_register_full(idle_injection_cpu_mask, idle_inject_update); if (topology_max_packages() == 1 && topology_max_die_per_package() == 1) poll_pkg_cstate_enable = true; } else { ii_dev = idle_inject_register(idle_injection_cpu_mask); } if (!ii_dev) { pr_err("powerclamp: idle_inject_register failed\n"); return -EAGAIN; } idle_inject_set_duration(ii_dev, TICK_USEC, duration); idle_inject_set_latency(ii_dev, UINT_MAX); return 0; } /* * This function is called from end_power_clamp() to stop idle injection * and unregister CPUS from powercap idle injection core. */ static void remove_idle_injection(void) { if (!powerclamp_data.clamping) return; powerclamp_data.clamping = false; idle_inject_stop(ii_dev); } /* * This function is called when user change the cooling device * state from zero to some other value. 
*/ static int start_power_clamp(void) { int ret; ret = powerclamp_idle_injection_register(); if (!ret) { trigger_idle_injection(); if (poll_pkg_cstate_enable) schedule_delayed_work(&poll_pkg_cstate_work, 0); } return ret; } /* * This function is called when user change the cooling device * state from non zero value zero. */ static void end_power_clamp(void) { if (powerclamp_data.clamping) { remove_idle_injection(); idle_inject_unregister(ii_dev); } } static int powerclamp_get_max_state(struct thermal_cooling_device *cdev, unsigned long *state) { *state = MAX_TARGET_RATIO; return 0; } static int powerclamp_get_cur_state(struct thermal_cooling_device *cdev, unsigned long *state) { mutex_lock(&powerclamp_lock); *state = powerclamp_data.target_ratio; mutex_unlock(&powerclamp_lock); return 0; } static int powerclamp_set_cur_state(struct thermal_cooling_device *cdev, unsigned long new_target_ratio) { int ret = 0; mutex_lock(&powerclamp_lock); new_target_ratio = clamp(new_target_ratio, 0UL, (unsigned long) (max_idle - 1)); if (powerclamp_data.target_ratio == new_target_ratio) goto exit_set; if (!powerclamp_data.target_ratio && new_target_ratio > 0) { pr_info("Start idle injection to reduce power\n"); powerclamp_data.target_ratio = new_target_ratio; ret = start_power_clamp(); if (ret) powerclamp_data.target_ratio = 0; goto exit_set; } else if (powerclamp_data.target_ratio > 0 && new_target_ratio == 0) { pr_info("Stop forced idle injection\n"); end_power_clamp(); powerclamp_data.target_ratio = 0; } else /* adjust currently running */ { unsigned int runtime; powerclamp_data.target_ratio = new_target_ratio; runtime = get_run_time(); idle_inject_set_duration(ii_dev, runtime, duration); } exit_set: mutex_unlock(&powerclamp_lock); return ret; } /* bind to generic thermal layer as cooling device*/ static const struct thermal_cooling_device_ops powerclamp_cooling_ops = { .get_max_state = powerclamp_get_max_state, .get_cur_state = powerclamp_get_cur_state, .set_cur_state = 
powerclamp_set_cur_state, }; static const struct x86_cpu_id __initconst intel_powerclamp_ids[] = { X86_MATCH_VENDOR_FEATURE(INTEL, X86_FEATURE_MWAIT, NULL), {} }; MODULE_DEVICE_TABLE(x86cpu, intel_powerclamp_ids); static int __init powerclamp_probe(void) { if (!x86_match_cpu(intel_powerclamp_ids)) { pr_err("CPU does not support MWAIT\n"); return -ENODEV; } /* The goal for idle time alignment is to achieve package cstate. */ if (!has_pkg_state_counter()) { pr_info("No package C-state available\n"); return -ENODEV; } /* find the deepest mwait value */ find_target_mwait(); return 0; } static int powerclamp_debug_show(struct seq_file *m, void *unused) { int i = 0; seq_printf(m, "pct confidence steady dynamic (compensation)\n"); for (i = 0; i < MAX_TARGET_RATIO; i++) { seq_printf(m, "%d\t%lu\t%lu\t%lu\n", i, cal_data[i].confidence, cal_data[i].steady_comp, cal_data[i].dynamic_comp); } return 0; } DEFINE_SHOW_ATTRIBUTE(powerclamp_debug); static inline void powerclamp_create_debug_files(void) { debug_dir = debugfs_create_dir("intel_powerclamp", NULL); debugfs_create_file("powerclamp_calib", S_IRUGO, debug_dir, cal_data, &powerclamp_debug_fops); } static int __init powerclamp_init(void) { int retval; /* probe cpu features and ids here */ retval = powerclamp_probe(); if (retval) return retval; mutex_lock(&powerclamp_lock); if (!cpumask_available(idle_injection_cpu_mask)) retval = allocate_copy_idle_injection_mask(cpu_present_mask); mutex_unlock(&powerclamp_lock); if (retval) return retval; /* set default limit, maybe adjusted during runtime based on feedback */ window_size = 2; cooling_dev = thermal_cooling_device_register("intel_powerclamp", NULL, &powerclamp_cooling_ops); if (IS_ERR(cooling_dev)) return -ENODEV; if (!duration) duration = jiffies_to_usecs(DEFAULT_DURATION_JIFFIES); powerclamp_create_debug_files(); return 0; } module_init(powerclamp_init); static void __exit powerclamp_exit(void) { mutex_lock(&powerclamp_lock); end_power_clamp(); 
mutex_unlock(&powerclamp_lock); thermal_cooling_device_unregister(cooling_dev); cancel_delayed_work_sync(&poll_pkg_cstate_work); debugfs_remove_recursive(debug_dir); if (cpumask_available(idle_injection_cpu_mask)) free_cpumask_var(idle_injection_cpu_mask); } module_exit(powerclamp_exit); MODULE_IMPORT_NS(IDLE_INJECT); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Arjan van de Ven <[email protected]>"); MODULE_AUTHOR("Jacob Pan <[email protected]>"); MODULE_DESCRIPTION("Package Level C-state Idle Injection for Intel CPUs");
linux-master
drivers/thermal/intel/intel_powerclamp.c
// SPDX-License-Identifier: GPL-2.0-only /* * Hardware Feedback Interface Driver * * Copyright (c) 2021, Intel Corporation. * * Authors: Aubrey Li <[email protected]> * Ricardo Neri <[email protected]> * * * The Hardware Feedback Interface provides a performance and energy efficiency * capability information for each CPU in the system. Depending on the processor * model, hardware may periodically update these capabilities as a result of * changes in the operating conditions (e.g., power limits or thermal * constraints). On other processor models, there is a single HFI update * at boot. * * This file provides functionality to process HFI updates and relay these * updates to userspace. */ #define pr_fmt(fmt) "intel-hfi: " fmt #include <linux/bitops.h> #include <linux/cpufeature.h> #include <linux/cpumask.h> #include <linux/gfp.h> #include <linux/io.h> #include <linux/kernel.h> #include <linux/math.h> #include <linux/mutex.h> #include <linux/percpu-defs.h> #include <linux/printk.h> #include <linux/processor.h> #include <linux/slab.h> #include <linux/spinlock.h> #include <linux/string.h> #include <linux/topology.h> #include <linux/workqueue.h> #include <asm/msr.h> #include "intel_hfi.h" #include "thermal_interrupt.h" #include "../thermal_netlink.h" /* Hardware Feedback Interface MSR configuration bits */ #define HW_FEEDBACK_PTR_VALID_BIT BIT(0) #define HW_FEEDBACK_CONFIG_HFI_ENABLE_BIT BIT(0) /* CPUID detection and enumeration definitions for HFI */ #define CPUID_HFI_LEAF 6 union hfi_capabilities { struct { u8 performance:1; u8 energy_efficiency:1; u8 __reserved:6; } split; u8 bits; }; union cpuid6_edx { struct { union hfi_capabilities capabilities; u32 table_pages:4; u32 __reserved:4; s32 index:16; } split; u32 full; }; /** * struct hfi_cpu_data - HFI capabilities per CPU * @perf_cap: Performance capability * @ee_cap: Energy efficiency capability * * Capabilities of a logical processor in the HFI table. These capabilities are * unitless. 
*/ struct hfi_cpu_data { u8 perf_cap; u8 ee_cap; } __packed; /** * struct hfi_hdr - Header of the HFI table * @perf_updated: Hardware updated performance capabilities * @ee_updated: Hardware updated energy efficiency capabilities * * Properties of the data in an HFI table. */ struct hfi_hdr { u8 perf_updated; u8 ee_updated; } __packed; /** * struct hfi_instance - Representation of an HFI instance (i.e., a table) * @local_table: Base of the local copy of the HFI table * @timestamp: Timestamp of the last update of the local table. * Located at the base of the local table. * @hdr: Base address of the header of the local table * @data: Base address of the data of the local table * @cpus: CPUs represented in this HFI table instance * @hw_table: Pointer to the HFI table of this instance * @update_work: Delayed work to process HFI updates * @table_lock: Lock to protect acceses to the table of this instance * @event_lock: Lock to process HFI interrupts * * A set of parameters to parse and navigate a specific HFI table. */ struct hfi_instance { union { void *local_table; u64 *timestamp; }; void *hdr; void *data; cpumask_var_t cpus; void *hw_table; struct delayed_work update_work; raw_spinlock_t table_lock; raw_spinlock_t event_lock; }; /** * struct hfi_features - Supported HFI features * @nr_table_pages: Size of the HFI table in 4KB pages * @cpu_stride: Stride size to locate the capability data of a logical * processor within the table (i.e., row stride) * @hdr_size: Size of the table header * * Parameters and supported features that are common to all HFI instances */ struct hfi_features { size_t nr_table_pages; unsigned int cpu_stride; unsigned int hdr_size; }; /** * struct hfi_cpu_info - Per-CPU attributes to consume HFI data * @index: Row of this CPU in its HFI table * @hfi_instance: Attributes of the HFI table to which this CPU belongs * * Parameters to link a logical processor to an HFI table and a row within it. 
*/ struct hfi_cpu_info { s16 index; struct hfi_instance *hfi_instance; }; static DEFINE_PER_CPU(struct hfi_cpu_info, hfi_cpu_info) = { .index = -1 }; static int max_hfi_instances; static struct hfi_instance *hfi_instances; static struct hfi_features hfi_features; static DEFINE_MUTEX(hfi_instance_lock); static struct workqueue_struct *hfi_updates_wq; #define HFI_UPDATE_INTERVAL HZ #define HFI_MAX_THERM_NOTIFY_COUNT 16 static void get_hfi_caps(struct hfi_instance *hfi_instance, struct thermal_genl_cpu_caps *cpu_caps) { int cpu, i = 0; raw_spin_lock_irq(&hfi_instance->table_lock); for_each_cpu(cpu, hfi_instance->cpus) { struct hfi_cpu_data *caps; s16 index; index = per_cpu(hfi_cpu_info, cpu).index; caps = hfi_instance->data + index * hfi_features.cpu_stride; cpu_caps[i].cpu = cpu; /* * Scale performance and energy efficiency to * the [0, 1023] interval that thermal netlink uses. */ cpu_caps[i].performance = caps->perf_cap << 2; cpu_caps[i].efficiency = caps->ee_cap << 2; ++i; } raw_spin_unlock_irq(&hfi_instance->table_lock); } /* * Call update_capabilities() when there are changes in the HFI table. */ static void update_capabilities(struct hfi_instance *hfi_instance) { struct thermal_genl_cpu_caps *cpu_caps; int i = 0, cpu_count; /* CPUs may come online/offline while processing an HFI update. */ mutex_lock(&hfi_instance_lock); cpu_count = cpumask_weight(hfi_instance->cpus); /* No CPUs to report in this hfi_instance. */ if (!cpu_count) goto out; cpu_caps = kcalloc(cpu_count, sizeof(*cpu_caps), GFP_KERNEL); if (!cpu_caps) goto out; get_hfi_caps(hfi_instance, cpu_caps); if (cpu_count < HFI_MAX_THERM_NOTIFY_COUNT) goto last_cmd; /* Process complete chunks of HFI_MAX_THERM_NOTIFY_COUNT capabilities. */ for (i = 0; (i + HFI_MAX_THERM_NOTIFY_COUNT) <= cpu_count; i += HFI_MAX_THERM_NOTIFY_COUNT) thermal_genl_cpu_capability_event(HFI_MAX_THERM_NOTIFY_COUNT, &cpu_caps[i]); cpu_count = cpu_count - i; last_cmd: /* Process the remaining capabilities if any. 
*/ if (cpu_count) thermal_genl_cpu_capability_event(cpu_count, &cpu_caps[i]); kfree(cpu_caps); out: mutex_unlock(&hfi_instance_lock); } static void hfi_update_work_fn(struct work_struct *work) { struct hfi_instance *hfi_instance; hfi_instance = container_of(to_delayed_work(work), struct hfi_instance, update_work); update_capabilities(hfi_instance); } void intel_hfi_process_event(__u64 pkg_therm_status_msr_val) { struct hfi_instance *hfi_instance; int cpu = smp_processor_id(); struct hfi_cpu_info *info; u64 new_timestamp, msr, hfi; if (!pkg_therm_status_msr_val) return; info = &per_cpu(hfi_cpu_info, cpu); if (!info) return; /* * A CPU is linked to its HFI instance before the thermal vector in the * local APIC is unmasked. Hence, info->hfi_instance cannot be NULL * when receiving an HFI event. */ hfi_instance = info->hfi_instance; if (unlikely(!hfi_instance)) { pr_debug("Received event on CPU %d but instance was null", cpu); return; } /* * On most systems, all CPUs in the package receive a package-level * thermal interrupt when there is an HFI update. It is sufficient to * let a single CPU to acknowledge the update and queue work to * process it. The remaining CPUs can resume their work. */ if (!raw_spin_trylock(&hfi_instance->event_lock)) return; rdmsrl(MSR_IA32_PACKAGE_THERM_STATUS, msr); hfi = msr & PACKAGE_THERM_STATUS_HFI_UPDATED; if (!hfi) { raw_spin_unlock(&hfi_instance->event_lock); return; } /* * Ack duplicate update. Since there is an active HFI * status from HW, it must be a new event, not a case * where a lagging CPU entered the locked region. */ new_timestamp = *(u64 *)hfi_instance->hw_table; if (*hfi_instance->timestamp == new_timestamp) { thermal_clear_package_intr_status(PACKAGE_LEVEL, PACKAGE_THERM_STATUS_HFI_UPDATED); raw_spin_unlock(&hfi_instance->event_lock); return; } raw_spin_lock(&hfi_instance->table_lock); /* * Copy the updated table into our local copy. This includes the new * timestamp. 
*/ memcpy(hfi_instance->local_table, hfi_instance->hw_table, hfi_features.nr_table_pages << PAGE_SHIFT); /* * Let hardware know that we are done reading the HFI table and it is * free to update it again. */ thermal_clear_package_intr_status(PACKAGE_LEVEL, PACKAGE_THERM_STATUS_HFI_UPDATED); raw_spin_unlock(&hfi_instance->table_lock); raw_spin_unlock(&hfi_instance->event_lock); queue_delayed_work(hfi_updates_wq, &hfi_instance->update_work, HFI_UPDATE_INTERVAL); } static void init_hfi_cpu_index(struct hfi_cpu_info *info) { union cpuid6_edx edx; /* Do not re-read @cpu's index if it has already been initialized. */ if (info->index > -1) return; edx.full = cpuid_edx(CPUID_HFI_LEAF); info->index = edx.split.index; } /* * The format of the HFI table depends on the number of capabilities that the * hardware supports. Keep a data structure to navigate the table. */ static void init_hfi_instance(struct hfi_instance *hfi_instance) { /* The HFI header is below the time-stamp. */ hfi_instance->hdr = hfi_instance->local_table + sizeof(*hfi_instance->timestamp); /* The HFI data starts below the header. */ hfi_instance->data = hfi_instance->hdr + hfi_features.hdr_size; } /** * intel_hfi_online() - Enable HFI on @cpu * @cpu: CPU in which the HFI will be enabled * * Enable the HFI to be used in @cpu. The HFI is enabled at the die/package * level. The first CPU in the die/package to come online does the full HFI * initialization. Subsequent CPUs will just link themselves to the HFI * instance of their die/package. * * This function is called before enabling the thermal vector in the local APIC * in order to ensure that @cpu has an associated HFI instance when it receives * an HFI event. */ void intel_hfi_online(unsigned int cpu) { struct hfi_instance *hfi_instance; struct hfi_cpu_info *info; phys_addr_t hw_table_pa; u64 msr_val; u16 die_id; /* Nothing to do if hfi_instances are missing. */ if (!hfi_instances) return; /* * Link @cpu to the HFI instance of its package/die. 
It does not * matter whether the instance has been initialized. */ info = &per_cpu(hfi_cpu_info, cpu); die_id = topology_logical_die_id(cpu); hfi_instance = info->hfi_instance; if (!hfi_instance) { if (die_id >= max_hfi_instances) return; hfi_instance = &hfi_instances[die_id]; info->hfi_instance = hfi_instance; } init_hfi_cpu_index(info); /* * Now check if the HFI instance of the package/die of @cpu has been * initialized (by checking its header). In such case, all we have to * do is to add @cpu to this instance's cpumask. */ mutex_lock(&hfi_instance_lock); if (hfi_instance->hdr) { cpumask_set_cpu(cpu, hfi_instance->cpus); goto unlock; } /* * Hardware is programmed with the physical address of the first page * frame of the table. Hence, the allocated memory must be page-aligned. */ hfi_instance->hw_table = alloc_pages_exact(hfi_features.nr_table_pages, GFP_KERNEL | __GFP_ZERO); if (!hfi_instance->hw_table) goto unlock; hw_table_pa = virt_to_phys(hfi_instance->hw_table); /* * Allocate memory to keep a local copy of the table that * hardware generates. */ hfi_instance->local_table = kzalloc(hfi_features.nr_table_pages << PAGE_SHIFT, GFP_KERNEL); if (!hfi_instance->local_table) goto free_hw_table; /* * Program the address of the feedback table of this die/package. On * some processors, hardware remembers the old address of the HFI table * even after having been reprogrammed and re-enabled. Thus, do not free * the pages allocated for the table or reprogram the hardware with a * new base address. Namely, program the hardware only once. */ msr_val = hw_table_pa | HW_FEEDBACK_PTR_VALID_BIT; wrmsrl(MSR_IA32_HW_FEEDBACK_PTR, msr_val); init_hfi_instance(hfi_instance); INIT_DELAYED_WORK(&hfi_instance->update_work, hfi_update_work_fn); raw_spin_lock_init(&hfi_instance->table_lock); raw_spin_lock_init(&hfi_instance->event_lock); cpumask_set_cpu(cpu, hfi_instance->cpus); /* * Enable the hardware feedback interface and never disable it. 
See * comment on programming the address of the table. */ rdmsrl(MSR_IA32_HW_FEEDBACK_CONFIG, msr_val); msr_val |= HW_FEEDBACK_CONFIG_HFI_ENABLE_BIT; wrmsrl(MSR_IA32_HW_FEEDBACK_CONFIG, msr_val); unlock: mutex_unlock(&hfi_instance_lock); return; free_hw_table: free_pages_exact(hfi_instance->hw_table, hfi_features.nr_table_pages); goto unlock; } /** * intel_hfi_offline() - Disable HFI on @cpu * @cpu: CPU in which the HFI will be disabled * * Remove @cpu from those covered by its HFI instance. * * On some processors, hardware remembers previous programming settings even * after being reprogrammed. Thus, keep HFI enabled even if all CPUs in the * die/package of @cpu are offline. See note in intel_hfi_online(). */ void intel_hfi_offline(unsigned int cpu) { struct hfi_cpu_info *info = &per_cpu(hfi_cpu_info, cpu); struct hfi_instance *hfi_instance; /* * Check if @cpu as an associated, initialized (i.e., with a non-NULL * header). Also, HFI instances are only initialized if X86_FEATURE_HFI * is present. */ hfi_instance = info->hfi_instance; if (!hfi_instance) return; if (!hfi_instance->hdr) return; mutex_lock(&hfi_instance_lock); cpumask_clear_cpu(cpu, hfi_instance->cpus); mutex_unlock(&hfi_instance_lock); } static __init int hfi_parse_features(void) { unsigned int nr_capabilities; union cpuid6_edx edx; if (!boot_cpu_has(X86_FEATURE_HFI)) return -ENODEV; /* * If we are here we know that CPUID_HFI_LEAF exists. Parse the * supported capabilities and the size of the HFI table. */ edx.full = cpuid_edx(CPUID_HFI_LEAF); if (!edx.split.capabilities.split.performance) { pr_debug("Performance reporting not supported! Not using HFI\n"); return -ENODEV; } /* * The number of supported capabilities determines the number of * columns in the HFI table. Exclude the reserved bits. 
*/ edx.split.capabilities.split.__reserved = 0; nr_capabilities = hweight8(edx.split.capabilities.bits); /* The number of 4KB pages required by the table */ hfi_features.nr_table_pages = edx.split.table_pages + 1; /* * The header contains change indications for each supported feature. * The size of the table header is rounded up to be a multiple of 8 * bytes. */ hfi_features.hdr_size = DIV_ROUND_UP(nr_capabilities, 8) * 8; /* * Data of each logical processor is also rounded up to be a multiple * of 8 bytes. */ hfi_features.cpu_stride = DIV_ROUND_UP(nr_capabilities, 8) * 8; return 0; } void __init intel_hfi_init(void) { struct hfi_instance *hfi_instance; int i, j; if (hfi_parse_features()) return; /* There is one HFI instance per die/package. */ max_hfi_instances = topology_max_packages() * topology_max_die_per_package(); /* * This allocation may fail. CPU hotplug callbacks must check * for a null pointer. */ hfi_instances = kcalloc(max_hfi_instances, sizeof(*hfi_instances), GFP_KERNEL); if (!hfi_instances) return; for (i = 0; i < max_hfi_instances; i++) { hfi_instance = &hfi_instances[i]; if (!zalloc_cpumask_var(&hfi_instance->cpus, GFP_KERNEL)) goto err_nomem; } hfi_updates_wq = create_singlethread_workqueue("hfi-updates"); if (!hfi_updates_wq) goto err_nomem; return; err_nomem: for (j = 0; j < i; ++j) { hfi_instance = &hfi_instances[j]; free_cpumask_var(hfi_instance->cpus); } kfree(hfi_instances); hfi_instances = NULL; }
linux-master
drivers/thermal/intel/intel_hfi.c
// SPDX-License-Identifier: GPL-2.0-only /* * int340x_thermal_zone.c * Copyright (c) 2015, Intel Corporation. */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/init.h> #include <linux/acpi.h> #include <linux/thermal.h> #include <linux/units.h> #include "int340x_thermal_zone.h" static int int340x_thermal_get_zone_temp(struct thermal_zone_device *zone, int *temp) { struct int34x_thermal_zone *d = thermal_zone_device_priv(zone); unsigned long long tmp; acpi_status status; status = acpi_evaluate_integer(d->adev->handle, "_TMP", NULL, &tmp); if (ACPI_FAILURE(status)) return -EIO; if (d->lpat_table) { int conv_temp; conv_temp = acpi_lpat_raw_to_temp(d->lpat_table, (int)tmp); if (conv_temp < 0) return conv_temp; *temp = conv_temp * 10; } else { /* _TMP returns the temperature in tenths of degrees Kelvin */ *temp = deci_kelvin_to_millicelsius(tmp); } return 0; } static int int340x_thermal_set_trip_temp(struct thermal_zone_device *zone, int trip, int temp) { struct int34x_thermal_zone *d = thermal_zone_device_priv(zone); char name[] = {'P', 'A', 'T', '0' + trip, '\0'}; acpi_status status; if (trip > 9) return -EINVAL; status = acpi_execute_simple_method(d->adev->handle, name, millicelsius_to_deci_kelvin(temp)); if (ACPI_FAILURE(status)) return -EIO; return 0; } static void int340x_thermal_critical(struct thermal_zone_device *zone) { dev_dbg(&zone->device, "%s: critical temperature reached\n", zone->type); } static struct thermal_zone_device_ops int340x_thermal_zone_ops = { .get_temp = int340x_thermal_get_zone_temp, .set_trip_temp = int340x_thermal_set_trip_temp, .critical = int340x_thermal_critical, }; static int int340x_thermal_read_trips(struct acpi_device *zone_adev, struct thermal_trip *zone_trips, int trip_cnt) { int i, ret; ret = thermal_acpi_critical_trip_temp(zone_adev, &zone_trips[trip_cnt].temperature); if (!ret) { zone_trips[trip_cnt].type = THERMAL_TRIP_CRITICAL; trip_cnt++; } ret = thermal_acpi_hot_trip_temp(zone_adev, 
&zone_trips[trip_cnt].temperature); if (!ret) { zone_trips[trip_cnt].type = THERMAL_TRIP_HOT; trip_cnt++; } ret = thermal_acpi_passive_trip_temp(zone_adev, &zone_trips[trip_cnt].temperature); if (!ret) { zone_trips[trip_cnt].type = THERMAL_TRIP_PASSIVE; trip_cnt++; } for (i = 0; i < INT340X_THERMAL_MAX_ACT_TRIP_COUNT; i++) { ret = thermal_acpi_active_trip_temp(zone_adev, i, &zone_trips[trip_cnt].temperature); if (ret) break; zone_trips[trip_cnt].type = THERMAL_TRIP_ACTIVE; trip_cnt++; } return trip_cnt; } static struct thermal_zone_params int340x_thermal_params = { .governor_name = "user_space", .no_hwmon = true, }; struct int34x_thermal_zone *int340x_thermal_zone_add(struct acpi_device *adev, int (*get_temp) (struct thermal_zone_device *, int *)) { struct int34x_thermal_zone *int34x_zone; struct thermal_trip *zone_trips; unsigned long long trip_cnt = 0; unsigned long long hyst; int trip_mask = 0; acpi_status status; int i, ret; int34x_zone = kzalloc(sizeof(*int34x_zone), GFP_KERNEL); if (!int34x_zone) return ERR_PTR(-ENOMEM); int34x_zone->adev = adev; int34x_zone->ops = kmemdup(&int340x_thermal_zone_ops, sizeof(int340x_thermal_zone_ops), GFP_KERNEL); if (!int34x_zone->ops) { ret = -ENOMEM; goto err_ops_alloc; } if (get_temp) int34x_zone->ops->get_temp = get_temp; status = acpi_evaluate_integer(adev->handle, "PATC", NULL, &trip_cnt); if (ACPI_SUCCESS(status)) { int34x_zone->aux_trip_nr = trip_cnt; trip_mask = BIT(trip_cnt) - 1; } zone_trips = kzalloc(sizeof(*zone_trips) * (trip_cnt + INT340X_THERMAL_MAX_TRIP_COUNT), GFP_KERNEL); if (!zone_trips) { ret = -ENOMEM; goto err_trips_alloc; } for (i = 0; i < trip_cnt; i++) { zone_trips[i].type = THERMAL_TRIP_PASSIVE; zone_trips[i].temperature = THERMAL_TEMP_INVALID; } trip_cnt = int340x_thermal_read_trips(adev, zone_trips, trip_cnt); status = acpi_evaluate_integer(adev->handle, "GTSH", NULL, &hyst); if (ACPI_SUCCESS(status)) hyst *= 100; else hyst = 0; for (i = 0; i < trip_cnt; ++i) zone_trips[i].hysteresis = hyst; 
int34x_zone->trips = zone_trips; int34x_zone->lpat_table = acpi_lpat_get_conversion_table(adev->handle); int34x_zone->zone = thermal_zone_device_register_with_trips( acpi_device_bid(adev), zone_trips, trip_cnt, trip_mask, int34x_zone, int34x_zone->ops, &int340x_thermal_params, 0, 0); if (IS_ERR(int34x_zone->zone)) { ret = PTR_ERR(int34x_zone->zone); goto err_thermal_zone; } ret = thermal_zone_device_enable(int34x_zone->zone); if (ret) goto err_enable; return int34x_zone; err_enable: thermal_zone_device_unregister(int34x_zone->zone); err_thermal_zone: kfree(int34x_zone->trips); acpi_lpat_free_conversion_table(int34x_zone->lpat_table); err_trips_alloc: kfree(int34x_zone->ops); err_ops_alloc: kfree(int34x_zone); return ERR_PTR(ret); } EXPORT_SYMBOL_GPL(int340x_thermal_zone_add); void int340x_thermal_zone_remove(struct int34x_thermal_zone *int34x_zone) { thermal_zone_device_unregister(int34x_zone->zone); acpi_lpat_free_conversion_table(int34x_zone->lpat_table); kfree(int34x_zone->trips); kfree(int34x_zone->ops); kfree(int34x_zone); } EXPORT_SYMBOL_GPL(int340x_thermal_zone_remove); void int340x_thermal_update_trips(struct int34x_thermal_zone *int34x_zone) { struct acpi_device *zone_adev = int34x_zone->adev; struct thermal_trip *zone_trips = int34x_zone->trips; int trip_cnt = int34x_zone->zone->num_trips; int act_trip_nr = 0; int i; mutex_lock(&int34x_zone->zone->lock); for (i = int34x_zone->aux_trip_nr; i < trip_cnt; i++) { int temp, err; switch (zone_trips[i].type) { case THERMAL_TRIP_CRITICAL: err = thermal_acpi_critical_trip_temp(zone_adev, &temp); break; case THERMAL_TRIP_HOT: err = thermal_acpi_hot_trip_temp(zone_adev, &temp); break; case THERMAL_TRIP_PASSIVE: err = thermal_acpi_passive_trip_temp(zone_adev, &temp); break; case THERMAL_TRIP_ACTIVE: err = thermal_acpi_active_trip_temp(zone_adev, act_trip_nr++, &temp); break; default: err = -ENODEV; } if (err) { zone_trips[i].temperature = THERMAL_TEMP_INVALID; continue; } zone_trips[i].temperature = temp; } 
mutex_unlock(&int34x_zone->zone->lock); } EXPORT_SYMBOL_GPL(int340x_thermal_update_trips); MODULE_AUTHOR("Aaron Lu <[email protected]>"); MODULE_AUTHOR("Srinivas Pandruvada <[email protected]>"); MODULE_DESCRIPTION("Intel INT340x common thermal zone handler"); MODULE_LICENSE("GPL v2");
linux-master
drivers/thermal/intel/int340x_thermal/int340x_thermal_zone.c
// SPDX-License-Identifier: GPL-2.0-only /* * ACPI INT3403 thermal driver * Copyright (c) 2013, Intel Corporation. */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/init.h> #include <linux/types.h> #include <linux/acpi.h> #include <linux/thermal.h> #include <linux/platform_device.h> #include "int340x_thermal_zone.h" #define INT3403_TYPE_SENSOR 0x03 #define INT3403_TYPE_CHARGER 0x0B #define INT3403_TYPE_BATTERY 0x0C #define INT3403_PERF_CHANGED_EVENT 0x80 #define INT3403_PERF_TRIP_POINT_CHANGED 0x81 #define INT3403_THERMAL_EVENT 0x90 /* Preserved structure for future expandbility */ struct int3403_sensor { struct int34x_thermal_zone *int340x_zone; }; struct int3403_performance_state { u64 performance; u64 power; u64 latency; u64 linear; u64 control; u64 raw_performace; char *raw_unit; int reserved; }; struct int3403_cdev { struct thermal_cooling_device *cdev; unsigned long max_state; }; struct int3403_priv { struct platform_device *pdev; struct acpi_device *adev; unsigned long long type; void *priv; }; static void int3403_notify(acpi_handle handle, u32 event, void *data) { struct int3403_priv *priv = data; struct int3403_sensor *obj; if (!priv) return; obj = priv->priv; if (priv->type != INT3403_TYPE_SENSOR || !obj) return; switch (event) { case INT3403_PERF_CHANGED_EVENT: break; case INT3403_THERMAL_EVENT: int340x_thermal_zone_device_update(obj->int340x_zone, THERMAL_TRIP_VIOLATED); break; case INT3403_PERF_TRIP_POINT_CHANGED: int340x_thermal_update_trips(obj->int340x_zone); int340x_thermal_zone_device_update(obj->int340x_zone, THERMAL_TRIP_CHANGED); break; default: dev_dbg(&priv->pdev->dev, "Unsupported event [0x%x]\n", event); break; } } static int int3403_sensor_add(struct int3403_priv *priv) { int result = 0; struct int3403_sensor *obj; obj = devm_kzalloc(&priv->pdev->dev, sizeof(*obj), GFP_KERNEL); if (!obj) return -ENOMEM; priv->priv = obj; obj->int340x_zone = int340x_thermal_zone_add(priv->adev, NULL); if (IS_ERR(obj->int340x_zone)) 
return PTR_ERR(obj->int340x_zone); result = acpi_install_notify_handler(priv->adev->handle, ACPI_DEVICE_NOTIFY, int3403_notify, (void *)priv); if (result) goto err_free_obj; return 0; err_free_obj: int340x_thermal_zone_remove(obj->int340x_zone); return result; } static int int3403_sensor_remove(struct int3403_priv *priv) { struct int3403_sensor *obj = priv->priv; acpi_remove_notify_handler(priv->adev->handle, ACPI_DEVICE_NOTIFY, int3403_notify); int340x_thermal_zone_remove(obj->int340x_zone); return 0; } /* INT3403 Cooling devices */ static int int3403_get_max_state(struct thermal_cooling_device *cdev, unsigned long *state) { struct int3403_priv *priv = cdev->devdata; struct int3403_cdev *obj = priv->priv; *state = obj->max_state; return 0; } static int int3403_get_cur_state(struct thermal_cooling_device *cdev, unsigned long *state) { struct int3403_priv *priv = cdev->devdata; unsigned long long level; acpi_status status; status = acpi_evaluate_integer(priv->adev->handle, "PPPC", NULL, &level); if (ACPI_SUCCESS(status)) { *state = level; return 0; } else return -EINVAL; } static int int3403_set_cur_state(struct thermal_cooling_device *cdev, unsigned long state) { struct int3403_priv *priv = cdev->devdata; acpi_status status; status = acpi_execute_simple_method(priv->adev->handle, "SPPC", state); if (ACPI_SUCCESS(status)) return 0; else return -EINVAL; } static const struct thermal_cooling_device_ops int3403_cooling_ops = { .get_max_state = int3403_get_max_state, .get_cur_state = int3403_get_cur_state, .set_cur_state = int3403_set_cur_state, }; static int int3403_cdev_add(struct int3403_priv *priv) { int result = 0; acpi_status status; struct int3403_cdev *obj; struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL }; union acpi_object *p; obj = devm_kzalloc(&priv->pdev->dev, sizeof(*obj), GFP_KERNEL); if (!obj) return -ENOMEM; status = acpi_evaluate_object(priv->adev->handle, "PPSS", NULL, &buf); if (ACPI_FAILURE(status)) return -ENODEV; p = buf.pointer; if (!p || 
(p->type != ACPI_TYPE_PACKAGE)) { pr_warn("Invalid PPSS data\n"); kfree(buf.pointer); return -EFAULT; } priv->priv = obj; obj->max_state = p->package.count - 1; obj->cdev = thermal_cooling_device_register(acpi_device_bid(priv->adev), priv, &int3403_cooling_ops); if (IS_ERR(obj->cdev)) result = PTR_ERR(obj->cdev); kfree(buf.pointer); /* TODO: add ACPI notification support */ return result; } static int int3403_cdev_remove(struct int3403_priv *priv) { struct int3403_cdev *obj = priv->priv; thermal_cooling_device_unregister(obj->cdev); return 0; } static int int3403_add(struct platform_device *pdev) { struct int3403_priv *priv; int result = 0; unsigned long long tmp; acpi_status status; priv = devm_kzalloc(&pdev->dev, sizeof(struct int3403_priv), GFP_KERNEL); if (!priv) return -ENOMEM; priv->pdev = pdev; priv->adev = ACPI_COMPANION(&(pdev->dev)); if (!priv->adev) { result = -EINVAL; goto err; } status = acpi_evaluate_integer(priv->adev->handle, "_TMP", NULL, &tmp); if (ACPI_FAILURE(status)) { status = acpi_evaluate_integer(priv->adev->handle, "PTYP", NULL, &priv->type); if (ACPI_FAILURE(status)) { result = -EINVAL; goto err; } } else { priv->type = INT3403_TYPE_SENSOR; } platform_set_drvdata(pdev, priv); switch (priv->type) { case INT3403_TYPE_SENSOR: result = int3403_sensor_add(priv); break; case INT3403_TYPE_CHARGER: case INT3403_TYPE_BATTERY: result = int3403_cdev_add(priv); break; default: result = -EINVAL; } if (result) goto err; return result; err: return result; } static int int3403_remove(struct platform_device *pdev) { struct int3403_priv *priv = platform_get_drvdata(pdev); switch (priv->type) { case INT3403_TYPE_SENSOR: int3403_sensor_remove(priv); break; case INT3403_TYPE_CHARGER: case INT3403_TYPE_BATTERY: int3403_cdev_remove(priv); break; default: break; } return 0; } static const struct acpi_device_id int3403_device_ids[] = { {"INT3403", 0}, {"INTC1043", 0}, {"INTC1046", 0}, {"INTC1062", 0}, {"INTC10A1", 0}, {"", 0}, }; MODULE_DEVICE_TABLE(acpi, 
int3403_device_ids); static struct platform_driver int3403_driver = { .probe = int3403_add, .remove = int3403_remove, .driver = { .name = "int3403 thermal", .acpi_match_table = int3403_device_ids, }, }; module_platform_driver(int3403_driver); MODULE_AUTHOR("Srinivas Pandruvada <[email protected]>"); MODULE_LICENSE("GPL v2"); MODULE_DESCRIPTION("ACPI INT3403 thermal driver");
linux-master
drivers/thermal/intel/int340x_thermal/int3403_thermal.c
// SPDX-License-Identifier: GPL-2.0-only /* * B0D4 processor thermal device * Copyright (c) 2020, Intel Corporation. */ #include <linux/acpi.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/pci.h> #include <linux/thermal.h> #include "int340x_thermal_zone.h" #include "processor_thermal_device.h" #include "../intel_soc_dts_iosf.h" #define DRV_NAME "proc_thermal" static irqreturn_t proc_thermal_pci_msi_irq(int irq, void *devid) { struct proc_thermal_device *proc_priv; struct pci_dev *pdev = devid; proc_priv = pci_get_drvdata(pdev); intel_soc_dts_iosf_interrupt_handler(proc_priv->soc_dts); return IRQ_HANDLED; } static int proc_thermal_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) { struct proc_thermal_device *proc_priv; int ret; ret = pcim_enable_device(pdev); if (ret < 0) { dev_err(&pdev->dev, "error: could not enable device\n"); return ret; } proc_priv = devm_kzalloc(&pdev->dev, sizeof(*proc_priv), GFP_KERNEL); if (!proc_priv) return -ENOMEM; ret = proc_thermal_add(&pdev->dev, proc_priv); if (ret) return ret; pci_set_drvdata(pdev, proc_priv); if (pdev->device == PCI_DEVICE_ID_INTEL_BSW_THERMAL) { /* * Enumerate additional DTS sensors available via IOSF. * But we are not treating as a failure condition, if * there are no aux DTSs enabled or fails. This driver * already exposes sensors, which can be accessed via * ACPI/MSR. So we don't want to fail for auxiliary DTSs. 
*/ proc_priv->soc_dts = intel_soc_dts_iosf_init( INTEL_SOC_DTS_INTERRUPT_MSI, false, 0); if (!IS_ERR(proc_priv->soc_dts) && pdev->irq) { ret = pci_enable_msi(pdev); if (!ret) { ret = request_threaded_irq(pdev->irq, NULL, proc_thermal_pci_msi_irq, IRQF_ONESHOT, "proc_thermal", pdev); if (ret) { intel_soc_dts_iosf_exit( proc_priv->soc_dts); pci_disable_msi(pdev); proc_priv->soc_dts = NULL; } } } else dev_err(&pdev->dev, "No auxiliary DTSs enabled\n"); } else { } ret = proc_thermal_mmio_add(pdev, proc_priv, id->driver_data); if (ret) { proc_thermal_remove(proc_priv); return ret; } return 0; } static void proc_thermal_pci_remove(struct pci_dev *pdev) { struct proc_thermal_device *proc_priv = pci_get_drvdata(pdev); if (proc_priv->soc_dts) { intel_soc_dts_iosf_exit(proc_priv->soc_dts); if (pdev->irq) { free_irq(pdev->irq, pdev); pci_disable_msi(pdev); } } proc_thermal_mmio_remove(pdev, proc_priv); proc_thermal_remove(proc_priv); } #ifdef CONFIG_PM_SLEEP static int proc_thermal_pci_suspend(struct device *dev) { return proc_thermal_suspend(dev); } static int proc_thermal_pci_resume(struct device *dev) { return proc_thermal_resume(dev); } #else #define proc_thermal_pci_suspend NULL #define proc_thermal_pci_resume NULL #endif static SIMPLE_DEV_PM_OPS(proc_thermal_pci_pm, proc_thermal_pci_suspend, proc_thermal_pci_resume); static const struct pci_device_id proc_thermal_pci_ids[] = { { PCI_DEVICE_DATA(INTEL, BDW_THERMAL, 0) }, { PCI_DEVICE_DATA(INTEL, BSW_THERMAL, 0) }, { PCI_DEVICE_DATA(INTEL, BXT0_THERMAL, 0) }, { PCI_DEVICE_DATA(INTEL, BXT1_THERMAL, 0) }, { PCI_DEVICE_DATA(INTEL, BXTX_THERMAL, 0) }, { PCI_DEVICE_DATA(INTEL, BXTP_THERMAL, 0) }, { PCI_DEVICE_DATA(INTEL, CNL_THERMAL, 0) }, { PCI_DEVICE_DATA(INTEL, CFL_THERMAL, 0) }, { PCI_DEVICE_DATA(INTEL, GLK_THERMAL, 0) }, { PCI_DEVICE_DATA(INTEL, HSB_THERMAL, 0) }, { PCI_DEVICE_DATA(INTEL, ICL_THERMAL, PROC_THERMAL_FEATURE_RAPL) }, { PCI_DEVICE_DATA(INTEL, JSL_THERMAL, 0) }, { PCI_DEVICE_DATA(INTEL, SKL_THERMAL, 
PROC_THERMAL_FEATURE_RAPL) }, { PCI_DEVICE_DATA(INTEL, TGL_THERMAL, PROC_THERMAL_FEATURE_RAPL | PROC_THERMAL_FEATURE_FIVR | PROC_THERMAL_FEATURE_MBOX) }, { }, }; MODULE_DEVICE_TABLE(pci, proc_thermal_pci_ids); static struct pci_driver proc_thermal_pci_driver = { .name = DRV_NAME, .probe = proc_thermal_pci_probe, .remove = proc_thermal_pci_remove, .id_table = proc_thermal_pci_ids, .driver.pm = &proc_thermal_pci_pm, }; module_pci_driver(proc_thermal_pci_driver); MODULE_AUTHOR("Srinivas Pandruvada <[email protected]>"); MODULE_DESCRIPTION("Processor Thermal Reporting Device Driver"); MODULE_LICENSE("GPL v2");
linux-master
drivers/thermal/intel/int340x_thermal/processor_thermal_device_pci_legacy.c
// SPDX-License-Identifier: GPL-2.0-only /* * processor thermal device mailbox driver for Workload type hints * Copyright (c) 2020, Intel Corporation. */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/pci.h> #include <linux/io-64-nonatomic-lo-hi.h> #include "processor_thermal_device.h" #define MBOX_CMD_WORKLOAD_TYPE_READ 0x0E #define MBOX_CMD_WORKLOAD_TYPE_WRITE 0x0F #define MBOX_OFFSET_DATA 0x5810 #define MBOX_OFFSET_INTERFACE 0x5818 #define MBOX_BUSY_BIT 31 #define MBOX_RETRY_COUNT 100 #define MBOX_DATA_BIT_VALID 31 #define MBOX_DATA_BIT_AC_DC 30 static DEFINE_MUTEX(mbox_lock); static int wait_for_mbox_ready(struct proc_thermal_device *proc_priv) { u32 retries, data; int ret; /* Poll for rb bit == 0 */ retries = MBOX_RETRY_COUNT; do { data = readl(proc_priv->mmio_base + MBOX_OFFSET_INTERFACE); if (data & BIT_ULL(MBOX_BUSY_BIT)) { ret = -EBUSY; continue; } ret = 0; break; } while (--retries); return ret; } static int send_mbox_write_cmd(struct pci_dev *pdev, u16 id, u32 data) { struct proc_thermal_device *proc_priv; u32 reg_data; int ret; proc_priv = pci_get_drvdata(pdev); mutex_lock(&mbox_lock); ret = wait_for_mbox_ready(proc_priv); if (ret) goto unlock_mbox; writel(data, (proc_priv->mmio_base + MBOX_OFFSET_DATA)); /* Write command register */ reg_data = BIT_ULL(MBOX_BUSY_BIT) | id; writel(reg_data, (proc_priv->mmio_base + MBOX_OFFSET_INTERFACE)); ret = wait_for_mbox_ready(proc_priv); unlock_mbox: mutex_unlock(&mbox_lock); return ret; } static int send_mbox_read_cmd(struct pci_dev *pdev, u16 id, u64 *resp) { struct proc_thermal_device *proc_priv; u32 reg_data; int ret; proc_priv = pci_get_drvdata(pdev); mutex_lock(&mbox_lock); ret = wait_for_mbox_ready(proc_priv); if (ret) goto unlock_mbox; /* Write command register */ reg_data = BIT_ULL(MBOX_BUSY_BIT) | id; writel(reg_data, (proc_priv->mmio_base + MBOX_OFFSET_INTERFACE)); ret = wait_for_mbox_ready(proc_priv); if (ret) goto unlock_mbox; if (id == MBOX_CMD_WORKLOAD_TYPE_READ) *resp = 
readl(proc_priv->mmio_base + MBOX_OFFSET_DATA); else *resp = readq(proc_priv->mmio_base + MBOX_OFFSET_DATA); unlock_mbox: mutex_unlock(&mbox_lock); return ret; } int processor_thermal_send_mbox_read_cmd(struct pci_dev *pdev, u16 id, u64 *resp) { return send_mbox_read_cmd(pdev, id, resp); } EXPORT_SYMBOL_NS_GPL(processor_thermal_send_mbox_read_cmd, INT340X_THERMAL); int processor_thermal_send_mbox_write_cmd(struct pci_dev *pdev, u16 id, u32 data) { return send_mbox_write_cmd(pdev, id, data); } EXPORT_SYMBOL_NS_GPL(processor_thermal_send_mbox_write_cmd, INT340X_THERMAL); /* List of workload types */ static const char * const workload_types[] = { "none", "idle", "semi_active", "bursty", "sustained", "battery_life", NULL }; static ssize_t workload_available_types_show(struct device *dev, struct device_attribute *attr, char *buf) { int i = 0; int ret = 0; while (workload_types[i] != NULL) ret += sprintf(&buf[ret], "%s ", workload_types[i++]); ret += sprintf(&buf[ret], "\n"); return ret; } static DEVICE_ATTR_RO(workload_available_types); static ssize_t workload_type_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct pci_dev *pdev = to_pci_dev(dev); char str_preference[15]; u32 data = 0; ssize_t ret; ret = sscanf(buf, "%14s", str_preference); if (ret != 1) return -EINVAL; ret = match_string(workload_types, -1, str_preference); if (ret < 0) return ret; ret &= 0xff; if (ret) data = BIT(MBOX_DATA_BIT_VALID) | BIT(MBOX_DATA_BIT_AC_DC); data |= ret; ret = send_mbox_write_cmd(pdev, MBOX_CMD_WORKLOAD_TYPE_WRITE, data); if (ret) return false; return count; } static ssize_t workload_type_show(struct device *dev, struct device_attribute *attr, char *buf) { struct pci_dev *pdev = to_pci_dev(dev); u64 cmd_resp; int ret; ret = send_mbox_read_cmd(pdev, MBOX_CMD_WORKLOAD_TYPE_READ, &cmd_resp); if (ret) return false; cmd_resp &= 0xff; if (cmd_resp > ARRAY_SIZE(workload_types) - 1) return -EINVAL; return sprintf(buf, "%s\n", 
workload_types[cmd_resp]); } static DEVICE_ATTR_RW(workload_type); static struct attribute *workload_req_attrs[] = { &dev_attr_workload_available_types.attr, &dev_attr_workload_type.attr, NULL }; static const struct attribute_group workload_req_attribute_group = { .attrs = workload_req_attrs, .name = "workload_request" }; static bool workload_req_created; int proc_thermal_mbox_add(struct pci_dev *pdev, struct proc_thermal_device *proc_priv) { u64 cmd_resp; int ret; /* Check if there is a mailbox support, if fails return success */ ret = send_mbox_read_cmd(pdev, MBOX_CMD_WORKLOAD_TYPE_READ, &cmd_resp); if (ret) return 0; ret = sysfs_create_group(&pdev->dev.kobj, &workload_req_attribute_group); if (ret) return ret; workload_req_created = true; return 0; } EXPORT_SYMBOL_GPL(proc_thermal_mbox_add); void proc_thermal_mbox_remove(struct pci_dev *pdev) { if (workload_req_created) sysfs_remove_group(&pdev->dev.kobj, &workload_req_attribute_group); workload_req_created = false; } EXPORT_SYMBOL_GPL(proc_thermal_mbox_remove); MODULE_LICENSE("GPL v2");
linux-master
drivers/thermal/intel/int340x_thermal/processor_thermal_mbox.c
// SPDX-License-Identifier: GPL-2.0-only /* * processor thermal device RFIM control * Copyright (c) 2020, Intel Corporation. */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/pci.h> #include "processor_thermal_device.h" static struct rapl_if_priv rapl_mmio_priv; static const struct rapl_mmio_regs rapl_mmio_default = { .reg_unit = 0x5938, .regs[RAPL_DOMAIN_PACKAGE] = { 0x59a0, 0x593c, 0x58f0, 0, 0x5930}, .regs[RAPL_DOMAIN_DRAM] = { 0x58e0, 0x58e8, 0x58ec, 0, 0}, .limits[RAPL_DOMAIN_PACKAGE] = BIT(POWER_LIMIT2), .limits[RAPL_DOMAIN_DRAM] = BIT(POWER_LIMIT2), }; static int rapl_mmio_cpu_online(unsigned int cpu) { struct rapl_package *rp; /* mmio rapl supports package 0 only for now */ if (topology_physical_package_id(cpu)) return 0; rp = rapl_find_package_domain(cpu, &rapl_mmio_priv, true); if (!rp) { rp = rapl_add_package(cpu, &rapl_mmio_priv, true); if (IS_ERR(rp)) return PTR_ERR(rp); } cpumask_set_cpu(cpu, &rp->cpumask); return 0; } static int rapl_mmio_cpu_down_prep(unsigned int cpu) { struct rapl_package *rp; int lead_cpu; rp = rapl_find_package_domain(cpu, &rapl_mmio_priv, true); if (!rp) return 0; cpumask_clear_cpu(cpu, &rp->cpumask); lead_cpu = cpumask_first(&rp->cpumask); if (lead_cpu >= nr_cpu_ids) rapl_remove_package(rp); else if (rp->lead_cpu == cpu) rp->lead_cpu = lead_cpu; return 0; } static int rapl_mmio_read_raw(int cpu, struct reg_action *ra) { if (!ra->reg.mmio) return -EINVAL; ra->value = readq(ra->reg.mmio); ra->value &= ra->mask; return 0; } static int rapl_mmio_write_raw(int cpu, struct reg_action *ra) { u64 val; if (!ra->reg.mmio) return -EINVAL; val = readq(ra->reg.mmio); val &= ~ra->mask; val |= ra->value; writeq(val, ra->reg.mmio); return 0; } int proc_thermal_rapl_add(struct pci_dev *pdev, struct proc_thermal_device *proc_priv) { const struct rapl_mmio_regs *rapl_regs = &rapl_mmio_default; enum rapl_domain_reg_id reg; enum rapl_domain_type domain; int ret; if (!rapl_regs) return 0; for (domain = RAPL_DOMAIN_PACKAGE; 
domain < RAPL_DOMAIN_MAX; domain++) { for (reg = RAPL_DOMAIN_REG_LIMIT; reg < RAPL_DOMAIN_REG_MAX; reg++) if (rapl_regs->regs[domain][reg]) rapl_mmio_priv.regs[domain][reg].mmio = proc_priv->mmio_base + rapl_regs->regs[domain][reg]; rapl_mmio_priv.limits[domain] = rapl_regs->limits[domain]; } rapl_mmio_priv.type = RAPL_IF_MMIO; rapl_mmio_priv.reg_unit.mmio = proc_priv->mmio_base + rapl_regs->reg_unit; rapl_mmio_priv.read_raw = rapl_mmio_read_raw; rapl_mmio_priv.write_raw = rapl_mmio_write_raw; rapl_mmio_priv.control_type = powercap_register_control_type(NULL, "intel-rapl-mmio", NULL); if (IS_ERR(rapl_mmio_priv.control_type)) { pr_debug("failed to register powercap control_type.\n"); return PTR_ERR(rapl_mmio_priv.control_type); } ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "powercap/rapl:online", rapl_mmio_cpu_online, rapl_mmio_cpu_down_prep); if (ret < 0) { powercap_unregister_control_type(rapl_mmio_priv.control_type); rapl_mmio_priv.control_type = NULL; return ret; } rapl_mmio_priv.pcap_rapl_online = ret; return 0; } EXPORT_SYMBOL_GPL(proc_thermal_rapl_add); void proc_thermal_rapl_remove(void) { if (IS_ERR_OR_NULL(rapl_mmio_priv.control_type)) return; cpuhp_remove_state(rapl_mmio_priv.pcap_rapl_online); powercap_unregister_control_type(rapl_mmio_priv.control_type); } EXPORT_SYMBOL_GPL(proc_thermal_rapl_remove); MODULE_LICENSE("GPL v2");
linux-master
drivers/thermal/intel/int340x_thermal/processor_thermal_rapl.c
// SPDX-License-Identifier: GPL-2.0-only /* * INT3402 thermal driver for memory temperature reporting * * Copyright (C) 2014, Intel Corporation * Authors: Aaron Lu <[email protected]> */ #include <linux/module.h> #include <linux/platform_device.h> #include <linux/acpi.h> #include <linux/thermal.h> #include "int340x_thermal_zone.h" #define INT3402_PERF_CHANGED_EVENT 0x80 #define INT3402_THERMAL_EVENT 0x90 struct int3402_thermal_data { acpi_handle *handle; struct int34x_thermal_zone *int340x_zone; }; static void int3402_notify(acpi_handle handle, u32 event, void *data) { struct int3402_thermal_data *priv = data; if (!priv) return; switch (event) { case INT3402_PERF_CHANGED_EVENT: break; case INT3402_THERMAL_EVENT: int340x_thermal_zone_device_update(priv->int340x_zone, THERMAL_TRIP_VIOLATED); break; default: break; } } static int int3402_thermal_probe(struct platform_device *pdev) { struct acpi_device *adev = ACPI_COMPANION(&pdev->dev); struct int3402_thermal_data *d; int ret; if (!acpi_has_method(adev->handle, "_TMP")) return -ENODEV; d = devm_kzalloc(&pdev->dev, sizeof(*d), GFP_KERNEL); if (!d) return -ENOMEM; d->int340x_zone = int340x_thermal_zone_add(adev, NULL); if (IS_ERR(d->int340x_zone)) return PTR_ERR(d->int340x_zone); ret = acpi_install_notify_handler(adev->handle, ACPI_DEVICE_NOTIFY, int3402_notify, d); if (ret) { int340x_thermal_zone_remove(d->int340x_zone); return ret; } d->handle = adev->handle; platform_set_drvdata(pdev, d); return 0; } static int int3402_thermal_remove(struct platform_device *pdev) { struct int3402_thermal_data *d = platform_get_drvdata(pdev); acpi_remove_notify_handler(d->handle, ACPI_DEVICE_NOTIFY, int3402_notify); int340x_thermal_zone_remove(d->int340x_zone); return 0; } static const struct acpi_device_id int3402_thermal_match[] = { {"INT3402", 0}, {} }; MODULE_DEVICE_TABLE(acpi, int3402_thermal_match); static struct platform_driver int3402_thermal_driver = { .probe = int3402_thermal_probe, .remove = int3402_thermal_remove, .driver 
= { .name = "int3402 thermal", .acpi_match_table = int3402_thermal_match, }, }; module_platform_driver(int3402_thermal_driver); MODULE_DESCRIPTION("INT3402 Thermal driver"); MODULE_LICENSE("GPL");
linux-master
drivers/thermal/intel/int340x_thermal/int3402_thermal.c
// SPDX-License-Identifier: GPL-2.0-only /* * Processor thermal device for newer processors * Copyright (c) 2020, Intel Corporation. */ #include <linux/acpi.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/pci.h> #include <linux/thermal.h> #include "int340x_thermal_zone.h" #include "processor_thermal_device.h" #define DRV_NAME "proc_thermal_pci" struct proc_thermal_pci { struct pci_dev *pdev; struct proc_thermal_device *proc_priv; struct thermal_zone_device *tzone; struct delayed_work work; int stored_thres; int no_legacy; }; enum proc_thermal_mmio_type { PROC_THERMAL_MMIO_TJMAX, PROC_THERMAL_MMIO_PP0_TEMP, PROC_THERMAL_MMIO_PP1_TEMP, PROC_THERMAL_MMIO_PKG_TEMP, PROC_THERMAL_MMIO_THRES_0, PROC_THERMAL_MMIO_THRES_1, PROC_THERMAL_MMIO_INT_ENABLE_0, PROC_THERMAL_MMIO_INT_ENABLE_1, PROC_THERMAL_MMIO_INT_STATUS_0, PROC_THERMAL_MMIO_INT_STATUS_1, PROC_THERMAL_MMIO_MAX }; struct proc_thermal_mmio_info { enum proc_thermal_mmio_type mmio_type; u64 mmio_addr; u64 shift; u64 mask; }; static struct proc_thermal_mmio_info proc_thermal_mmio_info[] = { { PROC_THERMAL_MMIO_TJMAX, 0x599c, 16, 0xff }, { PROC_THERMAL_MMIO_PP0_TEMP, 0x597c, 0, 0xff }, { PROC_THERMAL_MMIO_PP1_TEMP, 0x5980, 0, 0xff }, { PROC_THERMAL_MMIO_PKG_TEMP, 0x5978, 0, 0xff }, { PROC_THERMAL_MMIO_THRES_0, 0x5820, 8, 0x7F }, { PROC_THERMAL_MMIO_THRES_1, 0x5820, 16, 0x7F }, { PROC_THERMAL_MMIO_INT_ENABLE_0, 0x5820, 15, 0x01 }, { PROC_THERMAL_MMIO_INT_ENABLE_1, 0x5820, 23, 0x01 }, { PROC_THERMAL_MMIO_INT_STATUS_0, 0x7200, 6, 0x01 }, { PROC_THERMAL_MMIO_INT_STATUS_1, 0x7200, 8, 0x01 }, }; #define B0D4_THERMAL_NOTIFY_DELAY 1000 static int notify_delay_ms = B0D4_THERMAL_NOTIFY_DELAY; static void proc_thermal_mmio_read(struct proc_thermal_pci *pci_info, enum proc_thermal_mmio_type type, u32 *value) { *value = ioread32(((u8 __iomem *)pci_info->proc_priv->mmio_base + proc_thermal_mmio_info[type].mmio_addr)); *value >>= proc_thermal_mmio_info[type].shift; *value &= proc_thermal_mmio_info[type].mask; } 
static void proc_thermal_mmio_write(struct proc_thermal_pci *pci_info, enum proc_thermal_mmio_type type, u32 value) { u32 current_val; u32 mask; current_val = ioread32(((u8 __iomem *)pci_info->proc_priv->mmio_base + proc_thermal_mmio_info[type].mmio_addr)); mask = proc_thermal_mmio_info[type].mask << proc_thermal_mmio_info[type].shift; current_val &= ~mask; value &= proc_thermal_mmio_info[type].mask; value <<= proc_thermal_mmio_info[type].shift; current_val |= value; iowrite32(current_val, ((u8 __iomem *)pci_info->proc_priv->mmio_base + proc_thermal_mmio_info[type].mmio_addr)); } /* * To avoid sending two many messages to user space, we have 1 second delay. * On interrupt we are disabling interrupt and enabling after 1 second. * This workload function is delayed by 1 second. */ static void proc_thermal_threshold_work_fn(struct work_struct *work) { struct delayed_work *delayed_work = to_delayed_work(work); struct proc_thermal_pci *pci_info = container_of(delayed_work, struct proc_thermal_pci, work); struct thermal_zone_device *tzone = pci_info->tzone; if (tzone) thermal_zone_device_update(tzone, THERMAL_TRIP_VIOLATED); /* Enable interrupt flag */ proc_thermal_mmio_write(pci_info, PROC_THERMAL_MMIO_INT_ENABLE_0, 1); } static void pkg_thermal_schedule_work(struct delayed_work *work) { unsigned long ms = msecs_to_jiffies(notify_delay_ms); schedule_delayed_work(work, ms); } static irqreturn_t proc_thermal_irq_handler(int irq, void *devid) { struct proc_thermal_pci *pci_info = devid; u32 status; proc_thermal_mmio_read(pci_info, PROC_THERMAL_MMIO_INT_STATUS_0, &status); /* Disable enable interrupt flag */ proc_thermal_mmio_write(pci_info, PROC_THERMAL_MMIO_INT_ENABLE_0, 0); pci_write_config_byte(pci_info->pdev, 0xdc, 0x01); pkg_thermal_schedule_work(&pci_info->work); return IRQ_HANDLED; } static int sys_get_curr_temp(struct thermal_zone_device *tzd, int *temp) { struct proc_thermal_pci *pci_info = thermal_zone_device_priv(tzd); u32 _temp; proc_thermal_mmio_read(pci_info, 
PROC_THERMAL_MMIO_PKG_TEMP, &_temp); *temp = (unsigned long)_temp * 1000; return 0; } static int sys_set_trip_temp(struct thermal_zone_device *tzd, int trip, int temp) { struct proc_thermal_pci *pci_info = thermal_zone_device_priv(tzd); int tjmax, _temp; if (temp <= 0) { cancel_delayed_work_sync(&pci_info->work); proc_thermal_mmio_write(pci_info, PROC_THERMAL_MMIO_INT_ENABLE_0, 0); proc_thermal_mmio_write(pci_info, PROC_THERMAL_MMIO_THRES_0, 0); pci_info->stored_thres = 0; return 0; } proc_thermal_mmio_read(pci_info, PROC_THERMAL_MMIO_TJMAX, &tjmax); _temp = tjmax - (temp / 1000); if (_temp < 0) return -EINVAL; proc_thermal_mmio_write(pci_info, PROC_THERMAL_MMIO_THRES_0, _temp); proc_thermal_mmio_write(pci_info, PROC_THERMAL_MMIO_INT_ENABLE_0, 1); pci_info->stored_thres = temp; return 0; } static int get_trip_temp(struct proc_thermal_pci *pci_info) { int temp, tjmax; proc_thermal_mmio_read(pci_info, PROC_THERMAL_MMIO_THRES_0, &temp); if (!temp) return THERMAL_TEMP_INVALID; proc_thermal_mmio_read(pci_info, PROC_THERMAL_MMIO_TJMAX, &tjmax); temp = (tjmax - temp) * 1000; return temp; } static struct thermal_trip psv_trip = { .type = THERMAL_TRIP_PASSIVE, }; static struct thermal_zone_device_ops tzone_ops = { .get_temp = sys_get_curr_temp, .set_trip_temp = sys_set_trip_temp, }; static struct thermal_zone_params tzone_params = { .governor_name = "user_space", .no_hwmon = true, }; static int proc_thermal_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) { struct proc_thermal_device *proc_priv; struct proc_thermal_pci *pci_info; int irq_flag = 0, irq, ret; proc_priv = devm_kzalloc(&pdev->dev, sizeof(*proc_priv), GFP_KERNEL); if (!proc_priv) return -ENOMEM; pci_info = devm_kzalloc(&pdev->dev, sizeof(*pci_info), GFP_KERNEL); if (!pci_info) return -ENOMEM; pci_info->pdev = pdev; ret = pcim_enable_device(pdev); if (ret < 0) { dev_err(&pdev->dev, "error: could not enable device\n"); return ret; } pci_set_master(pdev); INIT_DELAYED_WORK(&pci_info->work, 
proc_thermal_threshold_work_fn); ret = proc_thermal_add(&pdev->dev, proc_priv); if (ret) { dev_err(&pdev->dev, "error: proc_thermal_add, will continue\n"); pci_info->no_legacy = 1; } proc_priv->priv_data = pci_info; pci_info->proc_priv = proc_priv; pci_set_drvdata(pdev, proc_priv); ret = proc_thermal_mmio_add(pdev, proc_priv, id->driver_data); if (ret) goto err_ret_thermal; psv_trip.temperature = get_trip_temp(pci_info); pci_info->tzone = thermal_zone_device_register_with_trips("TCPU_PCI", &psv_trip, 1, 1, pci_info, &tzone_ops, &tzone_params, 0, 0); if (IS_ERR(pci_info->tzone)) { ret = PTR_ERR(pci_info->tzone); goto err_ret_mmio; } /* request and enable interrupt */ ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES); if (ret < 0) { dev_err(&pdev->dev, "Failed to allocate vectors!\n"); goto err_ret_tzone; } if (!pdev->msi_enabled && !pdev->msix_enabled) irq_flag = IRQF_SHARED; irq = pci_irq_vector(pdev, 0); ret = devm_request_threaded_irq(&pdev->dev, irq, proc_thermal_irq_handler, NULL, irq_flag, KBUILD_MODNAME, pci_info); if (ret) { dev_err(&pdev->dev, "Request IRQ %d failed\n", pdev->irq); goto err_free_vectors; } ret = thermal_zone_device_enable(pci_info->tzone); if (ret) goto err_free_vectors; return 0; err_free_vectors: pci_free_irq_vectors(pdev); err_ret_tzone: thermal_zone_device_unregister(pci_info->tzone); err_ret_mmio: proc_thermal_mmio_remove(pdev, proc_priv); err_ret_thermal: if (!pci_info->no_legacy) proc_thermal_remove(proc_priv); pci_disable_device(pdev); return ret; } static void proc_thermal_pci_remove(struct pci_dev *pdev) { struct proc_thermal_device *proc_priv = pci_get_drvdata(pdev); struct proc_thermal_pci *pci_info = proc_priv->priv_data; cancel_delayed_work_sync(&pci_info->work); proc_thermal_mmio_write(pci_info, PROC_THERMAL_MMIO_THRES_0, 0); proc_thermal_mmio_write(pci_info, PROC_THERMAL_MMIO_INT_ENABLE_0, 0); devm_free_irq(&pdev->dev, pdev->irq, pci_info); pci_free_irq_vectors(pdev); thermal_zone_device_unregister(pci_info->tzone); 
proc_thermal_mmio_remove(pdev, pci_info->proc_priv); if (!pci_info->no_legacy) proc_thermal_remove(proc_priv); pci_disable_device(pdev); } #ifdef CONFIG_PM_SLEEP static int proc_thermal_pci_suspend(struct device *dev) { struct pci_dev *pdev = to_pci_dev(dev); struct proc_thermal_device *proc_priv; struct proc_thermal_pci *pci_info; proc_priv = pci_get_drvdata(pdev); pci_info = proc_priv->priv_data; if (!pci_info->no_legacy) return proc_thermal_suspend(dev); return 0; } static int proc_thermal_pci_resume(struct device *dev) { struct pci_dev *pdev = to_pci_dev(dev); struct proc_thermal_device *proc_priv; struct proc_thermal_pci *pci_info; proc_priv = pci_get_drvdata(pdev); pci_info = proc_priv->priv_data; if (pci_info->stored_thres) { proc_thermal_mmio_write(pci_info, PROC_THERMAL_MMIO_THRES_0, pci_info->stored_thres / 1000); proc_thermal_mmio_write(pci_info, PROC_THERMAL_MMIO_INT_ENABLE_0, 1); } if (!pci_info->no_legacy) return proc_thermal_resume(dev); return 0; } #else #define proc_thermal_pci_suspend NULL #define proc_thermal_pci_resume NULL #endif static SIMPLE_DEV_PM_OPS(proc_thermal_pci_pm, proc_thermal_pci_suspend, proc_thermal_pci_resume); static const struct pci_device_id proc_thermal_pci_ids[] = { { PCI_DEVICE_DATA(INTEL, ADL_THERMAL, PROC_THERMAL_FEATURE_RAPL | PROC_THERMAL_FEATURE_FIVR | PROC_THERMAL_FEATURE_DVFS | PROC_THERMAL_FEATURE_MBOX) }, { PCI_DEVICE_DATA(INTEL, MTLP_THERMAL, PROC_THERMAL_FEATURE_RAPL | PROC_THERMAL_FEATURE_FIVR | PROC_THERMAL_FEATURE_DVFS | PROC_THERMAL_FEATURE_MBOX | PROC_THERMAL_FEATURE_DLVR) }, { PCI_DEVICE_DATA(INTEL, RPL_THERMAL, PROC_THERMAL_FEATURE_RAPL | PROC_THERMAL_FEATURE_FIVR | PROC_THERMAL_FEATURE_DVFS | PROC_THERMAL_FEATURE_MBOX) }, { }, }; MODULE_DEVICE_TABLE(pci, proc_thermal_pci_ids); static struct pci_driver proc_thermal_pci_driver = { .name = DRV_NAME, .probe = proc_thermal_pci_probe, .remove = proc_thermal_pci_remove, .id_table = proc_thermal_pci_ids, .driver.pm = &proc_thermal_pci_pm, }; 
module_pci_driver(proc_thermal_pci_driver); MODULE_AUTHOR("Srinivas Pandruvada <[email protected]>"); MODULE_DESCRIPTION("Processor Thermal Reporting Device Driver"); MODULE_LICENSE("GPL v2");
linux-master
drivers/thermal/intel/int340x_thermal/processor_thermal_device_pci.c
// SPDX-License-Identifier: GPL-2.0-only /* * processor thermal device RFIM control * Copyright (c) 2020, Intel Corporation. */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/pci.h> #include "processor_thermal_device.h" MODULE_IMPORT_NS(INT340X_THERMAL); struct mmio_reg { int read_only; u32 offset; int bits; u16 mask; u16 shift; }; /* These will represent sysfs attribute names */ static const char * const fivr_strings[] = { "vco_ref_code_lo", "vco_ref_code_hi", "spread_spectrum_pct", "spread_spectrum_clk_enable", "rfi_vco_ref_code", "fivr_fffc_rev", NULL }; static const struct mmio_reg tgl_fivr_mmio_regs[] = { { 0, 0x5A18, 3, 0x7, 11}, /* vco_ref_code_lo */ { 0, 0x5A18, 8, 0xFF, 16}, /* vco_ref_code_hi */ { 0, 0x5A08, 8, 0xFF, 0}, /* spread_spectrum_pct */ { 0, 0x5A08, 1, 0x1, 8}, /* spread_spectrum_clk_enable */ { 1, 0x5A10, 12, 0xFFF, 0}, /* rfi_vco_ref_code */ { 1, 0x5A14, 2, 0x3, 1}, /* fivr_fffc_rev */ }; static const char * const dlvr_strings[] = { "dlvr_spread_spectrum_pct", "dlvr_control_mode", "dlvr_control_lock", "dlvr_rfim_enable", "dlvr_freq_select", "dlvr_hardware_rev", "dlvr_freq_mhz", "dlvr_pll_busy", NULL }; static const struct mmio_reg dlvr_mmio_regs[] = { { 0, 0x15A08, 5, 0x1F, 0}, /* dlvr_spread_spectrum_pct */ { 0, 0x15A08, 1, 0x1, 5}, /* dlvr_control_mode */ { 0, 0x15A08, 1, 0x1, 6}, /* dlvr_control_lock */ { 0, 0x15A08, 1, 0x1, 7}, /* dlvr_rfim_enable */ { 0, 0x15A08, 12, 0xFFF, 8}, /* dlvr_freq_select */ { 1, 0x15A10, 2, 0x3, 30}, /* dlvr_hardware_rev */ { 1, 0x15A10, 16, 0xFFFF, 0}, /* dlvr_freq_mhz */ { 1, 0x15A10, 1, 0x1, 16}, /* dlvr_pll_busy */ }; /* These will represent sysfs attribute names */ static const char * const dvfs_strings[] = { "rfi_restriction_run_busy", "rfi_restriction_err_code", "rfi_restriction_data_rate", "rfi_restriction_data_rate_base", "ddr_data_rate_point_0", "ddr_data_rate_point_1", "ddr_data_rate_point_2", "ddr_data_rate_point_3", "rfi_disable", NULL }; static const struct mmio_reg 
adl_dvfs_mmio_regs[] = { { 0, 0x5A38, 1, 0x1, 31}, /* rfi_restriction_run_busy */ { 0, 0x5A38, 7, 0x7F, 24}, /* rfi_restriction_err_code */ { 0, 0x5A38, 8, 0xFF, 16}, /* rfi_restriction_data_rate */ { 0, 0x5A38, 16, 0xFFFF, 0}, /* rfi_restriction_data_rate_base */ { 0, 0x5A30, 10, 0x3FF, 0}, /* ddr_data_rate_point_0 */ { 0, 0x5A30, 10, 0x3FF, 10}, /* ddr_data_rate_point_1 */ { 0, 0x5A30, 10, 0x3FF, 20}, /* ddr_data_rate_point_2 */ { 0, 0x5A30, 10, 0x3FF, 30}, /* ddr_data_rate_point_3 */ { 0, 0x5A40, 1, 0x1, 0}, /* rfi_disable */ }; #define RFIM_SHOW(suffix, table)\ static ssize_t suffix##_show(struct device *dev,\ struct device_attribute *attr,\ char *buf)\ {\ struct proc_thermal_device *proc_priv;\ struct pci_dev *pdev = to_pci_dev(dev);\ const struct mmio_reg *mmio_regs;\ const char **match_strs;\ u32 reg_val;\ int ret;\ \ proc_priv = pci_get_drvdata(pdev);\ if (table == 1) {\ match_strs = (const char **)dvfs_strings;\ mmio_regs = adl_dvfs_mmio_regs;\ } else if (table == 2) { \ match_strs = (const char **)dlvr_strings;\ mmio_regs = dlvr_mmio_regs;\ } else {\ match_strs = (const char **)fivr_strings;\ mmio_regs = tgl_fivr_mmio_regs;\ } \ ret = match_string(match_strs, -1, attr->attr.name);\ if (ret < 0)\ return ret;\ reg_val = readl((void __iomem *) (proc_priv->mmio_base + mmio_regs[ret].offset));\ ret = (reg_val >> mmio_regs[ret].shift) & mmio_regs[ret].mask;\ return sprintf(buf, "%u\n", ret);\ } #define RFIM_STORE(suffix, table)\ static ssize_t suffix##_store(struct device *dev,\ struct device_attribute *attr,\ const char *buf, size_t count)\ {\ struct proc_thermal_device *proc_priv;\ struct pci_dev *pdev = to_pci_dev(dev);\ unsigned int input;\ const char **match_strs;\ const struct mmio_reg *mmio_regs;\ int ret, err;\ u32 reg_val;\ u32 mask;\ \ proc_priv = pci_get_drvdata(pdev);\ if (table == 1) {\ match_strs = (const char **)dvfs_strings;\ mmio_regs = adl_dvfs_mmio_regs;\ } else if (table == 2) { \ match_strs = (const char **)dlvr_strings;\ mmio_regs = 
dlvr_mmio_regs;\ } else {\ match_strs = (const char **)fivr_strings;\ mmio_regs = tgl_fivr_mmio_regs;\ } \ \ ret = match_string(match_strs, -1, attr->attr.name);\ if (ret < 0)\ return ret;\ if (mmio_regs[ret].read_only)\ return -EPERM;\ err = kstrtouint(buf, 10, &input);\ if (err)\ return err;\ mask = GENMASK(mmio_regs[ret].shift + mmio_regs[ret].bits - 1, mmio_regs[ret].shift);\ reg_val = readl((void __iomem *) (proc_priv->mmio_base + mmio_regs[ret].offset));\ reg_val &= ~mask;\ reg_val |= (input << mmio_regs[ret].shift);\ writel(reg_val, (void __iomem *) (proc_priv->mmio_base + mmio_regs[ret].offset));\ return count;\ } RFIM_SHOW(vco_ref_code_lo, 0) RFIM_SHOW(vco_ref_code_hi, 0) RFIM_SHOW(spread_spectrum_pct, 0) RFIM_SHOW(spread_spectrum_clk_enable, 0) RFIM_SHOW(rfi_vco_ref_code, 0) RFIM_SHOW(fivr_fffc_rev, 0) RFIM_STORE(vco_ref_code_lo, 0) RFIM_STORE(vco_ref_code_hi, 0) RFIM_STORE(spread_spectrum_pct, 0) RFIM_STORE(spread_spectrum_clk_enable, 0) RFIM_STORE(rfi_vco_ref_code, 0) RFIM_STORE(fivr_fffc_rev, 0) RFIM_SHOW(dlvr_spread_spectrum_pct, 2) RFIM_SHOW(dlvr_control_mode, 2) RFIM_SHOW(dlvr_control_lock, 2) RFIM_SHOW(dlvr_hardware_rev, 2) RFIM_SHOW(dlvr_freq_mhz, 2) RFIM_SHOW(dlvr_pll_busy, 2) RFIM_SHOW(dlvr_freq_select, 2) RFIM_SHOW(dlvr_rfim_enable, 2) RFIM_STORE(dlvr_spread_spectrum_pct, 2) RFIM_STORE(dlvr_rfim_enable, 2) RFIM_STORE(dlvr_freq_select, 2) RFIM_STORE(dlvr_control_mode, 2) RFIM_STORE(dlvr_control_lock, 2) static DEVICE_ATTR_RW(dlvr_spread_spectrum_pct); static DEVICE_ATTR_RW(dlvr_control_mode); static DEVICE_ATTR_RW(dlvr_control_lock); static DEVICE_ATTR_RW(dlvr_freq_select); static DEVICE_ATTR_RO(dlvr_hardware_rev); static DEVICE_ATTR_RO(dlvr_freq_mhz); static DEVICE_ATTR_RO(dlvr_pll_busy); static DEVICE_ATTR_RW(dlvr_rfim_enable); static struct attribute *dlvr_attrs[] = { &dev_attr_dlvr_spread_spectrum_pct.attr, &dev_attr_dlvr_control_mode.attr, &dev_attr_dlvr_control_lock.attr, &dev_attr_dlvr_freq_select.attr, &dev_attr_dlvr_hardware_rev.attr, 
&dev_attr_dlvr_freq_mhz.attr, &dev_attr_dlvr_pll_busy.attr, &dev_attr_dlvr_rfim_enable.attr, NULL }; static const struct attribute_group dlvr_attribute_group = { .attrs = dlvr_attrs, .name = "dlvr" }; static DEVICE_ATTR_RW(vco_ref_code_lo); static DEVICE_ATTR_RW(vco_ref_code_hi); static DEVICE_ATTR_RW(spread_spectrum_pct); static DEVICE_ATTR_RW(spread_spectrum_clk_enable); static DEVICE_ATTR_RW(rfi_vco_ref_code); static DEVICE_ATTR_RW(fivr_fffc_rev); static struct attribute *fivr_attrs[] = { &dev_attr_vco_ref_code_lo.attr, &dev_attr_vco_ref_code_hi.attr, &dev_attr_spread_spectrum_pct.attr, &dev_attr_spread_spectrum_clk_enable.attr, &dev_attr_rfi_vco_ref_code.attr, &dev_attr_fivr_fffc_rev.attr, NULL }; static const struct attribute_group fivr_attribute_group = { .attrs = fivr_attrs, .name = "fivr" }; RFIM_SHOW(rfi_restriction_run_busy, 1) RFIM_SHOW(rfi_restriction_err_code, 1) RFIM_SHOW(rfi_restriction_data_rate, 1) RFIM_SHOW(rfi_restriction_data_rate_base, 1) RFIM_SHOW(ddr_data_rate_point_0, 1) RFIM_SHOW(ddr_data_rate_point_1, 1) RFIM_SHOW(ddr_data_rate_point_2, 1) RFIM_SHOW(ddr_data_rate_point_3, 1) RFIM_SHOW(rfi_disable, 1) RFIM_STORE(rfi_restriction_run_busy, 1) RFIM_STORE(rfi_restriction_err_code, 1) RFIM_STORE(rfi_restriction_data_rate, 1) RFIM_STORE(rfi_restriction_data_rate_base, 1) RFIM_STORE(rfi_disable, 1) static DEVICE_ATTR_RW(rfi_restriction_run_busy); static DEVICE_ATTR_RW(rfi_restriction_err_code); static DEVICE_ATTR_RW(rfi_restriction_data_rate); static DEVICE_ATTR_RW(rfi_restriction_data_rate_base); static DEVICE_ATTR_RO(ddr_data_rate_point_0); static DEVICE_ATTR_RO(ddr_data_rate_point_1); static DEVICE_ATTR_RO(ddr_data_rate_point_2); static DEVICE_ATTR_RO(ddr_data_rate_point_3); static DEVICE_ATTR_RW(rfi_disable); static ssize_t rfi_restriction_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { u16 id = 0x0008; u32 input; int ret; ret = kstrtou32(buf, 10, &input); if (ret) return ret; ret = 
processor_thermal_send_mbox_write_cmd(to_pci_dev(dev), id, input); if (ret) return ret; return count; } static ssize_t rfi_restriction_show(struct device *dev, struct device_attribute *attr, char *buf) { u16 id = 0x0007; u64 resp; int ret; ret = processor_thermal_send_mbox_read_cmd(to_pci_dev(dev), id, &resp); if (ret) return ret; return sprintf(buf, "%llu\n", resp); } static ssize_t ddr_data_rate_show(struct device *dev, struct device_attribute *attr, char *buf) { u16 id = 0x0107; u64 resp; int ret; ret = processor_thermal_send_mbox_read_cmd(to_pci_dev(dev), id, &resp); if (ret) return ret; return sprintf(buf, "%llu\n", resp); } static DEVICE_ATTR_RW(rfi_restriction); static DEVICE_ATTR_RO(ddr_data_rate); static struct attribute *dvfs_attrs[] = { &dev_attr_rfi_restriction_run_busy.attr, &dev_attr_rfi_restriction_err_code.attr, &dev_attr_rfi_restriction_data_rate.attr, &dev_attr_rfi_restriction_data_rate_base.attr, &dev_attr_ddr_data_rate_point_0.attr, &dev_attr_ddr_data_rate_point_1.attr, &dev_attr_ddr_data_rate_point_2.attr, &dev_attr_ddr_data_rate_point_3.attr, &dev_attr_rfi_disable.attr, &dev_attr_ddr_data_rate.attr, &dev_attr_rfi_restriction.attr, NULL }; static const struct attribute_group dvfs_attribute_group = { .attrs = dvfs_attrs, .name = "dvfs" }; int proc_thermal_rfim_add(struct pci_dev *pdev, struct proc_thermal_device *proc_priv) { int ret; if (proc_priv->mmio_feature_mask & PROC_THERMAL_FEATURE_FIVR) { ret = sysfs_create_group(&pdev->dev.kobj, &fivr_attribute_group); if (ret) return ret; } if (proc_priv->mmio_feature_mask & PROC_THERMAL_FEATURE_DLVR) { ret = sysfs_create_group(&pdev->dev.kobj, &dlvr_attribute_group); if (ret) return ret; } if (proc_priv->mmio_feature_mask & PROC_THERMAL_FEATURE_DVFS) { ret = sysfs_create_group(&pdev->dev.kobj, &dvfs_attribute_group); if (ret && proc_priv->mmio_feature_mask & PROC_THERMAL_FEATURE_FIVR) { sysfs_remove_group(&pdev->dev.kobj, &fivr_attribute_group); return ret; } if (ret && proc_priv->mmio_feature_mask & 
PROC_THERMAL_FEATURE_DLVR) { sysfs_remove_group(&pdev->dev.kobj, &dlvr_attribute_group); return ret; } } return 0; } EXPORT_SYMBOL_GPL(proc_thermal_rfim_add); void proc_thermal_rfim_remove(struct pci_dev *pdev) { struct proc_thermal_device *proc_priv = pci_get_drvdata(pdev); if (proc_priv->mmio_feature_mask & PROC_THERMAL_FEATURE_FIVR) sysfs_remove_group(&pdev->dev.kobj, &fivr_attribute_group); if (proc_priv->mmio_feature_mask & PROC_THERMAL_FEATURE_DLVR) sysfs_remove_group(&pdev->dev.kobj, &dlvr_attribute_group); if (proc_priv->mmio_feature_mask & PROC_THERMAL_FEATURE_DVFS) sysfs_remove_group(&pdev->dev.kobj, &dvfs_attribute_group); } EXPORT_SYMBOL_GPL(proc_thermal_rfim_remove); MODULE_LICENSE("GPL v2");
linux-master
drivers/thermal/intel/int340x_thermal/processor_thermal_rfim.c
// SPDX-License-Identifier: GPL-2.0-only /* * INT3401 processor thermal device * Copyright (c) 2020, Intel Corporation. */ #include <linux/acpi.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/platform_device.h> #include <linux/thermal.h> #include "int340x_thermal_zone.h" #include "processor_thermal_device.h" static const struct acpi_device_id int3401_device_ids[] = { {"INT3401", 0}, {"", 0}, }; MODULE_DEVICE_TABLE(acpi, int3401_device_ids); static int int3401_add(struct platform_device *pdev) { struct proc_thermal_device *proc_priv; int ret; proc_priv = devm_kzalloc(&pdev->dev, sizeof(*proc_priv), GFP_KERNEL); if (!proc_priv) return -ENOMEM; ret = proc_thermal_add(&pdev->dev, proc_priv); if (ret) return ret; platform_set_drvdata(pdev, proc_priv); return ret; } static int int3401_remove(struct platform_device *pdev) { proc_thermal_remove(platform_get_drvdata(pdev)); return 0; } #ifdef CONFIG_PM_SLEEP static int int3401_thermal_suspend(struct device *dev) { return proc_thermal_suspend(dev); } static int int3401_thermal_resume(struct device *dev) { return proc_thermal_resume(dev); } #else #define int3401_thermal_suspend NULL #define int3401_thermal_resume NULL #endif static SIMPLE_DEV_PM_OPS(int3401_proc_thermal_pm, int3401_thermal_suspend, int3401_thermal_resume); static struct platform_driver int3401_driver = { .probe = int3401_add, .remove = int3401_remove, .driver = { .name = "int3401 thermal", .acpi_match_table = int3401_device_ids, .pm = &int3401_proc_thermal_pm, }, }; module_platform_driver(int3401_driver); MODULE_AUTHOR("Srinivas Pandruvada <[email protected]>"); MODULE_DESCRIPTION("Processor Thermal Reporting Device Driver"); MODULE_LICENSE("GPL v2");
linux-master
drivers/thermal/intel/int340x_thermal/int3401_thermal.c
// SPDX-License-Identifier: GPL-2.0-only /* acpi_thermal_rel.c driver for exporting ACPI thermal relationship * * Copyright (c) 2014 Intel Corp */ /* * Two functionalities included: * 1. Export _TRT, _ART, via misc device interface to the userspace. * 2. Provide parsing result to kernel drivers * */ #include <linux/init.h> #include <linux/export.h> #include <linux/module.h> #include <linux/device.h> #include <linux/platform_device.h> #include <linux/io.h> #include <linux/acpi.h> #include <linux/uaccess.h> #include <linux/miscdevice.h> #include <linux/fs.h> #include "acpi_thermal_rel.h" static acpi_handle acpi_thermal_rel_handle; static DEFINE_SPINLOCK(acpi_thermal_rel_chrdev_lock); static int acpi_thermal_rel_chrdev_count; /* #times opened */ static int acpi_thermal_rel_chrdev_exclu; /* already open exclusive? */ static int acpi_thermal_rel_open(struct inode *inode, struct file *file) { spin_lock(&acpi_thermal_rel_chrdev_lock); if (acpi_thermal_rel_chrdev_exclu || (acpi_thermal_rel_chrdev_count && (file->f_flags & O_EXCL))) { spin_unlock(&acpi_thermal_rel_chrdev_lock); return -EBUSY; } if (file->f_flags & O_EXCL) acpi_thermal_rel_chrdev_exclu = 1; acpi_thermal_rel_chrdev_count++; spin_unlock(&acpi_thermal_rel_chrdev_lock); return nonseekable_open(inode, file); } static int acpi_thermal_rel_release(struct inode *inode, struct file *file) { spin_lock(&acpi_thermal_rel_chrdev_lock); acpi_thermal_rel_chrdev_count--; acpi_thermal_rel_chrdev_exclu = 0; spin_unlock(&acpi_thermal_rel_chrdev_lock); return 0; } /** * acpi_parse_trt - Thermal Relationship Table _TRT for passive cooling * * @handle: ACPI handle of the device contains _TRT * @trt_count: the number of valid entries resulted from parsing _TRT * @trtp: pointer to pointer of array of _TRT entries in parsing result * @create_dev: whether to create platform devices for target and source * */ int acpi_parse_trt(acpi_handle handle, int *trt_count, struct trt **trtp, bool create_dev) { acpi_status status; int result = 
0; int i; int nr_bad_entries = 0; struct trt *trts; union acpi_object *p; struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; struct acpi_buffer element = { 0, NULL }; struct acpi_buffer trt_format = { sizeof("RRNNNNNN"), "RRNNNNNN" }; status = acpi_evaluate_object(handle, "_TRT", NULL, &buffer); if (ACPI_FAILURE(status)) return -ENODEV; p = buffer.pointer; if (!p || (p->type != ACPI_TYPE_PACKAGE)) { pr_err("Invalid _TRT data\n"); result = -EFAULT; goto end; } *trt_count = p->package.count; trts = kcalloc(*trt_count, sizeof(struct trt), GFP_KERNEL); if (!trts) { result = -ENOMEM; goto end; } for (i = 0; i < *trt_count; i++) { struct trt *trt = &trts[i - nr_bad_entries]; element.length = sizeof(struct trt); element.pointer = trt; status = acpi_extract_package(&(p->package.elements[i]), &trt_format, &element); if (ACPI_FAILURE(status)) { nr_bad_entries++; pr_warn("_TRT package %d is invalid, ignored\n", i); continue; } if (!create_dev) continue; if (!acpi_fetch_acpi_dev(trt->source)) pr_warn("Failed to get source ACPI device\n"); if (!acpi_fetch_acpi_dev(trt->target)) pr_warn("Failed to get target ACPI device\n"); } result = 0; *trtp = trts; /* don't count bad entries */ *trt_count -= nr_bad_entries; end: kfree(buffer.pointer); return result; } EXPORT_SYMBOL(acpi_parse_trt); /** * acpi_parse_art - Parse Active Relationship Table _ART * * @handle: ACPI handle of the device contains _ART * @art_count: the number of valid entries resulted from parsing _ART * @artp: pointer to pointer of array of art entries in parsing result * @create_dev: whether to create platform devices for target and source * */ int acpi_parse_art(acpi_handle handle, int *art_count, struct art **artp, bool create_dev) { acpi_status status; int result = 0; int i; int nr_bad_entries = 0; struct art *arts; union acpi_object *p; struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; struct acpi_buffer element = { 0, NULL }; struct acpi_buffer art_format = { sizeof("RRNNNNNNNNNNN"), 
"RRNNNNNNNNNNN" }; status = acpi_evaluate_object(handle, "_ART", NULL, &buffer); if (ACPI_FAILURE(status)) return -ENODEV; p = buffer.pointer; if (!p || (p->type != ACPI_TYPE_PACKAGE)) { pr_err("Invalid _ART data\n"); result = -EFAULT; goto end; } /* ignore p->package.elements[0], as this is _ART Revision field */ *art_count = p->package.count - 1; arts = kcalloc(*art_count, sizeof(struct art), GFP_KERNEL); if (!arts) { result = -ENOMEM; goto end; } for (i = 0; i < *art_count; i++) { struct art *art = &arts[i - nr_bad_entries]; element.length = sizeof(struct art); element.pointer = art; status = acpi_extract_package(&(p->package.elements[i + 1]), &art_format, &element); if (ACPI_FAILURE(status)) { pr_warn("_ART package %d is invalid, ignored", i); nr_bad_entries++; continue; } if (!create_dev) continue; if (!acpi_fetch_acpi_dev(art->source)) pr_warn("Failed to get source ACPI device\n"); if (!acpi_fetch_acpi_dev(art->target)) pr_warn("Failed to get target ACPI device\n"); } *artp = arts; /* don't count bad entries */ *art_count -= nr_bad_entries; end: kfree(buffer.pointer); return result; } EXPORT_SYMBOL(acpi_parse_art); /* * acpi_parse_psvt - Passive Table (PSVT) for passive cooling * * @handle: ACPI handle of the device which contains PSVT * @psvt_count: the number of valid entries resulted from parsing PSVT * @psvtp: pointer to array of psvt entries * */ static int acpi_parse_psvt(acpi_handle handle, int *psvt_count, struct psvt **psvtp) { struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; int nr_bad_entries = 0, revision = 0; union acpi_object *p; acpi_status status; int i, result = 0; struct psvt *psvts; if (!acpi_has_method(handle, "PSVT")) return -ENODEV; status = acpi_evaluate_object(handle, "PSVT", NULL, &buffer); if (ACPI_FAILURE(status)) return -ENODEV; p = buffer.pointer; if (!p || (p->type != ACPI_TYPE_PACKAGE)) { result = -EFAULT; goto end; } /* first package is the revision number */ if (p->package.count > 0) { union acpi_object *prev = 
&(p->package.elements[0]); if (prev->type == ACPI_TYPE_INTEGER) revision = (int)prev->integer.value; } else { result = -EFAULT; goto end; } /* Support only version 2 */ if (revision != 2) { result = -EFAULT; goto end; } *psvt_count = p->package.count - 1; if (!*psvt_count) { result = -EFAULT; goto end; } psvts = kcalloc(*psvt_count, sizeof(*psvts), GFP_KERNEL); if (!psvts) { result = -ENOMEM; goto end; } /* Start index is 1 because the first package is the revision number */ for (i = 1; i < p->package.count; i++) { struct acpi_buffer psvt_int_format = { sizeof("RRNNNNNNNNNN"), "RRNNNNNNNNNN" }; struct acpi_buffer psvt_str_format = { sizeof("RRNNNNNSNNNN"), "RRNNNNNSNNNN" }; union acpi_object *package = &(p->package.elements[i]); struct psvt *psvt = &psvts[i - 1 - nr_bad_entries]; struct acpi_buffer *psvt_format = &psvt_int_format; struct acpi_buffer element = { 0, NULL }; union acpi_object *knob; struct acpi_device *res; struct psvt *psvt_ptr; element.length = ACPI_ALLOCATE_BUFFER; element.pointer = NULL; if (package->package.count >= ACPI_NR_PSVT_ELEMENTS) { knob = &(package->package.elements[ACPI_PSVT_CONTROL_KNOB]); } else { nr_bad_entries++; pr_info("PSVT package %d is invalid, ignored\n", i); continue; } if (knob->type == ACPI_TYPE_STRING) { psvt_format = &psvt_str_format; if (knob->string.length > ACPI_LIMIT_STR_MAX_LEN - 1) { pr_info("PSVT package %d limit string len exceeds max\n", i); knob->string.length = ACPI_LIMIT_STR_MAX_LEN - 1; } } status = acpi_extract_package(&(p->package.elements[i]), psvt_format, &element); if (ACPI_FAILURE(status)) { nr_bad_entries++; pr_info("PSVT package %d is invalid, ignored\n", i); continue; } psvt_ptr = (struct psvt *)element.pointer; memcpy(psvt, psvt_ptr, sizeof(*psvt)); /* The limit element can be string or U64 */ psvt->control_knob_type = (u64)knob->type; if (knob->type == ACPI_TYPE_STRING) { memset(&psvt->limit, 0, sizeof(u64)); strncpy(psvt->limit.string, psvt_ptr->limit.str_ptr, knob->string.length); } else { 
psvt->limit.integer = psvt_ptr->limit.integer; } kfree(element.pointer); res = acpi_fetch_acpi_dev(psvt->source); if (!res) { nr_bad_entries++; pr_info("Failed to get source ACPI device\n"); continue; } res = acpi_fetch_acpi_dev(psvt->target); if (!res) { nr_bad_entries++; pr_info("Failed to get target ACPI device\n"); continue; } } /* don't count bad entries */ *psvt_count -= nr_bad_entries; if (!*psvt_count) { result = -EFAULT; kfree(psvts); goto end; } *psvtp = psvts; return 0; end: kfree(buffer.pointer); return result; } /* get device name from acpi handle */ static void get_single_name(acpi_handle handle, char *name) { struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER}; if (ACPI_FAILURE(acpi_get_name(handle, ACPI_SINGLE_NAME, &buffer))) pr_warn("Failed to get device name from acpi handle\n"); else { memcpy(name, buffer.pointer, ACPI_NAMESEG_SIZE); kfree(buffer.pointer); } } static int fill_art(char __user *ubuf) { int i; int ret; int count; int art_len; struct art *arts = NULL; union art_object *art_user; ret = acpi_parse_art(acpi_thermal_rel_handle, &count, &arts, false); if (ret) goto free_art; art_len = count * sizeof(union art_object); art_user = kzalloc(art_len, GFP_KERNEL); if (!art_user) { ret = -ENOMEM; goto free_art; } /* now fill in user art data */ for (i = 0; i < count; i++) { /* userspace art needs device name instead of acpi reference */ get_single_name(arts[i].source, art_user[i].source_device); get_single_name(arts[i].target, art_user[i].target_device); /* copy the rest int data in addition to source and target */ BUILD_BUG_ON(sizeof(art_user[i].data) != sizeof(u64) * (ACPI_NR_ART_ELEMENTS - 2)); memcpy(&art_user[i].data, &arts[i].data, sizeof(art_user[i].data)); } if (copy_to_user(ubuf, art_user, art_len)) ret = -EFAULT; kfree(art_user); free_art: kfree(arts); return ret; } static int fill_trt(char __user *ubuf) { int i; int ret; int count; int trt_len; struct trt *trts = NULL; union trt_object *trt_user; ret = 
acpi_parse_trt(acpi_thermal_rel_handle, &count, &trts, false); if (ret) goto free_trt; trt_len = count * sizeof(union trt_object); trt_user = kzalloc(trt_len, GFP_KERNEL); if (!trt_user) { ret = -ENOMEM; goto free_trt; } /* now fill in user trt data */ for (i = 0; i < count; i++) { /* userspace trt needs device name instead of acpi reference */ get_single_name(trts[i].source, trt_user[i].source_device); get_single_name(trts[i].target, trt_user[i].target_device); trt_user[i].sample_period = trts[i].sample_period; trt_user[i].influence = trts[i].influence; } if (copy_to_user(ubuf, trt_user, trt_len)) ret = -EFAULT; kfree(trt_user); free_trt: kfree(trts); return ret; } static int fill_psvt(char __user *ubuf) { int i, ret, count, psvt_len; union psvt_object *psvt_user; struct psvt *psvts; ret = acpi_parse_psvt(acpi_thermal_rel_handle, &count, &psvts); if (ret) return ret; psvt_len = count * sizeof(*psvt_user); psvt_user = kzalloc(psvt_len, GFP_KERNEL); if (!psvt_user) { ret = -ENOMEM; goto free_psvt; } /* now fill in user psvt data */ for (i = 0; i < count; i++) { /* userspace psvt needs device name instead of acpi reference */ get_single_name(psvts[i].source, psvt_user[i].source_device); get_single_name(psvts[i].target, psvt_user[i].target_device); psvt_user[i].priority = psvts[i].priority; psvt_user[i].sample_period = psvts[i].sample_period; psvt_user[i].passive_temp = psvts[i].passive_temp; psvt_user[i].source_domain = psvts[i].source_domain; psvt_user[i].control_knob = psvts[i].control_knob; psvt_user[i].step_size = psvts[i].step_size; psvt_user[i].limit_coeff = psvts[i].limit_coeff; psvt_user[i].unlimit_coeff = psvts[i].unlimit_coeff; psvt_user[i].control_knob_type = psvts[i].control_knob_type; if (psvt_user[i].control_knob_type == ACPI_TYPE_STRING) strncpy(psvt_user[i].limit.string, psvts[i].limit.string, ACPI_LIMIT_STR_MAX_LEN); else psvt_user[i].limit.integer = psvts[i].limit.integer; } if (copy_to_user(ubuf, psvt_user, psvt_len)) ret = -EFAULT; 
kfree(psvt_user); free_psvt: kfree(psvts); return ret; } static long acpi_thermal_rel_ioctl(struct file *f, unsigned int cmd, unsigned long __arg) { int ret = 0; unsigned long length = 0; int count = 0; char __user *arg = (void __user *)__arg; struct trt *trts = NULL; struct art *arts = NULL; struct psvt *psvts; switch (cmd) { case ACPI_THERMAL_GET_TRT_COUNT: ret = acpi_parse_trt(acpi_thermal_rel_handle, &count, &trts, false); kfree(trts); if (!ret) return put_user(count, (unsigned long __user *)__arg); return ret; case ACPI_THERMAL_GET_TRT_LEN: ret = acpi_parse_trt(acpi_thermal_rel_handle, &count, &trts, false); kfree(trts); length = count * sizeof(union trt_object); if (!ret) return put_user(length, (unsigned long __user *)__arg); return ret; case ACPI_THERMAL_GET_TRT: return fill_trt(arg); case ACPI_THERMAL_GET_ART_COUNT: ret = acpi_parse_art(acpi_thermal_rel_handle, &count, &arts, false); kfree(arts); if (!ret) return put_user(count, (unsigned long __user *)__arg); return ret; case ACPI_THERMAL_GET_ART_LEN: ret = acpi_parse_art(acpi_thermal_rel_handle, &count, &arts, false); kfree(arts); length = count * sizeof(union art_object); if (!ret) return put_user(length, (unsigned long __user *)__arg); return ret; case ACPI_THERMAL_GET_ART: return fill_art(arg); case ACPI_THERMAL_GET_PSVT_COUNT: ret = acpi_parse_psvt(acpi_thermal_rel_handle, &count, &psvts); if (!ret) { kfree(psvts); return put_user(count, (unsigned long __user *)__arg); } return ret; case ACPI_THERMAL_GET_PSVT_LEN: /* total length of the data retrieved (count * PSVT entry size) */ ret = acpi_parse_psvt(acpi_thermal_rel_handle, &count, &psvts); length = count * sizeof(union psvt_object); if (!ret) { kfree(psvts); return put_user(length, (unsigned long __user *)__arg); } return ret; case ACPI_THERMAL_GET_PSVT: return fill_psvt(arg); default: return -ENOTTY; } } static const struct file_operations acpi_thermal_rel_fops = { .owner = THIS_MODULE, .open = acpi_thermal_rel_open, .release = 
acpi_thermal_rel_release, .unlocked_ioctl = acpi_thermal_rel_ioctl, .llseek = no_llseek, }; static struct miscdevice acpi_thermal_rel_misc_device = { .minor = MISC_DYNAMIC_MINOR, "acpi_thermal_rel", &acpi_thermal_rel_fops }; int acpi_thermal_rel_misc_device_add(acpi_handle handle) { acpi_thermal_rel_handle = handle; return misc_register(&acpi_thermal_rel_misc_device); } EXPORT_SYMBOL(acpi_thermal_rel_misc_device_add); int acpi_thermal_rel_misc_device_remove(acpi_handle handle) { misc_deregister(&acpi_thermal_rel_misc_device); return 0; } EXPORT_SYMBOL(acpi_thermal_rel_misc_device_remove); MODULE_AUTHOR("Zhang Rui <[email protected]>"); MODULE_AUTHOR("Jacob Pan <[email protected]"); MODULE_DESCRIPTION("Intel acpi thermal rel misc dev driver"); MODULE_LICENSE("GPL v2");
linux-master
drivers/thermal/intel/int340x_thermal/acpi_thermal_rel.c
// SPDX-License-Identifier: GPL-2.0-only /* * INT3400 thermal driver * * Copyright (C) 2014, Intel Corporation * Authors: Zhang Rui <[email protected]> */ #include <linux/module.h> #include <linux/platform_device.h> #include <linux/acpi.h> #include <linux/thermal.h> #include "acpi_thermal_rel.h" #define INT3400_THERMAL_TABLE_CHANGED 0x83 #define INT3400_ODVP_CHANGED 0x88 #define INT3400_KEEP_ALIVE 0xA0 #define INT3400_FAKE_TEMP (20 * 1000) /* faked temp sensor with 20C */ enum int3400_thermal_uuid { INT3400_THERMAL_ACTIVE = 0, INT3400_THERMAL_PASSIVE_1, INT3400_THERMAL_CRITICAL, INT3400_THERMAL_ADAPTIVE_PERFORMANCE, INT3400_THERMAL_EMERGENCY_CALL_MODE, INT3400_THERMAL_PASSIVE_2, INT3400_THERMAL_POWER_BOSS, INT3400_THERMAL_VIRTUAL_SENSOR, INT3400_THERMAL_COOLING_MODE, INT3400_THERMAL_HARDWARE_DUTY_CYCLING, INT3400_THERMAL_MAXIMUM_UUID, }; static char *int3400_thermal_uuids[INT3400_THERMAL_MAXIMUM_UUID] = { "3A95C389-E4B8-4629-A526-C52C88626BAE", "42A441D6-AE6A-462b-A84B-4A8CE79027D3", "97C68AE7-15FA-499c-B8C9-5DA81D606E0A", "63BE270F-1C11-48FD-A6F7-3AF253FF3E2D", "5349962F-71E6-431D-9AE8-0A635B710AEE", "9E04115A-AE87-4D1C-9500-0F3E340BFE75", "F5A35014-C209-46A4-993A-EB56DE7530A1", "6ED722A7-9240-48A5-B479-31EEF723D7CF", "16CAF1B7-DD38-40ED-B1C1-1B8A1913D531", "BE84BABF-C4D4-403D-B495-3128FD44dAC1", }; struct odvp_attr; struct int3400_thermal_priv { struct acpi_device *adev; struct platform_device *pdev; struct thermal_zone_device *thermal; int art_count; struct art *arts; int trt_count; struct trt *trts; u32 uuid_bitmap; int rel_misc_dev_res; int current_uuid_index; char *data_vault; int odvp_count; int *odvp; u32 os_uuid_mask; int production_mode; struct odvp_attr *odvp_attrs; }; static int evaluate_odvp(struct int3400_thermal_priv *priv); struct odvp_attr { int odvp; struct int3400_thermal_priv *priv; struct device_attribute attr; }; static ssize_t data_vault_read(struct file *file, struct kobject *kobj, struct bin_attribute *attr, char *buf, loff_t off, size_t 
count) { memcpy(buf, attr->private + off, count); return count; } static BIN_ATTR_RO(data_vault, 0); static struct bin_attribute *data_attributes[] = { &bin_attr_data_vault, NULL, }; static ssize_t imok_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct int3400_thermal_priv *priv = dev_get_drvdata(dev); acpi_status status; int input, ret; ret = kstrtouint(buf, 10, &input); if (ret) return ret; status = acpi_execute_simple_method(priv->adev->handle, "IMOK", input); if (ACPI_FAILURE(status)) return -EIO; return count; } static DEVICE_ATTR_WO(imok); static struct attribute *imok_attr[] = { &dev_attr_imok.attr, NULL }; static const struct attribute_group imok_attribute_group = { .attrs = imok_attr, }; static const struct attribute_group data_attribute_group = { .bin_attrs = data_attributes, }; static ssize_t available_uuids_show(struct device *dev, struct device_attribute *attr, char *buf) { struct int3400_thermal_priv *priv = dev_get_drvdata(dev); int i; int length = 0; if (!priv->uuid_bitmap) return sprintf(buf, "UNKNOWN\n"); for (i = 0; i < INT3400_THERMAL_MAXIMUM_UUID; i++) { if (priv->uuid_bitmap & (1 << i)) length += sysfs_emit_at(buf, length, "%s\n", int3400_thermal_uuids[i]); } return length; } static ssize_t current_uuid_show(struct device *dev, struct device_attribute *devattr, char *buf) { struct int3400_thermal_priv *priv = dev_get_drvdata(dev); int i, length = 0; if (priv->current_uuid_index > 0) return sprintf(buf, "%s\n", int3400_thermal_uuids[priv->current_uuid_index]); for (i = 0; i <= INT3400_THERMAL_CRITICAL; i++) { if (priv->os_uuid_mask & BIT(i)) length += sysfs_emit_at(buf, length, "%s\n", int3400_thermal_uuids[i]); } if (length) return length; return sprintf(buf, "INVALID\n"); } static int int3400_thermal_run_osc(acpi_handle handle, char *uuid_str, int *enable) { u32 ret, buf[2]; acpi_status status; int result = 0; struct acpi_osc_context context = { .uuid_str = uuid_str, .rev = 1, .cap.length = 8, 
.cap.pointer = buf, }; buf[OSC_QUERY_DWORD] = 0; buf[OSC_SUPPORT_DWORD] = *enable; status = acpi_run_osc(handle, &context); if (ACPI_SUCCESS(status)) { ret = *((u32 *)(context.ret.pointer + 4)); if (ret != *enable) result = -EPERM; kfree(context.ret.pointer); } else result = -EPERM; return result; } static int set_os_uuid_mask(struct int3400_thermal_priv *priv, u32 mask) { int cap = 0; /* * Capability bits: * Bit 0: set to 1 to indicate DPTF is active * Bi1 1: set to 1 to active cooling is supported by user space daemon * Bit 2: set to 1 to passive cooling is supported by user space daemon * Bit 3: set to 1 to critical trip is handled by user space daemon */ if (mask) cap = (priv->os_uuid_mask << 1) | 0x01; return int3400_thermal_run_osc(priv->adev->handle, "b23ba85d-c8b7-3542-88de-8de2ffcfd698", &cap); } static ssize_t current_uuid_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct int3400_thermal_priv *priv = dev_get_drvdata(dev); int ret, i; for (i = 0; i < INT3400_THERMAL_MAXIMUM_UUID; ++i) { if (!strncmp(buf, int3400_thermal_uuids[i], sizeof(int3400_thermal_uuids[i]) - 1)) { /* * If we have a list of supported UUIDs, make sure * this one is supported. 
*/ if (priv->uuid_bitmap & BIT(i)) { priv->current_uuid_index = i; return count; } /* * There is support of only 3 policies via the new * _OSC to inform OS capability: * INT3400_THERMAL_ACTIVE * INT3400_THERMAL_PASSIVE_1 * INT3400_THERMAL_CRITICAL */ if (i > INT3400_THERMAL_CRITICAL) return -EINVAL; priv->os_uuid_mask |= BIT(i); break; } } if (priv->os_uuid_mask) { ret = set_os_uuid_mask(priv, priv->os_uuid_mask); if (ret) return ret; } return count; } static DEVICE_ATTR_RW(current_uuid); static DEVICE_ATTR_RO(available_uuids); static struct attribute *uuid_attrs[] = { &dev_attr_available_uuids.attr, &dev_attr_current_uuid.attr, NULL }; static const struct attribute_group uuid_attribute_group = { .attrs = uuid_attrs, .name = "uuids" }; static int int3400_thermal_get_uuids(struct int3400_thermal_priv *priv) { struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL}; union acpi_object *obja, *objb; int i, j; int result = 0; acpi_status status; status = acpi_evaluate_object(priv->adev->handle, "IDSP", NULL, &buf); if (ACPI_FAILURE(status)) return -ENODEV; obja = (union acpi_object *)buf.pointer; if (obja->type != ACPI_TYPE_PACKAGE) { result = -EINVAL; goto end; } for (i = 0; i < obja->package.count; i++) { objb = &obja->package.elements[i]; if (objb->type != ACPI_TYPE_BUFFER) { result = -EINVAL; goto end; } /* UUID must be 16 bytes */ if (objb->buffer.length != 16) { result = -EINVAL; goto end; } for (j = 0; j < INT3400_THERMAL_MAXIMUM_UUID; j++) { guid_t guid; guid_parse(int3400_thermal_uuids[j], &guid); if (guid_equal((guid_t *)objb->buffer.pointer, &guid)) { priv->uuid_bitmap |= (1 << j); break; } } } end: kfree(buf.pointer); return result; } static ssize_t production_mode_show(struct device *dev, struct device_attribute *attr, char *buf) { struct int3400_thermal_priv *priv = dev_get_drvdata(dev); return sysfs_emit(buf, "%d\n", priv->production_mode); } static DEVICE_ATTR_RO(production_mode); static int production_mode_init(struct int3400_thermal_priv *priv) { 
unsigned long long mode; acpi_status status; int ret; priv->production_mode = -1; status = acpi_evaluate_integer(priv->adev->handle, "DCFG", NULL, &mode); /* If the method is not present, this is not an error */ if (ACPI_FAILURE(status)) return 0; ret = sysfs_create_file(&priv->pdev->dev.kobj, &dev_attr_production_mode.attr); if (ret) return ret; priv->production_mode = mode; return 0; } static void production_mode_exit(struct int3400_thermal_priv *priv) { if (priv->production_mode >= 0) sysfs_remove_file(&priv->pdev->dev.kobj, &dev_attr_production_mode.attr); } static ssize_t odvp_show(struct device *dev, struct device_attribute *attr, char *buf) { struct odvp_attr *odvp_attr; odvp_attr = container_of(attr, struct odvp_attr, attr); return sprintf(buf, "%d\n", odvp_attr->priv->odvp[odvp_attr->odvp]); } static void cleanup_odvp(struct int3400_thermal_priv *priv) { int i; if (priv->odvp_attrs) { for (i = 0; i < priv->odvp_count; i++) { sysfs_remove_file(&priv->pdev->dev.kobj, &priv->odvp_attrs[i].attr.attr); kfree(priv->odvp_attrs[i].attr.attr.name); } kfree(priv->odvp_attrs); } kfree(priv->odvp); priv->odvp_count = 0; } static int evaluate_odvp(struct int3400_thermal_priv *priv) { struct acpi_buffer odvp = { ACPI_ALLOCATE_BUFFER, NULL }; union acpi_object *obj = NULL; acpi_status status; int i, ret; status = acpi_evaluate_object(priv->adev->handle, "ODVP", NULL, &odvp); if (ACPI_FAILURE(status)) { ret = -EINVAL; goto out_err; } obj = odvp.pointer; if (obj->type != ACPI_TYPE_PACKAGE) { ret = -EINVAL; goto out_err; } if (priv->odvp == NULL) { priv->odvp_count = obj->package.count; priv->odvp = kmalloc_array(priv->odvp_count, sizeof(int), GFP_KERNEL); if (!priv->odvp) { ret = -ENOMEM; goto out_err; } } if (priv->odvp_attrs == NULL) { priv->odvp_attrs = kcalloc(priv->odvp_count, sizeof(struct odvp_attr), GFP_KERNEL); if (!priv->odvp_attrs) { ret = -ENOMEM; goto out_err; } for (i = 0; i < priv->odvp_count; i++) { struct odvp_attr *odvp = &priv->odvp_attrs[i]; 
sysfs_attr_init(&odvp->attr.attr); odvp->priv = priv; odvp->odvp = i; odvp->attr.attr.name = kasprintf(GFP_KERNEL, "odvp%d", i); if (!odvp->attr.attr.name) { ret = -ENOMEM; goto out_err; } odvp->attr.attr.mode = 0444; odvp->attr.show = odvp_show; odvp->attr.store = NULL; ret = sysfs_create_file(&priv->pdev->dev.kobj, &odvp->attr.attr); if (ret) goto out_err; } } for (i = 0; i < obj->package.count; i++) { if (obj->package.elements[i].type == ACPI_TYPE_INTEGER) priv->odvp[i] = obj->package.elements[i].integer.value; } kfree(obj); return 0; out_err: cleanup_odvp(priv); kfree(obj); return ret; } static void int3400_notify(acpi_handle handle, u32 event, void *data) { struct int3400_thermal_priv *priv = data; struct device *dev; char *thermal_prop[5]; int therm_event; if (!priv) return; switch (event) { case INT3400_THERMAL_TABLE_CHANGED: therm_event = THERMAL_TABLE_CHANGED; break; case INT3400_KEEP_ALIVE: therm_event = THERMAL_EVENT_KEEP_ALIVE; break; case INT3400_ODVP_CHANGED: evaluate_odvp(priv); therm_event = THERMAL_DEVICE_POWER_CAPABILITY_CHANGED; break; default: /* Ignore unknown notification codes sent to INT3400 device */ return; } dev = thermal_zone_device(priv->thermal); thermal_prop[0] = kasprintf(GFP_KERNEL, "NAME=%s", thermal_zone_device_type(priv->thermal)); thermal_prop[1] = kasprintf(GFP_KERNEL, "TEMP=%d", INT3400_FAKE_TEMP); thermal_prop[2] = kasprintf(GFP_KERNEL, "TRIP="); thermal_prop[3] = kasprintf(GFP_KERNEL, "EVENT=%d", therm_event); thermal_prop[4] = NULL; kobject_uevent_env(&dev->kobj, KOBJ_CHANGE, thermal_prop); kfree(thermal_prop[0]); kfree(thermal_prop[1]); kfree(thermal_prop[2]); kfree(thermal_prop[3]); } static int int3400_thermal_get_temp(struct thermal_zone_device *thermal, int *temp) { *temp = INT3400_FAKE_TEMP; return 0; } static int int3400_thermal_change_mode(struct thermal_zone_device *thermal, enum thermal_device_mode mode) { struct int3400_thermal_priv *priv = thermal_zone_device_priv(thermal); int result = 0; int enabled; if 
(!priv) return -EINVAL; enabled = mode == THERMAL_DEVICE_ENABLED; if (priv->os_uuid_mask) { if (!enabled) { priv->os_uuid_mask = 0; result = set_os_uuid_mask(priv, priv->os_uuid_mask); } goto eval_odvp; } if (priv->current_uuid_index < 0 || priv->current_uuid_index >= INT3400_THERMAL_MAXIMUM_UUID) return -EINVAL; result = int3400_thermal_run_osc(priv->adev->handle, int3400_thermal_uuids[priv->current_uuid_index], &enabled); eval_odvp: evaluate_odvp(priv); return result; } static struct thermal_zone_device_ops int3400_thermal_ops = { .get_temp = int3400_thermal_get_temp, .change_mode = int3400_thermal_change_mode, }; static struct thermal_zone_params int3400_thermal_params = { .governor_name = "user_space", .no_hwmon = true, }; static void int3400_setup_gddv(struct int3400_thermal_priv *priv) { struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; union acpi_object *obj; acpi_status status; status = acpi_evaluate_object(priv->adev->handle, "GDDV", NULL, &buffer); if (ACPI_FAILURE(status) || !buffer.length) return; obj = buffer.pointer; if (obj->type != ACPI_TYPE_PACKAGE || obj->package.count != 1 || obj->package.elements[0].type != ACPI_TYPE_BUFFER) goto out_free; priv->data_vault = kmemdup(obj->package.elements[0].buffer.pointer, obj->package.elements[0].buffer.length, GFP_KERNEL); if (ZERO_OR_NULL_PTR(priv->data_vault)) goto out_free; bin_attr_data_vault.private = priv->data_vault; bin_attr_data_vault.size = obj->package.elements[0].buffer.length; out_free: kfree(buffer.pointer); } static int int3400_thermal_probe(struct platform_device *pdev) { struct acpi_device *adev = ACPI_COMPANION(&pdev->dev); struct int3400_thermal_priv *priv; int result; if (!adev) return -ENODEV; priv = kzalloc(sizeof(struct int3400_thermal_priv), GFP_KERNEL); if (!priv) return -ENOMEM; priv->pdev = pdev; priv->adev = adev; result = int3400_thermal_get_uuids(priv); /* Missing IDSP isn't fatal */ if (result && result != -ENODEV) goto free_priv; priv->current_uuid_index = -1; result = 
acpi_parse_art(priv->adev->handle, &priv->art_count, &priv->arts, true); if (result) dev_dbg(&pdev->dev, "_ART table parsing error\n"); result = acpi_parse_trt(priv->adev->handle, &priv->trt_count, &priv->trts, true); if (result) dev_dbg(&pdev->dev, "_TRT table parsing error\n"); platform_set_drvdata(pdev, priv); int3400_setup_gddv(priv); evaluate_odvp(priv); priv->thermal = thermal_tripless_zone_device_register("INT3400 Thermal", priv, &int3400_thermal_ops, &int3400_thermal_params); if (IS_ERR(priv->thermal)) { result = PTR_ERR(priv->thermal); goto free_art_trt; } priv->rel_misc_dev_res = acpi_thermal_rel_misc_device_add( priv->adev->handle); result = sysfs_create_group(&pdev->dev.kobj, &uuid_attribute_group); if (result) goto free_rel_misc; if (acpi_has_method(priv->adev->handle, "IMOK")) { result = sysfs_create_group(&pdev->dev.kobj, &imok_attribute_group); if (result) goto free_imok; } if (!ZERO_OR_NULL_PTR(priv->data_vault)) { result = sysfs_create_group(&pdev->dev.kobj, &data_attribute_group); if (result) goto free_uuid; } result = acpi_install_notify_handler( priv->adev->handle, ACPI_DEVICE_NOTIFY, int3400_notify, (void *)priv); if (result) goto free_sysfs; result = production_mode_init(priv); if (result) goto free_notify; return 0; free_notify: acpi_remove_notify_handler(priv->adev->handle, ACPI_DEVICE_NOTIFY, int3400_notify); free_sysfs: cleanup_odvp(priv); if (!ZERO_OR_NULL_PTR(priv->data_vault)) { sysfs_remove_group(&pdev->dev.kobj, &data_attribute_group); kfree(priv->data_vault); } free_uuid: sysfs_remove_group(&pdev->dev.kobj, &uuid_attribute_group); free_imok: sysfs_remove_group(&pdev->dev.kobj, &imok_attribute_group); free_rel_misc: if (!priv->rel_misc_dev_res) acpi_thermal_rel_misc_device_remove(priv->adev->handle); thermal_zone_device_unregister(priv->thermal); free_art_trt: kfree(priv->trts); kfree(priv->arts); free_priv: kfree(priv); return result; } static int int3400_thermal_remove(struct platform_device *pdev) { struct int3400_thermal_priv 
*priv = platform_get_drvdata(pdev); production_mode_exit(priv); acpi_remove_notify_handler( priv->adev->handle, ACPI_DEVICE_NOTIFY, int3400_notify); cleanup_odvp(priv); if (!priv->rel_misc_dev_res) acpi_thermal_rel_misc_device_remove(priv->adev->handle); if (!ZERO_OR_NULL_PTR(priv->data_vault)) sysfs_remove_group(&pdev->dev.kobj, &data_attribute_group); sysfs_remove_group(&pdev->dev.kobj, &uuid_attribute_group); sysfs_remove_group(&pdev->dev.kobj, &imok_attribute_group); thermal_zone_device_unregister(priv->thermal); kfree(priv->data_vault); kfree(priv->trts); kfree(priv->arts); kfree(priv); return 0; } static const struct acpi_device_id int3400_thermal_match[] = { {"INT3400", 0}, {"INTC1040", 0}, {"INTC1041", 0}, {"INTC1042", 0}, {"INTC10A0", 0}, {} }; MODULE_DEVICE_TABLE(acpi, int3400_thermal_match); static struct platform_driver int3400_thermal_driver = { .probe = int3400_thermal_probe, .remove = int3400_thermal_remove, .driver = { .name = "int3400 thermal", .acpi_match_table = ACPI_PTR(int3400_thermal_match), }, }; module_platform_driver(int3400_thermal_driver); MODULE_DESCRIPTION("INT3400 Thermal driver"); MODULE_AUTHOR("Zhang Rui <[email protected]>"); MODULE_LICENSE("GPL");
linux-master
drivers/thermal/intel/int340x_thermal/int3400_thermal.c
// SPDX-License-Identifier: GPL-2.0-only /* * processor_thermal_device.c * Copyright (c) 2014, Intel Corporation. */ #include <linux/acpi.h> #include <linux/intel_tcc.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/pci.h> #include <linux/thermal.h> #include "int340x_thermal_zone.h" #include "processor_thermal_device.h" #include "../intel_soc_dts_iosf.h" #define DRV_NAME "proc_thermal" #define POWER_LIMIT_SHOW(index, suffix) \ static ssize_t power_limit_##index##_##suffix##_show(struct device *dev, \ struct device_attribute *attr, \ char *buf) \ { \ struct proc_thermal_device *proc_dev = dev_get_drvdata(dev); \ \ return sprintf(buf, "%lu\n",\ (unsigned long)proc_dev->power_limits[index].suffix * 1000); \ } POWER_LIMIT_SHOW(0, min_uw) POWER_LIMIT_SHOW(0, max_uw) POWER_LIMIT_SHOW(0, step_uw) POWER_LIMIT_SHOW(0, tmin_us) POWER_LIMIT_SHOW(0, tmax_us) POWER_LIMIT_SHOW(1, min_uw) POWER_LIMIT_SHOW(1, max_uw) POWER_LIMIT_SHOW(1, step_uw) POWER_LIMIT_SHOW(1, tmin_us) POWER_LIMIT_SHOW(1, tmax_us) static DEVICE_ATTR_RO(power_limit_0_min_uw); static DEVICE_ATTR_RO(power_limit_0_max_uw); static DEVICE_ATTR_RO(power_limit_0_step_uw); static DEVICE_ATTR_RO(power_limit_0_tmin_us); static DEVICE_ATTR_RO(power_limit_0_tmax_us); static DEVICE_ATTR_RO(power_limit_1_min_uw); static DEVICE_ATTR_RO(power_limit_1_max_uw); static DEVICE_ATTR_RO(power_limit_1_step_uw); static DEVICE_ATTR_RO(power_limit_1_tmin_us); static DEVICE_ATTR_RO(power_limit_1_tmax_us); static struct attribute *power_limit_attrs[] = { &dev_attr_power_limit_0_min_uw.attr, &dev_attr_power_limit_1_min_uw.attr, &dev_attr_power_limit_0_max_uw.attr, &dev_attr_power_limit_1_max_uw.attr, &dev_attr_power_limit_0_step_uw.attr, &dev_attr_power_limit_1_step_uw.attr, &dev_attr_power_limit_0_tmin_us.attr, &dev_attr_power_limit_1_tmin_us.attr, &dev_attr_power_limit_0_tmax_us.attr, &dev_attr_power_limit_1_tmax_us.attr, NULL }; static const struct attribute_group power_limit_attribute_group = { .attrs = 
power_limit_attrs, .name = "power_limits" }; static ssize_t tcc_offset_degree_celsius_show(struct device *dev, struct device_attribute *attr, char *buf) { int offset; offset = intel_tcc_get_offset(-1); if (offset < 0) return offset; return sprintf(buf, "%d\n", offset); } static ssize_t tcc_offset_degree_celsius_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { unsigned int tcc; u64 val; int err; err = rdmsrl_safe(MSR_PLATFORM_INFO, &val); if (err) return err; if (!(val & BIT(30))) return -EACCES; if (kstrtouint(buf, 0, &tcc)) return -EINVAL; err = intel_tcc_set_offset(-1, tcc); if (err) return err; return count; } static DEVICE_ATTR_RW(tcc_offset_degree_celsius); static int proc_thermal_get_zone_temp(struct thermal_zone_device *zone, int *temp) { int cpu; int curr_temp; *temp = 0; for_each_online_cpu(cpu) { curr_temp = intel_tcc_get_temp(cpu, false); if (curr_temp < 0) return curr_temp; if (!*temp || curr_temp > *temp) *temp = curr_temp; } *temp *= 1000; return 0; } static int proc_thermal_read_ppcc(struct proc_thermal_device *proc_priv) { int i; acpi_status status; struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL }; union acpi_object *elements, *ppcc; union acpi_object *p; int ret = 0; status = acpi_evaluate_object(proc_priv->adev->handle, "PPCC", NULL, &buf); if (ACPI_FAILURE(status)) return -ENODEV; p = buf.pointer; if (!p || (p->type != ACPI_TYPE_PACKAGE)) { dev_err(proc_priv->dev, "Invalid PPCC data\n"); ret = -EFAULT; goto free_buffer; } if (!p->package.count) { dev_err(proc_priv->dev, "Invalid PPCC package size\n"); ret = -EFAULT; goto free_buffer; } for (i = 0; i < min((int)p->package.count - 1, 2); ++i) { elements = &(p->package.elements[i+1]); if (elements->type != ACPI_TYPE_PACKAGE || elements->package.count != 6) { ret = -EFAULT; goto free_buffer; } ppcc = elements->package.elements; proc_priv->power_limits[i].index = ppcc[0].integer.value; proc_priv->power_limits[i].min_uw = ppcc[1].integer.value; 
proc_priv->power_limits[i].max_uw = ppcc[2].integer.value; proc_priv->power_limits[i].tmin_us = ppcc[3].integer.value; proc_priv->power_limits[i].tmax_us = ppcc[4].integer.value; proc_priv->power_limits[i].step_uw = ppcc[5].integer.value; } free_buffer: kfree(buf.pointer); return ret; } #define PROC_POWER_CAPABILITY_CHANGED 0x83 static void proc_thermal_notify(acpi_handle handle, u32 event, void *data) { struct proc_thermal_device *proc_priv = data; if (!proc_priv) return; switch (event) { case PROC_POWER_CAPABILITY_CHANGED: proc_thermal_read_ppcc(proc_priv); int340x_thermal_zone_device_update(proc_priv->int340x_zone, THERMAL_DEVICE_POWER_CAPABILITY_CHANGED); break; default: dev_dbg(proc_priv->dev, "Unsupported event [0x%x]\n", event); break; } } int proc_thermal_add(struct device *dev, struct proc_thermal_device *proc_priv) { struct acpi_device *adev; acpi_status status; unsigned long long tmp; int (*get_temp) (struct thermal_zone_device *, int *) = NULL; int ret; adev = ACPI_COMPANION(dev); if (!adev) return -ENODEV; proc_priv->dev = dev; proc_priv->adev = adev; ret = proc_thermal_read_ppcc(proc_priv); if (ret) return ret; status = acpi_evaluate_integer(adev->handle, "_TMP", NULL, &tmp); if (ACPI_FAILURE(status)) { /* there is no _TMP method, add local method */ if (intel_tcc_get_tjmax(-1) > 0) get_temp = proc_thermal_get_zone_temp; } proc_priv->int340x_zone = int340x_thermal_zone_add(adev, get_temp); if (IS_ERR(proc_priv->int340x_zone)) { return PTR_ERR(proc_priv->int340x_zone); } else ret = 0; ret = acpi_install_notify_handler(adev->handle, ACPI_DEVICE_NOTIFY, proc_thermal_notify, (void *)proc_priv); if (ret) goto remove_zone; ret = sysfs_create_file(&dev->kobj, &dev_attr_tcc_offset_degree_celsius.attr); if (ret) goto remove_notify; ret = sysfs_create_group(&dev->kobj, &power_limit_attribute_group); if (ret) { sysfs_remove_file(&dev->kobj, &dev_attr_tcc_offset_degree_celsius.attr); goto remove_notify; } return 0; remove_notify: 
acpi_remove_notify_handler(adev->handle, ACPI_DEVICE_NOTIFY, proc_thermal_notify); remove_zone: int340x_thermal_zone_remove(proc_priv->int340x_zone); return ret; } EXPORT_SYMBOL_GPL(proc_thermal_add); void proc_thermal_remove(struct proc_thermal_device *proc_priv) { acpi_remove_notify_handler(proc_priv->adev->handle, ACPI_DEVICE_NOTIFY, proc_thermal_notify); int340x_thermal_zone_remove(proc_priv->int340x_zone); sysfs_remove_file(&proc_priv->dev->kobj, &dev_attr_tcc_offset_degree_celsius.attr); sysfs_remove_group(&proc_priv->dev->kobj, &power_limit_attribute_group); } EXPORT_SYMBOL_GPL(proc_thermal_remove); static int tcc_offset_save = -1; int proc_thermal_suspend(struct device *dev) { tcc_offset_save = intel_tcc_get_offset(-1); if (tcc_offset_save < 0) dev_warn(dev, "failed to save offset (%d)\n", tcc_offset_save); return 0; } EXPORT_SYMBOL_GPL(proc_thermal_suspend); int proc_thermal_resume(struct device *dev) { struct proc_thermal_device *proc_dev; proc_dev = dev_get_drvdata(dev); proc_thermal_read_ppcc(proc_dev); /* Do not update if saving failed */ if (tcc_offset_save >= 0) intel_tcc_set_offset(-1, tcc_offset_save); return 0; } EXPORT_SYMBOL_GPL(proc_thermal_resume); #define MCHBAR 0 static int proc_thermal_set_mmio_base(struct pci_dev *pdev, struct proc_thermal_device *proc_priv) { int ret; ret = pcim_iomap_regions(pdev, 1 << MCHBAR, DRV_NAME); if (ret) { dev_err(&pdev->dev, "cannot reserve PCI memory region\n"); return -ENOMEM; } proc_priv->mmio_base = pcim_iomap_table(pdev)[MCHBAR]; return 0; } int proc_thermal_mmio_add(struct pci_dev *pdev, struct proc_thermal_device *proc_priv, kernel_ulong_t feature_mask) { int ret; proc_priv->mmio_feature_mask = feature_mask; if (feature_mask) { ret = proc_thermal_set_mmio_base(pdev, proc_priv); if (ret) return ret; } if (feature_mask & PROC_THERMAL_FEATURE_RAPL) { ret = proc_thermal_rapl_add(pdev, proc_priv); if (ret) { dev_err(&pdev->dev, "failed to add RAPL MMIO interface\n"); return ret; } } if (feature_mask & 
PROC_THERMAL_FEATURE_FIVR || feature_mask & PROC_THERMAL_FEATURE_DVFS || feature_mask & PROC_THERMAL_FEATURE_DLVR) { ret = proc_thermal_rfim_add(pdev, proc_priv); if (ret) { dev_err(&pdev->dev, "failed to add RFIM interface\n"); goto err_rem_rapl; } } if (feature_mask & PROC_THERMAL_FEATURE_MBOX) { ret = proc_thermal_mbox_add(pdev, proc_priv); if (ret) { dev_err(&pdev->dev, "failed to add MBOX interface\n"); goto err_rem_rfim; } } return 0; err_rem_rfim: proc_thermal_rfim_remove(pdev); err_rem_rapl: proc_thermal_rapl_remove(); return ret; } EXPORT_SYMBOL_GPL(proc_thermal_mmio_add); void proc_thermal_mmio_remove(struct pci_dev *pdev, struct proc_thermal_device *proc_priv) { if (proc_priv->mmio_feature_mask & PROC_THERMAL_FEATURE_RAPL) proc_thermal_rapl_remove(); if (proc_priv->mmio_feature_mask & PROC_THERMAL_FEATURE_FIVR || proc_priv->mmio_feature_mask & PROC_THERMAL_FEATURE_DVFS) proc_thermal_rfim_remove(pdev); if (proc_priv->mmio_feature_mask & PROC_THERMAL_FEATURE_MBOX) proc_thermal_mbox_remove(pdev); } EXPORT_SYMBOL_GPL(proc_thermal_mmio_remove); MODULE_IMPORT_NS(INTEL_TCC); MODULE_AUTHOR("Srinivas Pandruvada <[email protected]>"); MODULE_DESCRIPTION("Processor Thermal Reporting Device Driver"); MODULE_LICENSE("GPL v2");
linux-master
drivers/thermal/intel/int340x_thermal/processor_thermal_device.c
// SPDX-License-Identifier: GPL-2.0-only /* * INT3406 thermal driver for display participant device * * Copyright (C) 2016, Intel Corporation * Authors: Aaron Lu <[email protected]> */ #include <linux/module.h> #include <linux/platform_device.h> #include <linux/acpi.h> #include <linux/backlight.h> #include <linux/thermal.h> #include <acpi/video.h> #define INT3406_BRIGHTNESS_LIMITS_CHANGED 0x80 struct int3406_thermal_data { int upper_limit; int lower_limit; acpi_handle handle; struct acpi_video_device_brightness *br; struct backlight_device *raw_bd; struct thermal_cooling_device *cooling_dev; }; /* * According to the ACPI spec, * "Each brightness level is represented by a number between 0 and 100, * and can be thought of as a percentage. For example, 50 can be 50% * power consumption or 50% brightness, as defined by the OEM." * * As int3406 device uses this value to communicate with the native * graphics driver, we make the assumption that it represents * the percentage of brightness only */ #define ACPI_TO_RAW(v, d) (d->raw_bd->props.max_brightness * v / 100) #define RAW_TO_ACPI(v, d) (v * 100 / d->raw_bd->props.max_brightness) static int int3406_thermal_get_max_state(struct thermal_cooling_device *cooling_dev, unsigned long *state) { struct int3406_thermal_data *d = cooling_dev->devdata; *state = d->upper_limit - d->lower_limit; return 0; } static int int3406_thermal_set_cur_state(struct thermal_cooling_device *cooling_dev, unsigned long state) { struct int3406_thermal_data *d = cooling_dev->devdata; int acpi_level, raw_level; if (state > d->upper_limit - d->lower_limit) return -EINVAL; acpi_level = d->br->levels[d->upper_limit - state]; raw_level = ACPI_TO_RAW(acpi_level, d); return backlight_device_set_brightness(d->raw_bd, raw_level); } static int int3406_thermal_get_cur_state(struct thermal_cooling_device *cooling_dev, unsigned long *state) { struct int3406_thermal_data *d = cooling_dev->devdata; int acpi_level; int index; acpi_level = 
RAW_TO_ACPI(d->raw_bd->props.brightness, d); /* * There is no 1:1 mapping between the firmware interface level * with the raw interface level, we will have to find one that is * right above it. */ for (index = d->lower_limit; index < d->upper_limit; index++) { if (acpi_level <= d->br->levels[index]) break; } *state = d->upper_limit - index; return 0; } static const struct thermal_cooling_device_ops video_cooling_ops = { .get_max_state = int3406_thermal_get_max_state, .get_cur_state = int3406_thermal_get_cur_state, .set_cur_state = int3406_thermal_set_cur_state, }; static int int3406_thermal_get_index(int *array, int nr, int value) { int i; for (i = 2; i < nr; i++) { if (array[i] == value) break; } return i == nr ? -ENOENT : i; } static void int3406_thermal_get_limit(struct int3406_thermal_data *d) { acpi_status status; unsigned long long lower_limit, upper_limit; status = acpi_evaluate_integer(d->handle, "DDDL", NULL, &lower_limit); if (ACPI_SUCCESS(status)) d->lower_limit = int3406_thermal_get_index(d->br->levels, d->br->count, lower_limit); status = acpi_evaluate_integer(d->handle, "DDPC", NULL, &upper_limit); if (ACPI_SUCCESS(status)) d->upper_limit = int3406_thermal_get_index(d->br->levels, d->br->count, upper_limit); /* lower_limit and upper_limit should be always set */ d->lower_limit = d->lower_limit > 0 ? d->lower_limit : 2; d->upper_limit = d->upper_limit > 0 ? 
d->upper_limit : d->br->count - 1; } static void int3406_notify(acpi_handle handle, u32 event, void *data) { if (event == INT3406_BRIGHTNESS_LIMITS_CHANGED) int3406_thermal_get_limit(data); } static int int3406_thermal_probe(struct platform_device *pdev) { struct acpi_device *adev = ACPI_COMPANION(&pdev->dev); struct int3406_thermal_data *d; struct backlight_device *bd; int ret; if (!ACPI_HANDLE(&pdev->dev)) return -ENODEV; d = devm_kzalloc(&pdev->dev, sizeof(*d), GFP_KERNEL); if (!d) return -ENOMEM; d->handle = ACPI_HANDLE(&pdev->dev); bd = backlight_device_get_by_type(BACKLIGHT_RAW); if (!bd) return -ENODEV; d->raw_bd = bd; ret = acpi_video_get_levels(ACPI_COMPANION(&pdev->dev), &d->br, NULL); if (ret) return ret; int3406_thermal_get_limit(d); d->cooling_dev = thermal_cooling_device_register(acpi_device_bid(adev), d, &video_cooling_ops); if (IS_ERR(d->cooling_dev)) goto err; ret = acpi_install_notify_handler(adev->handle, ACPI_DEVICE_NOTIFY, int3406_notify, d); if (ret) goto err_cdev; platform_set_drvdata(pdev, d); return 0; err_cdev: thermal_cooling_device_unregister(d->cooling_dev); err: kfree(d->br); return -ENODEV; } static int int3406_thermal_remove(struct platform_device *pdev) { struct int3406_thermal_data *d = platform_get_drvdata(pdev); thermal_cooling_device_unregister(d->cooling_dev); kfree(d->br); return 0; } static const struct acpi_device_id int3406_thermal_match[] = { {"INT3406", 0}, {} }; MODULE_DEVICE_TABLE(acpi, int3406_thermal_match); static struct platform_driver int3406_thermal_driver = { .probe = int3406_thermal_probe, .remove = int3406_thermal_remove, .driver = { .name = "int3406 thermal", .acpi_match_table = int3406_thermal_match, }, }; module_platform_driver(int3406_thermal_driver); MODULE_DESCRIPTION("INT3406 Thermal driver"); MODULE_LICENSE("GPL v2");
linux-master
drivers/thermal/intel/int340x_thermal/int3406_thermal.c
// SPDX-License-Identifier: GPL-2.0-only /* * DRA752 thermal data. * * Copyright (C) 2013 Texas Instruments Inc. * Contact: * Eduardo Valentin <[email protected]> * Tero Kristo <[email protected]> * * This file is partially autogenerated. */ #include "ti-thermal.h" #include "ti-bandgap.h" #include "dra752-bandgap.h" /* * DRA752 has five instances of thermal sensor: MPU, GPU, CORE, * IVA and DSPEVE need to describe the individual registers and * bit fields. */ /* * DRA752 CORE thermal sensor register offsets and bit-fields */ static struct temp_sensor_registers dra752_core_temp_sensor_registers = { .temp_sensor_ctrl = DRA752_TEMP_SENSOR_CORE_OFFSET, .bgap_tempsoff_mask = DRA752_TEMP_SENSOR_TMPSOFF_MASK, .bgap_eocz_mask = DRA752_TEMP_SENSOR_EOCZ_MASK, .bgap_dtemp_mask = DRA752_TEMP_SENSOR_DTEMP_MASK, .bgap_mask_ctrl = DRA752_BANDGAP_CTRL_1_OFFSET, .mask_hot_mask = DRA752_BANDGAP_CTRL_1_MASK_HOT_CORE_MASK, .mask_cold_mask = DRA752_BANDGAP_CTRL_1_MASK_COLD_CORE_MASK, .mask_counter_delay_mask = DRA752_BANDGAP_CTRL_1_COUNTER_DELAY_MASK, .mask_freeze_mask = DRA752_BANDGAP_CTRL_1_FREEZE_CORE_MASK, .bgap_threshold = DRA752_BANDGAP_THRESHOLD_CORE_OFFSET, .threshold_thot_mask = DRA752_BANDGAP_THRESHOLD_HOT_MASK, .threshold_tcold_mask = DRA752_BANDGAP_THRESHOLD_COLD_MASK, .bgap_status = DRA752_BANDGAP_STATUS_1_OFFSET, .status_hot_mask = DRA752_BANDGAP_STATUS_1_HOT_CORE_MASK, .status_cold_mask = DRA752_BANDGAP_STATUS_1_COLD_CORE_MASK, .ctrl_dtemp_1 = DRA752_DTEMP_CORE_1_OFFSET, .ctrl_dtemp_2 = DRA752_DTEMP_CORE_2_OFFSET, .bgap_efuse = DRA752_STD_FUSE_OPP_BGAP_CORE_OFFSET, }; /* * DRA752 IVA thermal sensor register offsets and bit-fields */ static struct temp_sensor_registers dra752_iva_temp_sensor_registers = { .temp_sensor_ctrl = DRA752_TEMP_SENSOR_IVA_OFFSET, .bgap_tempsoff_mask = DRA752_TEMP_SENSOR_TMPSOFF_MASK, .bgap_eocz_mask = DRA752_TEMP_SENSOR_EOCZ_MASK, .bgap_dtemp_mask = DRA752_TEMP_SENSOR_DTEMP_MASK, .bgap_mask_ctrl = DRA752_BANDGAP_CTRL_2_OFFSET, .mask_hot_mask = 
DRA752_BANDGAP_CTRL_2_MASK_HOT_IVA_MASK, .mask_cold_mask = DRA752_BANDGAP_CTRL_2_MASK_COLD_IVA_MASK, .mask_counter_delay_mask = DRA752_BANDGAP_CTRL_1_COUNTER_DELAY_MASK, .mask_freeze_mask = DRA752_BANDGAP_CTRL_2_FREEZE_IVA_MASK, .bgap_threshold = DRA752_BANDGAP_THRESHOLD_IVA_OFFSET, .threshold_thot_mask = DRA752_BANDGAP_THRESHOLD_HOT_MASK, .threshold_tcold_mask = DRA752_BANDGAP_THRESHOLD_COLD_MASK, .bgap_status = DRA752_BANDGAP_STATUS_2_OFFSET, .status_hot_mask = DRA752_BANDGAP_STATUS_2_HOT_IVA_MASK, .status_cold_mask = DRA752_BANDGAP_STATUS_2_COLD_IVA_MASK, .ctrl_dtemp_1 = DRA752_DTEMP_IVA_1_OFFSET, .ctrl_dtemp_2 = DRA752_DTEMP_IVA_2_OFFSET, .bgap_efuse = DRA752_STD_FUSE_OPP_BGAP_IVA_OFFSET, }; /* * DRA752 MPU thermal sensor register offsets and bit-fields */ static struct temp_sensor_registers dra752_mpu_temp_sensor_registers = { .temp_sensor_ctrl = DRA752_TEMP_SENSOR_MPU_OFFSET, .bgap_tempsoff_mask = DRA752_TEMP_SENSOR_TMPSOFF_MASK, .bgap_eocz_mask = DRA752_TEMP_SENSOR_EOCZ_MASK, .bgap_dtemp_mask = DRA752_TEMP_SENSOR_DTEMP_MASK, .bgap_mask_ctrl = DRA752_BANDGAP_CTRL_1_OFFSET, .mask_hot_mask = DRA752_BANDGAP_CTRL_1_MASK_HOT_MPU_MASK, .mask_cold_mask = DRA752_BANDGAP_CTRL_1_MASK_COLD_MPU_MASK, .mask_counter_delay_mask = DRA752_BANDGAP_CTRL_1_COUNTER_DELAY_MASK, .mask_freeze_mask = DRA752_BANDGAP_CTRL_1_FREEZE_MPU_MASK, .bgap_threshold = DRA752_BANDGAP_THRESHOLD_MPU_OFFSET, .threshold_thot_mask = DRA752_BANDGAP_THRESHOLD_HOT_MASK, .threshold_tcold_mask = DRA752_BANDGAP_THRESHOLD_COLD_MASK, .bgap_status = DRA752_BANDGAP_STATUS_1_OFFSET, .status_hot_mask = DRA752_BANDGAP_STATUS_1_HOT_MPU_MASK, .status_cold_mask = DRA752_BANDGAP_STATUS_1_COLD_MPU_MASK, .ctrl_dtemp_1 = DRA752_DTEMP_MPU_1_OFFSET, .ctrl_dtemp_2 = DRA752_DTEMP_MPU_2_OFFSET, .bgap_efuse = DRA752_STD_FUSE_OPP_BGAP_MPU_OFFSET, }; /* * DRA752 DSPEVE thermal sensor register offsets and bit-fields */ static struct temp_sensor_registers dra752_dspeve_temp_sensor_registers = { .temp_sensor_ctrl = 
DRA752_TEMP_SENSOR_DSPEVE_OFFSET, .bgap_tempsoff_mask = DRA752_TEMP_SENSOR_TMPSOFF_MASK, .bgap_eocz_mask = DRA752_TEMP_SENSOR_EOCZ_MASK, .bgap_dtemp_mask = DRA752_TEMP_SENSOR_DTEMP_MASK, .bgap_mask_ctrl = DRA752_BANDGAP_CTRL_2_OFFSET, .mask_hot_mask = DRA752_BANDGAP_CTRL_2_MASK_HOT_DSPEVE_MASK, .mask_cold_mask = DRA752_BANDGAP_CTRL_2_MASK_COLD_DSPEVE_MASK, .mask_counter_delay_mask = DRA752_BANDGAP_CTRL_1_COUNTER_DELAY_MASK, .mask_freeze_mask = DRA752_BANDGAP_CTRL_2_FREEZE_DSPEVE_MASK, .bgap_threshold = DRA752_BANDGAP_THRESHOLD_DSPEVE_OFFSET, .threshold_thot_mask = DRA752_BANDGAP_THRESHOLD_HOT_MASK, .threshold_tcold_mask = DRA752_BANDGAP_THRESHOLD_COLD_MASK, .bgap_status = DRA752_BANDGAP_STATUS_2_OFFSET, .status_hot_mask = DRA752_BANDGAP_STATUS_2_HOT_DSPEVE_MASK, .status_cold_mask = DRA752_BANDGAP_STATUS_2_COLD_DSPEVE_MASK, .ctrl_dtemp_1 = DRA752_DTEMP_DSPEVE_1_OFFSET, .ctrl_dtemp_2 = DRA752_DTEMP_DSPEVE_2_OFFSET, .bgap_efuse = DRA752_STD_FUSE_OPP_BGAP_DSPEVE_OFFSET, }; /* * DRA752 GPU thermal sensor register offsets and bit-fields */ static struct temp_sensor_registers dra752_gpu_temp_sensor_registers = { .temp_sensor_ctrl = DRA752_TEMP_SENSOR_GPU_OFFSET, .bgap_tempsoff_mask = DRA752_TEMP_SENSOR_TMPSOFF_MASK, .bgap_eocz_mask = DRA752_TEMP_SENSOR_EOCZ_MASK, .bgap_dtemp_mask = DRA752_TEMP_SENSOR_DTEMP_MASK, .bgap_mask_ctrl = DRA752_BANDGAP_CTRL_1_OFFSET, .mask_hot_mask = DRA752_BANDGAP_CTRL_1_MASK_HOT_GPU_MASK, .mask_cold_mask = DRA752_BANDGAP_CTRL_1_MASK_COLD_GPU_MASK, .mask_counter_delay_mask = DRA752_BANDGAP_CTRL_1_COUNTER_DELAY_MASK, .mask_freeze_mask = DRA752_BANDGAP_CTRL_1_FREEZE_GPU_MASK, .bgap_threshold = DRA752_BANDGAP_THRESHOLD_GPU_OFFSET, .threshold_thot_mask = DRA752_BANDGAP_THRESHOLD_HOT_MASK, .threshold_tcold_mask = DRA752_BANDGAP_THRESHOLD_COLD_MASK, .bgap_status = DRA752_BANDGAP_STATUS_1_OFFSET, .status_hot_mask = DRA752_BANDGAP_STATUS_1_HOT_GPU_MASK, .status_cold_mask = DRA752_BANDGAP_STATUS_1_COLD_GPU_MASK, .ctrl_dtemp_1 = DRA752_DTEMP_GPU_1_OFFSET, 
.ctrl_dtemp_2 = DRA752_DTEMP_GPU_2_OFFSET,
	.bgap_efuse = DRA752_STD_FUSE_OPP_BGAP_GPU_OFFSET,
};

/* Thresholds and limits for DRA752 MPU temperature sensor */
static struct temp_sensor_data dra752_mpu_temp_sensor_data = {
	.t_hot = DRA752_MPU_T_HOT,
	.t_cold = DRA752_MPU_T_COLD,
	.min_freq = DRA752_MPU_MIN_FREQ,
	.max_freq = DRA752_MPU_MAX_FREQ,
};

/* Thresholds and limits for DRA752 GPU temperature sensor */
static struct temp_sensor_data dra752_gpu_temp_sensor_data = {
	.t_hot = DRA752_GPU_T_HOT,
	.t_cold = DRA752_GPU_T_COLD,
	.min_freq = DRA752_GPU_MIN_FREQ,
	.max_freq = DRA752_GPU_MAX_FREQ,
};

/* Thresholds and limits for DRA752 CORE temperature sensor */
static struct temp_sensor_data dra752_core_temp_sensor_data = {
	.t_hot = DRA752_CORE_T_HOT,
	.t_cold = DRA752_CORE_T_COLD,
	.min_freq = DRA752_CORE_MIN_FREQ,
	.max_freq = DRA752_CORE_MAX_FREQ,
};

/* Thresholds and limits for DRA752 DSPEVE temperature sensor */
static struct temp_sensor_data dra752_dspeve_temp_sensor_data = {
	.t_hot = DRA752_DSPEVE_T_HOT,
	.t_cold = DRA752_DSPEVE_T_COLD,
	.min_freq = DRA752_DSPEVE_MIN_FREQ,
	.max_freq = DRA752_DSPEVE_MAX_FREQ,
};

/* Thresholds and limits for DRA752 IVA temperature sensor */
static struct temp_sensor_data dra752_iva_temp_sensor_data = {
	.t_hot = DRA752_IVA_T_HOT,
	.t_cold = DRA752_IVA_T_COLD,
	.min_freq = DRA752_IVA_MIN_FREQ,
	.max_freq = DRA752_IVA_MAX_FREQ,
};

/*
 * DRA752 : Temperature values in milli degree celsius
 * ADC code values from 540 to 945
 * (lookup table indexed by ADC code minus DRA752_ADC_START_VALUE)
 */
static int
dra752_adc_to_temp[DRA752_ADC_END_VALUE - DRA752_ADC_START_VALUE + 1] = {
	/* Index 540 - 549 */
	-40000, -40000, -40000, -40000, -39800, -39400, -39000, -38600,
	-38200, -37800,
	/* Index 550 - 559 */
	-37400, -37000, -36600, -36200, -35800, -35300, -34700, -34200,
	-33800, -33400,
	/* Index 560 - 569 */
	-33000, -32600, -32200, -31800, -31400, -31000, -30600, -30200,
	-29800, -29400,
	/* Index 570 - 579 */
	-29000, -28600, -28200, -27700, -27100, -26600, -26200, -25800,
	-25400, -25000,
	/* Index 580 - 589 */
	-24600, -24200, -23800, -23400, -23000, -22600, -22200, -21800,
	-21400, -21000,
	/* Index 590 - 599 */
	-20500, -19900, -19400, -19000, -18600, -18200, -17800, -17400,
	-17000, -16600,
	/* Index 600 - 609 */
	-16200, -15800, -15400, -15000, -14600, -14200, -13800, -13400,
	-13000, -12500,
	/* Index 610 - 619 */
	-11900, -11400, -11000, -10600, -10200, -9800, -9400, -9000,
	-8600, -8200,
	/* Index 620 - 629 */
	-7800, -7400, -7000, -6600, -6200, -5800, -5400, -5000,
	-4500, -3900,
	/* Index 630 - 639 */
	-3400, -3000, -2600, -2200, -1800, -1400, -1000, -600,
	-200, 200,
	/* Index 640 - 649 */
	600, 1000, 1400, 1800, 2200, 2600, 3000, 3400, 3900, 4500,
	/* Index 650 - 659 */
	5000, 5400, 5800, 6200, 6600, 7000, 7400, 7800, 8200, 8600,
	/* Index 660 - 669 */
	9000, 9400, 9800, 10200, 10600, 11000, 11400, 11800, 12200, 12700,
	/* Index 670 - 679 */
	13300, 13800, 14200, 14600, 15000, 15400, 15800, 16200, 16600, 17000,
	/* Index 680 - 689 */
	17400, 17800, 18200, 18600, 19000, 19400, 19800, 20200, 20600, 21000,
	/* Index 690 - 699 */
	21400, 21900, 22500, 23000, 23400, 23800, 24200, 24600, 25000, 25400,
	/* Index 700 - 709 */
	25800, 26200, 26600, 27000, 27400, 27800, 28200, 28600, 29000, 29400,
	/* Index 710 - 719 */
	29800, 30200, 30600, 31000, 31400, 31900, 32500, 33000, 33400, 33800,
	/* Index 720 - 729 */
	34200, 34600, 35000, 35400, 35800, 36200, 36600, 37000, 37400, 37800,
	/* Index 730 - 739 */
	38200, 38600, 39000, 39400, 39800, 40200, 40600, 41000, 41400, 41800,
	/* Index 740 - 749 */
	42200, 42600, 43100, 43700, 44200, 44600, 45000, 45400, 45800, 46200,
	/* Index 750 - 759 */
	46600, 47000, 47400, 47800, 48200, 48600, 49000, 49400, 49800, 50200,
	/* Index 760 - 769 */
	50600, 51000, 51400, 51800, 52200, 52600, 53000, 53400, 53800, 54200,
	/* Index 770 - 779 */
	54600, 55000, 55400, 55900, 56500, 57000, 57400, 57800, 58200, 58600,
	/* Index 780 - 789 */
	59000, 59400, 59800, 60200, 60600, 61000, 61400, 61800, 62200, 62600,
	/* Index 790 - 799 */
	63000, 63400, 63800, 64200, 64600, 65000, 65400, 65800, 66200, 66600,
	/* Index 800 - 809 */
	67000, 67400, 67800, 68200, 68600, 69000, 69400, 69800, 70200, 70600,
	/* Index 810 - 819 */
	71000, 71500, 72100, 72600, 73000, 73400, 73800, 74200, 74600, 75000,
	/* Index 820 - 829 */
	75400, 75800, 76200, 76600, 77000, 77400, 77800, 78200, 78600, 79000,
	/* Index 830 - 839 */
	79400, 79800, 80200, 80600, 81000, 81400, 81800, 82200, 82600, 83000,
	/* Index 840 - 849 */
	83400, 83800, 84200, 84600, 85000, 85400, 85800, 86200, 86600, 87000,
	/* Index 850 - 859 */
	87400, 87800, 88200, 88600, 89000, 89400, 89800, 90200, 90600, 91000,
	/* Index 860 - 869 */
	91400, 91800, 92200, 92600, 93000, 93400, 93800, 94200, 94600, 95000,
	/* Index 870 - 879 */
	95400, 95800, 96200, 96600, 97000, 97500, 98100, 98600, 99000, 99400,
	/* Index 880 - 889 */
	99800, 100200, 100600, 101000, 101400, 101800, 102200, 102600,
	103000, 103400,
	/* Index 890 - 899 */
	103800, 104200, 104600, 105000, 105400, 105800, 106200, 106600,
	107000, 107400,
	/* Index 900 - 909 */
	107800, 108200, 108600, 109000, 109400, 109800, 110200, 110600,
	111000, 111400,
	/* Index 910 - 919 */
	111800, 112200, 112600, 113000, 113400, 113800, 114200, 114600,
	115000, 115400,
	/* Index 920 - 929 */
	115800, 116200, 116600, 117000, 117400, 117800, 118200, 118600,
	119000, 119400,
	/* Index 930 - 939 */
	119800, 120200, 120600, 121000, 121400, 121800, 122200, 122600,
	123000, 123400,
	/* Index 940 - 945 */
	123800, 124200, 124600, 124900, 125000, 125000,
};

/* DRA752 data: five sensors (MPU/GPU/CORE/DSPEVE/IVA), shared conv table */
const struct ti_bandgap_data dra752_data = {
	.features = TI_BANDGAP_FEATURE_FREEZE_BIT |
			TI_BANDGAP_FEATURE_TALERT |
			TI_BANDGAP_FEATURE_COUNTER_DELAY |
			TI_BANDGAP_FEATURE_HISTORY_BUFFER |
			TI_BANDGAP_FEATURE_ERRATA_814,
	.fclock_name = "l3instr_ts_gclk_div",
	.div_ck_name = "l3instr_ts_gclk_div",
	.conv_table = dra752_adc_to_temp,
	.adc_start_val = DRA752_ADC_START_VALUE,
	.adc_end_val = DRA752_ADC_END_VALUE,
	.expose_sensor = ti_thermal_expose_sensor,
	.remove_sensor = ti_thermal_remove_sensor,
	.sensors = {
		{
		.registers = &dra752_mpu_temp_sensor_registers,
		.ts_data = &dra752_mpu_temp_sensor_data,
		.domain = "cpu",
		.register_cooling = ti_thermal_register_cpu_cooling,
		.unregister_cooling = ti_thermal_unregister_cpu_cooling,
		.slope_pcb = DRA752_GRADIENT_SLOPE_W_PCB,
		.constant_pcb = DRA752_GRADIENT_CONST_W_PCB,
		},
		{
		.registers = &dra752_gpu_temp_sensor_registers,
		.ts_data = &dra752_gpu_temp_sensor_data,
		.domain = "gpu",
		.slope_pcb = DRA752_GRADIENT_SLOPE_W_PCB,
		.constant_pcb = DRA752_GRADIENT_CONST_W_PCB,
		},
		{
		.registers = &dra752_core_temp_sensor_registers,
		.ts_data = &dra752_core_temp_sensor_data,
		.domain = "core",
		.slope_pcb = DRA752_GRADIENT_SLOPE_W_PCB,
		.constant_pcb = DRA752_GRADIENT_CONST_W_PCB,
		},
		{
		.registers = &dra752_dspeve_temp_sensor_registers,
		.ts_data = &dra752_dspeve_temp_sensor_data,
		.domain = "dspeve",
		.slope_pcb = DRA752_GRADIENT_SLOPE_W_PCB,
		.constant_pcb = DRA752_GRADIENT_CONST_W_PCB,
		},
		{
		.registers = &dra752_iva_temp_sensor_registers,
		.ts_data = &dra752_iva_temp_sensor_data,
		.domain = "iva",
		.slope_pcb = DRA752_GRADIENT_SLOPE_W_PCB,
		.constant_pcb = DRA752_GRADIENT_CONST_W_PCB,
		},
	},
	.sensor_count = 5,
};
/* ===== end of drivers/thermal/ti-soc-thermal/dra752-thermal-data.c (repo: linux-master) ===== */
// SPDX-License-Identifier: GPL-2.0-only /* * TI Bandgap temperature sensor driver * * Copyright (C) 2011-2012 Texas Instruments Incorporated - http://www.ti.com/ * Author: J Keerthy <[email protected]> * Author: Moiz Sonasath <[email protected]> * Couple of fixes, DT and MFD adaptation: * Eduardo Valentin <[email protected]> */ #include <linux/clk.h> #include <linux/cpu_pm.h> #include <linux/device.h> #include <linux/err.h> #include <linux/export.h> #include <linux/gpio/consumer.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/io.h> #include <linux/iopoll.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/of.h> #include <linux/of_device.h> #include <linux/of_irq.h> #include <linux/of_platform.h> #include <linux/platform_device.h> #include <linux/pm.h> #include <linux/pm_runtime.h> #include <linux/reboot.h> #include <linux/spinlock.h> #include <linux/sys_soc.h> #include <linux/types.h> #include "ti-bandgap.h" static int ti_bandgap_force_single_read(struct ti_bandgap *bgp, int id); #ifdef CONFIG_PM_SLEEP static int bandgap_omap_cpu_notifier(struct notifier_block *nb, unsigned long cmd, void *v); #endif /*** Helper functions to access registers and their bitfields ***/ /** * ti_bandgap_readl() - simple read helper function * @bgp: pointer to ti_bandgap structure * @reg: desired register (offset) to be read * * Helper function to read bandgap registers. It uses the io remapped area. * Return: the register value. */ static u32 ti_bandgap_readl(struct ti_bandgap *bgp, u32 reg) { return readl(bgp->base + reg); } /** * ti_bandgap_writel() - simple write helper function * @bgp: pointer to ti_bandgap structure * @val: desired register value to be written * @reg: desired register (offset) to be written * * Helper function to write bandgap registers. It uses the io remapped area. */ static void ti_bandgap_writel(struct ti_bandgap *bgp, u32 val, u32 reg) { writel(val, bgp->base + reg); } /** * DOC: macro to update bits. 
*
 * RMW_BITS() - used to read, modify and update bandgap bitfields.
 * The value passed will be shifted.
 */
#define RMW_BITS(bgp, id, reg, mask, val)			\
do {								\
	struct temp_sensor_registers *t;			\
	u32 r;							\
								\
	t = bgp->conf->sensors[(id)].registers;			\
	r = ti_bandgap_readl(bgp, t->reg);			\
	r &= ~t->mask;						\
	r |= (val) << __ffs(t->mask);				\
	ti_bandgap_writel(bgp, r, t->reg);			\
} while (0)

/*** Basic helper functions ***/

/**
 * ti_bandgap_power() - controls the power state of a bandgap device
 * @bgp: pointer to ti_bandgap structure
 * @on: desired power state (1 - on, 0 - off)
 *
 * Used to power on/off a bandgap device instance. Only used on those
 * that features tempsoff bit.
 *
 * Return: 0 on success, -ENOTSUPP if tempsoff is not supported.
 */
static int ti_bandgap_power(struct ti_bandgap *bgp, bool on)
{
	int i;

	if (!TI_BANDGAP_HAS(bgp, POWER_SWITCH))
		return -ENOTSUPP;

	for (i = 0; i < bgp->conf->sensor_count; i++)
		/* active on 0 */
		RMW_BITS(bgp, i, temp_sensor_ctrl, bgap_tempsoff_mask, !on);

	return 0;
}

/**
 * ti_errata814_bandgap_read_temp() - helper function to read dra7 sensor temperature
 * @bgp: pointer to ti_bandgap structure
 * @reg: desired register (offset) to be read
 *
 * Function to read dra7 bandgap sensor temperature. This is done separately
 * so as to workaround the errata "Bandgap Temperature read Dtemp can be
 * corrupted" - Errata ID: i814".
 * Read accesses to registers listed below can be corrupted due to incorrect
 * resynchronization between clock domains.
 * Read access to registers below can be corrupted :
 * CTRL_CORE_DTEMP_MPU/GPU/CORE/DSPEVE/IVA_n (n = 0 to 4)
 * CTRL_CORE_TEMP_SENSOR_MPU/GPU/CORE/DSPEVE/IVA_n
 *
 * Return: the register value.
 */
static u32 ti_errata814_bandgap_read_temp(struct ti_bandgap *bgp, u32 reg)
{
	u32 val1, val2;

	val1 = ti_bandgap_readl(bgp, reg);
	val2 = ti_bandgap_readl(bgp, reg);

	/* If both times we read the same value then that is right */
	if (val1 == val2)
		return val1;

	/* if val1 and val2 are different read it third time */
	return ti_bandgap_readl(bgp, reg);
}

/**
 * ti_bandgap_read_temp() - helper function to read sensor temperature
 * @bgp: pointer to ti_bandgap structure
 * @id: bandgap sensor id
 *
 * Function to concentrate the steps to read sensor temperature register.
 * This function is desired because, depending on bandgap device version,
 * it might be needed to freeze the bandgap state machine, before fetching
 * the register value.
 *
 * Return: temperature in ADC values.
 */
static u32 ti_bandgap_read_temp(struct ti_bandgap *bgp, int id)
{
	struct temp_sensor_registers *tsr;
	u32 temp, reg;

	tsr = bgp->conf->sensors[id].registers;
	reg = tsr->temp_sensor_ctrl;

	if (TI_BANDGAP_HAS(bgp, FREEZE_BIT)) {
		RMW_BITS(bgp, id, bgap_mask_ctrl, mask_freeze_mask, 1);
		/*
		 * In case we cannot read from cur_dtemp / dtemp_0,
		 * then we read from the last valid temp read
		 */
		reg = tsr->ctrl_dtemp_1;
	}

	/* read temperature */
	if (TI_BANDGAP_HAS(bgp, ERRATA_814))
		temp = ti_errata814_bandgap_read_temp(bgp, reg);
	else
		temp = ti_bandgap_readl(bgp, reg);

	temp &= tsr->bgap_dtemp_mask;

	/* unfreeze the state machine again once the value is latched */
	if (TI_BANDGAP_HAS(bgp, FREEZE_BIT))
		RMW_BITS(bgp, id, bgap_mask_ctrl, mask_freeze_mask, 0);

	return temp;
}

/*** IRQ handlers ***/

/**
 * ti_bandgap_talert_irq_handler() - handles Temperature alert IRQs
 * @irq: IRQ number
 * @data: private data (struct ti_bandgap *)
 *
 * This is the Talert handler. Use it only if bandgap device features
 * HAS(TALERT). This handler goes over all sensors and checks their
 * conditions and acts accordingly. In case there are events pending,
 * it will reset the event mask to wait for the opposite event (next event).
 * Every time there is a new event, it will be reported to thermal layer.
*
 * Return: IRQ_HANDLED
 */
static irqreturn_t ti_bandgap_talert_irq_handler(int irq, void *data)
{
	struct ti_bandgap *bgp = data;
	struct temp_sensor_registers *tsr;
	u32 t_hot = 0, t_cold = 0, ctrl;
	int i;

	spin_lock(&bgp->lock);
	for (i = 0; i < bgp->conf->sensor_count; i++) {
		tsr = bgp->conf->sensors[i].registers;
		ctrl = ti_bandgap_readl(bgp, tsr->bgap_status);

		/* Read the status of t_hot */
		t_hot = ctrl & tsr->status_hot_mask;

		/* Read the status of t_cold */
		t_cold = ctrl & tsr->status_cold_mask;

		if (!t_cold && !t_hot)
			continue;

		ctrl = ti_bandgap_readl(bgp, tsr->bgap_mask_ctrl);
		/*
		 * One TALERT interrupt: Two sources
		 * If the interrupt is due to t_hot then mask t_hot and
		 * unmask t_cold else mask t_cold and unmask t_hot
		 */
		if (t_hot) {
			ctrl &= ~tsr->mask_hot_mask;
			ctrl |= tsr->mask_cold_mask;
		} else if (t_cold) {
			ctrl &= ~tsr->mask_cold_mask;
			ctrl |= tsr->mask_hot_mask;
		}

		ti_bandgap_writel(bgp, ctrl, tsr->bgap_mask_ctrl);

		dev_dbg(bgp->dev,
			"%s: IRQ from %s sensor: hotevent %d coldevent %d\n",
			__func__, bgp->conf->sensors[i].domain,
			t_hot, t_cold);

		/* report temperature to whom may concern */
		if (bgp->conf->report_temperature)
			bgp->conf->report_temperature(bgp, i);
	}
	spin_unlock(&bgp->lock);

	return IRQ_HANDLED;
}

/**
 * ti_bandgap_tshut_irq_handler() - handles Temperature shutdown signal
 * @irq: IRQ number
 * @data: private data (unused)
 *
 * This is the Tshut handler. Use it only if bandgap device features
 * HAS(TSHUT). If any sensor fires the Tshut signal, we simply shutdown
 * the system.
 *
 * Return: IRQ_HANDLED
 */
static irqreturn_t ti_bandgap_tshut_irq_handler(int irq, void *data)
{
	pr_emerg("%s: TSHUT temperature reached. Needs shut down...\n",
		 __func__);

	orderly_poweroff(true);

	return IRQ_HANDLED;
}

/*** Helper functions which manipulate conversion ADC <-> mi Celsius ***/

/**
 * ti_bandgap_adc_to_mcelsius() - converts an ADC value to mCelsius scale
 * @bgp: struct ti_bandgap pointer
 * @adc_val: value in ADC representation
 * @t: address where to write the resulting temperature in mCelsius
 *
 * Simple conversion from ADC representation to mCelsius. In case the ADC value
 * is out of the ADC conv table range, it returns -ERANGE, 0 on success.
 * The conversion table is indexed by the ADC values.
 *
 * Return: 0 if conversion was successful, else -ERANGE in case the @adc_val
 * argument is out of the ADC conv table range.
 */
static int ti_bandgap_adc_to_mcelsius(struct ti_bandgap *bgp, int adc_val,
				      int *t)
{
	const struct ti_bandgap_data *conf = bgp->conf;

	/* look up for temperature in the table and return the temperature */
	if (adc_val < conf->adc_start_val || adc_val > conf->adc_end_val)
		return -ERANGE;

	*t = bgp->conf->conv_table[adc_val - conf->adc_start_val];

	return 0;
}

/**
 * ti_bandgap_validate() - helper to check the sanity of a struct ti_bandgap
 * @bgp: struct ti_bandgap pointer
 * @id: bandgap sensor id
 *
 * Checks if the bandgap pointer is valid and if the sensor id is also
 * applicable.
 *
 * Return: 0 if no errors, -EINVAL for invalid @bgp pointer or -ERANGE if
 * @id cannot index @bgp sensors.
 */
static inline int ti_bandgap_validate(struct ti_bandgap *bgp, int id)
{
	if (IS_ERR_OR_NULL(bgp)) {
		pr_err("%s: invalid bandgap pointer\n", __func__);
		return -EINVAL;
	}

	if ((id < 0) || (id >= bgp->conf->sensor_count)) {
		dev_err(bgp->dev, "%s: sensor id out of range (%d)\n",
			__func__, id);
		return -ERANGE;
	}

	return 0;
}

/**
 * ti_bandgap_read_counter() - read the sensor counter
 * @bgp: pointer to bandgap instance
 * @id: sensor id
 * @interval: resulting update interval in miliseconds
 */
static void ti_bandgap_read_counter(struct ti_bandgap *bgp, int id,
				    int *interval)
{
	struct temp_sensor_registers *tsr;
	int time;

	tsr = bgp->conf->sensors[id].registers;
	time = ti_bandgap_readl(bgp, tsr->bgap_counter);
	time = (time & tsr->counter_mask) >> __ffs(tsr->counter_mask);
	/* counter ticks -> milliseconds, using the cached divider clk rate */
	time = time * 1000 / bgp->clk_rate;
	*interval = time;
}

/**
 * ti_bandgap_read_counter_delay() - read the sensor counter delay
 * @bgp: pointer to bandgap instance
 * @id: sensor id
 * @interval: resulting update interval in miliseconds
 */
static void ti_bandgap_read_counter_delay(struct ti_bandgap *bgp, int id,
					  int *interval)
{
	struct temp_sensor_registers *tsr;
	int reg_val;

	tsr = bgp->conf->sensors[id].registers;

	reg_val = ti_bandgap_readl(bgp, tsr->bgap_mask_ctrl);
	reg_val = (reg_val & tsr->mask_counter_delay_mask) >>
		__ffs(tsr->mask_counter_delay_mask);

	/* encoded delay field -> milliseconds (see write_counter_delay) */
	switch (reg_val) {
	case 0:
		*interval = 0;
		break;
	case 1:
		*interval = 1;
		break;
	case 2:
		*interval = 10;
		break;
	case 3:
		*interval = 100;
		break;
	case 4:
		*interval = 250;
		break;
	case 5:
		*interval = 500;
		break;
	default:
		dev_warn(bgp->dev, "Wrong counter delay value read from register %X",
			 reg_val);
	}
}

/**
 * ti_bandgap_read_update_interval() - read the sensor update interval
 * @bgp: pointer to bandgap instance
 * @id: sensor id
 * @interval: resulting update interval in miliseconds
 *
 * Return: 0 on success or the proper error code
 */
int ti_bandgap_read_update_interval(struct ti_bandgap *bgp, int id,
				    int *interval)
{
	int ret = 0;

	ret = ti_bandgap_validate(bgp, id);
	if (ret)
		goto exit;

	if (!TI_BANDGAP_HAS(bgp, COUNTER) &&
	    !TI_BANDGAP_HAS(bgp, COUNTER_DELAY)) {
		ret = -ENOTSUPP;
		goto exit;
	}

	if (TI_BANDGAP_HAS(bgp, COUNTER)) {
		ti_bandgap_read_counter(bgp, id, interval);
		goto exit;
	}

	ti_bandgap_read_counter_delay(bgp, id, interval);
exit:
	return ret;
}

/**
 * ti_bandgap_write_counter_delay() - set the counter_delay
 * @bgp: pointer to bandgap instance
 * @id: sensor id
 * @interval: desired update interval in miliseconds
 *
 * Return: 0 on success or the proper error code
 */
static int ti_bandgap_write_counter_delay(struct ti_bandgap *bgp, int id,
					  u32 interval)
{
	int rval;

	switch (interval) {
	case 0: /* Immediate conversion */
		rval = 0x0;
		break;
	case 1: /* Conversion after ever 1ms */
		rval = 0x1;
		break;
	case 10: /* Conversion after ever 10ms */
		rval = 0x2;
		break;
	case 100: /* Conversion after ever 100ms */
		rval = 0x3;
		break;
	case 250: /* Conversion after ever 250ms */
		rval = 0x4;
		break;
	case 500: /* Conversion after ever 500ms */
		rval = 0x5;
		break;
	default:
		dev_warn(bgp->dev, "Delay %d ms is not supported\n", interval);
		return -EINVAL;
	}

	spin_lock(&bgp->lock);
	RMW_BITS(bgp, id, bgap_mask_ctrl, mask_counter_delay_mask, rval);
	spin_unlock(&bgp->lock);

	return 0;
}

/**
 * ti_bandgap_write_counter() - set the bandgap sensor counter
 * @bgp: pointer to bandgap instance
 * @id: sensor id
 * @interval: desired update interval in miliseconds
 */
static void ti_bandgap_write_counter(struct ti_bandgap *bgp, int id,
				     u32 interval)
{
	/* milliseconds -> counter ticks */
	interval = interval * bgp->clk_rate / 1000;
	spin_lock(&bgp->lock);
	RMW_BITS(bgp, id, bgap_counter, counter_mask, interval);
	spin_unlock(&bgp->lock);
}

/**
 * ti_bandgap_write_update_interval() - set the update interval
 * @bgp: pointer to bandgap instance
 * @id: sensor id
 * @interval: desired update interval in miliseconds
 *
 * Return: 0 on success or the proper error code
 */
int ti_bandgap_write_update_interval(struct ti_bandgap *bgp,
				     int id, u32 interval)
{
	int ret = ti_bandgap_validate(bgp, id);
	if (ret)
		goto exit;

	if (!TI_BANDGAP_HAS(bgp, COUNTER) &&
	    !TI_BANDGAP_HAS(bgp, COUNTER_DELAY)) {
		ret = -ENOTSUPP;
		goto exit;
	}

	if (TI_BANDGAP_HAS(bgp, COUNTER)) {
		ti_bandgap_write_counter(bgp, id, interval);
		goto exit;
	}

	ret = ti_bandgap_write_counter_delay(bgp, id, interval);
exit:
	return ret;
}

/**
 * ti_bandgap_read_temperature() - report current temperature
 * @bgp: pointer to bandgap instance
 * @id: sensor id
 * @temperature: resulting temperature
 *
 * Return: 0 on success or the proper error code
 */
int ti_bandgap_read_temperature(struct ti_bandgap *bgp, int id,
				int *temperature)
{
	u32 temp;
	int ret;

	ret = ti_bandgap_validate(bgp, id);
	if (ret)
		return ret;

	if (!TI_BANDGAP_HAS(bgp, MODE_CONFIG)) {
		ret = ti_bandgap_force_single_read(bgp, id);
		if (ret)
			return ret;
	}

	spin_lock(&bgp->lock);
	temp = ti_bandgap_read_temp(bgp, id);
	spin_unlock(&bgp->lock);

	/*
	 * NOTE(review): &temp is u32* passed where the callee takes int*;
	 * works because the ADC range fits in a positive int, but the
	 * signedness mismatch is worth confirming against the header.
	 */
	ret = ti_bandgap_adc_to_mcelsius(bgp, temp, &temp);
	if (ret)
		return -EIO;

	*temperature = temp;

	return 0;
}

/**
 * ti_bandgap_set_sensor_data() - helper function to store thermal
 * framework related data.
 * @bgp: pointer to bandgap instance
 * @id: sensor id
 * @data: thermal framework related data to be stored
 *
 * Return: 0 on success or the proper error code
 */
int ti_bandgap_set_sensor_data(struct ti_bandgap *bgp, int id, void *data)
{
	int ret = ti_bandgap_validate(bgp, id);
	if (ret)
		return ret;

	bgp->regval[id].data = data;

	return 0;
}

/**
 * ti_bandgap_get_sensor_data() - helper function to get thermal
 * framework related data.
* @bgp: pointer to bandgap instance
 * @id: sensor id
 *
 * Return: data stored by set function with sensor id on success or NULL
 */
void *ti_bandgap_get_sensor_data(struct ti_bandgap *bgp, int id)
{
	int ret = ti_bandgap_validate(bgp, id);
	if (ret)
		return ERR_PTR(ret);

	return bgp->regval[id].data;
}

/*** Helper functions used during device initialization ***/

/**
 * ti_bandgap_force_single_read() - executes 1 single ADC conversion
 * @bgp: pointer to struct ti_bandgap
 * @id: sensor id which it is desired to read 1 temperature
 *
 * Used to initialize the conversion state machine and set it to a valid
 * state. Called during device initialization and context restore events.
 *
 * Return: 0
 */
static int ti_bandgap_force_single_read(struct ti_bandgap *bgp, int id)
{
	struct temp_sensor_registers *tsr = bgp->conf->sensors[id].registers;
	void __iomem *temp_sensor_ctrl = bgp->base + tsr->temp_sensor_ctrl;
	int error;
	u32 val;

	/* Select continuous or single conversion mode */
	if (TI_BANDGAP_HAS(bgp, MODE_CONFIG)) {
		if (TI_BANDGAP_HAS(bgp, CONT_MODE_ONLY))
			RMW_BITS(bgp, id, bgap_mode_ctrl, mode_ctrl_mask, 1);
		else
			RMW_BITS(bgp, id, bgap_mode_ctrl, mode_ctrl_mask, 0);
	}

	/* Set Start of Conversion if available */
	if (tsr->bgap_soc_mask) {
		RMW_BITS(bgp, id, temp_sensor_ctrl, bgap_soc_mask, 1);

		/* Wait for EOCZ going up */
		error = readl_poll_timeout_atomic(temp_sensor_ctrl, val,
						  val & tsr->bgap_eocz_mask,
						  1, 1000);
		if (error)
			dev_warn(bgp->dev, "eocz timed out waiting high\n");

		/* Clear Start of Conversion if available */
		RMW_BITS(bgp, id, temp_sensor_ctrl, bgap_soc_mask, 0);
	}

	/* Wait for EOCZ going down, always needed even if no bgap_soc_mask */
	error = readl_poll_timeout_atomic(temp_sensor_ctrl, val,
					  !(val & tsr->bgap_eocz_mask),
					  1, 1500);
	if (error)
		dev_warn(bgp->dev, "eocz timed out waiting low\n");

	return 0;
}

/**
 * ti_bandgap_set_continuous_mode() - One time enabling of continuous mode
 * @bgp: pointer to struct ti_bandgap
 *
 * Call this function only if HAS(MODE_CONFIG) is set.
As this driver may
 * be used for junction temperature monitoring, it is desirable that the
 * sensors are operational all the time, so that alerts are generated
 * properly.
 *
 * Return: 0
 */
static int ti_bandgap_set_continuous_mode(struct ti_bandgap *bgp)
{
	int i;

	for (i = 0; i < bgp->conf->sensor_count; i++) {
		/* Perform a single read just before enabling continuous */
		ti_bandgap_force_single_read(bgp, i);
		RMW_BITS(bgp, i, bgap_mode_ctrl, mode_ctrl_mask, 1);
	}

	return 0;
}

/**
 * ti_bandgap_get_trend() - To fetch the temperature trend of a sensor
 * @bgp: pointer to struct ti_bandgap
 * @id: id of the individual sensor
 * @trend: Pointer to trend.
 *
 * This function needs to be called to fetch the temperature trend of a
 * Particular sensor. The function computes the difference in temperature
 * w.r.t time. For the bandgaps with built in history buffer the temperatures
 * are read from the buffer and for those without the Buffer -ENOTSUPP is
 * returned.
 *
 * Return: 0 if no error, else return corresponding error.
If no
 * error then the trend value is passed on to trend parameter
 */
int ti_bandgap_get_trend(struct ti_bandgap *bgp, int id, int *trend)
{
	struct temp_sensor_registers *tsr;
	u32 temp1, temp2, reg1, reg2;
	int t1, t2, interval, ret = 0;

	ret = ti_bandgap_validate(bgp, id);
	if (ret)
		goto exit;

	if (!TI_BANDGAP_HAS(bgp, HISTORY_BUFFER) ||
	    !TI_BANDGAP_HAS(bgp, FREEZE_BIT)) {
		ret = -ENOTSUPP;
		goto exit;
	}

	spin_lock(&bgp->lock);

	tsr = bgp->conf->sensors[id].registers;

	/* Freeze and read the last 2 valid readings */
	RMW_BITS(bgp, id, bgap_mask_ctrl, mask_freeze_mask, 1);
	reg1 = tsr->ctrl_dtemp_1;
	reg2 = tsr->ctrl_dtemp_2;

	/* read temperature from history buffer */
	temp1 = ti_bandgap_readl(bgp, reg1);
	temp1 &= tsr->bgap_dtemp_mask;

	temp2 = ti_bandgap_readl(bgp, reg2);
	temp2 &= tsr->bgap_dtemp_mask;

	/* Convert from adc values to mCelsius temperature */
	ret = ti_bandgap_adc_to_mcelsius(bgp, temp1, &t1);
	if (ret)
		goto unfreeze;

	ret = ti_bandgap_adc_to_mcelsius(bgp, temp2, &t2);
	if (ret)
		goto unfreeze;

	/* Fetch the update interval */
	ret = ti_bandgap_read_update_interval(bgp, id, &interval);
	if (ret)
		goto unfreeze;

	/* Set the interval to 1 ms if bandgap counter delay is not set */
	if (interval == 0)
		interval = 1;

	*trend = (t1 - t2) / interval;

	dev_dbg(bgp->dev, "The temperatures are t1 = %d and t2 = %d and trend =%d\n",
		t1, t2, *trend);

unfreeze:
	/* always unfreeze the state machine, even on conversion errors */
	RMW_BITS(bgp, id, bgap_mask_ctrl, mask_freeze_mask, 0);
	spin_unlock(&bgp->lock);
exit:
	return ret;
}

/**
 * ti_bandgap_tshut_init() - setup and initialize tshut handling
 * @bgp: pointer to struct ti_bandgap
 * @pdev: pointer to device struct platform_device
 *
 * Call this function only in case the bandgap features HAS(TSHUT).
 * In this case, the driver needs to handle the TSHUT signal as an IRQ.
 * The IRQ is wired as a GPIO, and for this purpose, it is required
 * to specify which GPIO line is used. TSHUT IRQ is fired anytime
 * one of the bandgap sensors violates the TSHUT high/hot threshold.
 * And in that case, the system must go off.
* * Return: 0 if no error, else error status */ static int ti_bandgap_tshut_init(struct ti_bandgap *bgp, struct platform_device *pdev) { int status; status = request_irq(gpiod_to_irq(bgp->tshut_gpiod), ti_bandgap_tshut_irq_handler, IRQF_TRIGGER_RISING, "tshut", NULL); if (status) dev_err(bgp->dev, "request irq failed for TSHUT"); return 0; } /** * ti_bandgap_talert_init() - setup and initialize talert handling * @bgp: pointer to struct ti_bandgap * @pdev: pointer to device struct platform_device * * Call this function only in case the bandgap features HAS(TALERT). * In this case, the driver needs to handle the TALERT signals as an IRQs. * TALERT is a normal IRQ and it is fired any time thresholds (hot or cold) * are violated. In these situation, the driver must reprogram the thresholds, * accordingly to specified policy. * * Return: 0 if no error, else return corresponding error. */ static int ti_bandgap_talert_init(struct ti_bandgap *bgp, struct platform_device *pdev) { int ret; bgp->irq = platform_get_irq(pdev, 0); if (bgp->irq < 0) return bgp->irq; ret = request_threaded_irq(bgp->irq, NULL, ti_bandgap_talert_irq_handler, IRQF_TRIGGER_HIGH | IRQF_ONESHOT, "talert", bgp); if (ret) { dev_err(&pdev->dev, "Request threaded irq failed.\n"); return ret; } return 0; } static const struct of_device_id of_ti_bandgap_match[]; /** * ti_bandgap_build() - parse DT and setup a struct ti_bandgap * @pdev: pointer to device struct platform_device * * Used to read the device tree properties accordingly to the bandgap * matching version. Based on bandgap version and its capabilities it * will build a struct ti_bandgap out of the required DT entries. * * Return: valid bandgap structure if successful, else returns ERR_PTR * return value must be verified with IS_ERR. 
 */
static struct ti_bandgap *ti_bandgap_build(struct platform_device *pdev)
{
	struct device_node *node = pdev->dev.of_node;
	const struct of_device_id *of_id;
	struct ti_bandgap *bgp;
	struct resource *res;
	int i;

	/* just for the sake */
	if (!node) {
		dev_err(&pdev->dev, "no platform information available\n");
		return ERR_PTR(-EINVAL);
	}

	bgp = devm_kzalloc(&pdev->dev, sizeof(*bgp), GFP_KERNEL);
	if (!bgp)
		return ERR_PTR(-ENOMEM);

	of_id = of_match_device(of_ti_bandgap_match, &pdev->dev);
	if (of_id)
		bgp->conf = of_id->data;

	/*
	 * NOTE(review): bgp->conf is dereferenced below without a NULL
	 * check; presumably a match is guaranteed because probing only
	 * happens via this table — confirm before relying on it.
	 */
	/* register shadow for context save and restore */
	bgp->regval = devm_kcalloc(&pdev->dev, bgp->conf->sensor_count,
				   sizeof(*bgp->regval), GFP_KERNEL);
	if (!bgp->regval)
		return ERR_PTR(-ENOMEM);

	i = 0;
	do {
		void __iomem *chunk;

		res = platform_get_resource(pdev, IORESOURCE_MEM, i);
		if (!res)
			break;
		chunk = devm_ioremap_resource(&pdev->dev, res);
		/* only the first MEM resource becomes the register base */
		if (i == 0)
			bgp->base = chunk;
		if (IS_ERR(chunk))
			return ERR_CAST(chunk);
		i++;
	} while (res);

	if (TI_BANDGAP_HAS(bgp, TSHUT)) {
		bgp->tshut_gpiod = devm_gpiod_get(&pdev->dev, NULL, GPIOD_IN);
		if (IS_ERR(bgp->tshut_gpiod)) {
			dev_err(&pdev->dev, "invalid gpio for tshut\n");
			return ERR_CAST(bgp->tshut_gpiod);
		}
	}

	return bgp;
}

/*
 * List of SoCs on which the CPU PM notifier can cause erros on the DTEMP
 * readout.
 * Enabled notifier on these machines results in erroneous, random values which
 * could trigger unexpected thermal shutdown.
 */
static const struct soc_device_attribute soc_no_cpu_notifier[] = {
	{ .machine = "OMAP4430" },
	{ /* sentinel */ }
};

/*** Device driver call backs ***/

static int ti_bandgap_probe(struct platform_device *pdev)
{
	struct ti_bandgap *bgp;
	int clk_rate, ret, i;

	bgp = ti_bandgap_build(pdev);
	if (IS_ERR(bgp)) {
		dev_err(&pdev->dev, "failed to fetch platform data\n");
		return PTR_ERR(bgp);
	}
	bgp->dev = &pdev->dev;

	if (TI_BANDGAP_HAS(bgp, UNRELIABLE))
		dev_warn(&pdev->dev,
			 "This OMAP thermal sensor is unreliable. You've been warned\n");

	if (TI_BANDGAP_HAS(bgp, TSHUT)) {
		ret = ti_bandgap_tshut_init(bgp, pdev);
		if (ret) {
			dev_err(&pdev->dev,
				"failed to initialize system tshut IRQ\n");
			return ret;
		}
	}

	bgp->fclock = clk_get(NULL, bgp->conf->fclock_name);
	if (IS_ERR(bgp->fclock)) {
		dev_err(&pdev->dev, "failed to request fclock reference\n");
		ret = PTR_ERR(bgp->fclock);
		goto free_irqs;
	}

	bgp->div_clk = clk_get(NULL, bgp->conf->div_ck_name);
	if (IS_ERR(bgp->div_clk)) {
		dev_err(&pdev->dev, "failed to request div_ts_ck clock ref\n");
		ret = PTR_ERR(bgp->div_clk);
		goto put_fclock;
	}

	for (i = 0; i < bgp->conf->sensor_count; i++) {
		struct temp_sensor_registers *tsr;
		u32 val;

		tsr = bgp->conf->sensors[i].registers;
		/*
		 * check if the efuse has a non-zero value if not
		 * it is an untrimmed sample and the temperatures
		 * may not be accurate
		 */
		val = ti_bandgap_readl(bgp, tsr->bgap_efuse);
		if (!val)
			dev_info(&pdev->dev,
				 "Non-trimmed BGAP, Temp not accurate\n");
	}

	clk_rate = clk_round_rate(bgp->div_clk,
				  bgp->conf->sensors[0].ts_data->max_freq);
	if (clk_rate < bgp->conf->sensors[0].ts_data->min_freq ||
	    clk_rate <= 0) {
		ret = -ENODEV;
		dev_err(&pdev->dev, "wrong clock rate (%d)\n", clk_rate);
		goto put_clks;
	}

	ret = clk_set_rate(bgp->div_clk, clk_rate);
	if (ret)
		dev_err(&pdev->dev, "Cannot re-set clock rate. Continuing\n");

	bgp->clk_rate = clk_rate;
	if (TI_BANDGAP_HAS(bgp, CLK_CTRL))
		clk_prepare_enable(bgp->fclock);

	spin_lock_init(&bgp->lock);
	bgp->dev = &pdev->dev;
	platform_set_drvdata(pdev, bgp);

	ti_bandgap_power(bgp, true);

	/* Set default counter to 1 for now */
	if (TI_BANDGAP_HAS(bgp, COUNTER))
		for (i = 0; i < bgp->conf->sensor_count; i++)
			RMW_BITS(bgp, i, bgap_counter, counter_mask, 1);

	/* Set default thresholds for alert and shutdown */
	for (i = 0; i < bgp->conf->sensor_count; i++) {
		struct temp_sensor_data *ts_data;

		ts_data = bgp->conf->sensors[i].ts_data;

		if (TI_BANDGAP_HAS(bgp, TALERT)) {
			/* Set initial Talert thresholds */
			RMW_BITS(bgp, i, bgap_threshold,
				 threshold_tcold_mask, ts_data->t_cold);
			RMW_BITS(bgp, i, bgap_threshold,
				 threshold_thot_mask, ts_data->t_hot);
			/* Enable the alert events */
			RMW_BITS(bgp, i, bgap_mask_ctrl, mask_hot_mask, 1);
			RMW_BITS(bgp, i, bgap_mask_ctrl, mask_cold_mask, 1);
		}

		if (TI_BANDGAP_HAS(bgp, TSHUT_CONFIG)) {
			/* Set initial Tshut thresholds */
			RMW_BITS(bgp, i, tshut_threshold,
				 tshut_hot_mask, ts_data->tshut_hot);
			RMW_BITS(bgp, i, tshut_threshold,
				 tshut_cold_mask, ts_data->tshut_cold);
		}
	}

	if (TI_BANDGAP_HAS(bgp, MODE_CONFIG))
		ti_bandgap_set_continuous_mode(bgp);

	/* Set .250 seconds time as default counter */
	if (TI_BANDGAP_HAS(bgp, COUNTER))
		for (i = 0; i < bgp->conf->sensor_count; i++)
			RMW_BITS(bgp, i, bgap_counter, counter_mask,
				 bgp->clk_rate / 4);

	/* Every thing is good? Then expose the sensors */
	for (i = 0; i < bgp->conf->sensor_count; i++) {
		char *domain;

		if (bgp->conf->sensors[i].register_cooling) {
			ret = bgp->conf->sensors[i].register_cooling(bgp, i);
			if (ret)
				goto remove_sensors;
		}

		if (bgp->conf->expose_sensor) {
			domain = bgp->conf->sensors[i].domain;
			ret = bgp->conf->expose_sensor(bgp, i, domain);
			if (ret)
				goto remove_last_cooling;
		}
	}

	/*
	 * Enable the Interrupts once everything is set. Otherwise irq handler
	 * might be called as soon as it is enabled where as rest of framework
	 * is still getting initialised.
	 */
	if (TI_BANDGAP_HAS(bgp, TALERT)) {
		ret = ti_bandgap_talert_init(bgp, pdev);
		if (ret) {
			dev_err(&pdev->dev, "failed to initialize Talert IRQ\n");
			i = bgp->conf->sensor_count;
			goto disable_clk;
		}
	}

#ifdef CONFIG_PM_SLEEP
	bgp->nb.notifier_call = bandgap_omap_cpu_notifier;
	if (!soc_device_match(soc_no_cpu_notifier))
		cpu_pm_register_notifier(&bgp->nb);
#endif

	return 0;

remove_last_cooling:
	if (bgp->conf->sensors[i].unregister_cooling)
		bgp->conf->sensors[i].unregister_cooling(bgp, i);
remove_sensors:
	for (i--; i >= 0; i--) {
		if (bgp->conf->sensors[i].unregister_cooling)
			bgp->conf->sensors[i].unregister_cooling(bgp, i);
		if (bgp->conf->remove_sensor)
			bgp->conf->remove_sensor(bgp, i);
	}
	ti_bandgap_power(bgp, false);
disable_clk:
	if (TI_BANDGAP_HAS(bgp, CLK_CTRL))
		clk_disable_unprepare(bgp->fclock);
put_clks:
	clk_put(bgp->div_clk);
put_fclock:
	clk_put(bgp->fclock);
free_irqs:
	if (TI_BANDGAP_HAS(bgp, TSHUT))
		free_irq(gpiod_to_irq(bgp->tshut_gpiod), NULL);

	return ret;
}

static int ti_bandgap_remove(struct platform_device *pdev)
{
	struct ti_bandgap *bgp = platform_get_drvdata(pdev);
	int i;

	if (!soc_device_match(soc_no_cpu_notifier))
		cpu_pm_unregister_notifier(&bgp->nb);

	/* Remove sensor interfaces */
	for (i = 0; i < bgp->conf->sensor_count; i++) {
		if (bgp->conf->sensors[i].unregister_cooling)
			bgp->conf->sensors[i].unregister_cooling(bgp, i);

		if (bgp->conf->remove_sensor)
			bgp->conf->remove_sensor(bgp, i);
	}

	ti_bandgap_power(bgp, false);

	if (TI_BANDGAP_HAS(bgp, CLK_CTRL))
		clk_disable_unprepare(bgp->fclock);
	clk_put(bgp->fclock);
	clk_put(bgp->div_clk);

	if (TI_BANDGAP_HAS(bgp, TALERT))
		free_irq(bgp->irq, bgp);

	if (TI_BANDGAP_HAS(bgp, TSHUT))
		free_irq(gpiod_to_irq(bgp->tshut_gpiod), NULL);

	return 0;
}

#ifdef CONFIG_PM_SLEEP
/* Save per-sensor register state so it can be restored after power loss */
static int ti_bandgap_save_ctxt(struct ti_bandgap *bgp)
{
	int i;

	for (i = 0; i < bgp->conf->sensor_count; i++) {
		struct temp_sensor_registers *tsr;
		struct temp_sensor_regval *rval;

		rval = &bgp->regval[i];
		tsr = bgp->conf->sensors[i].registers;

		if (TI_BANDGAP_HAS(bgp, MODE_CONFIG))
			rval->bg_mode_ctrl = ti_bandgap_readl(bgp,
							tsr->bgap_mode_ctrl);
		if (TI_BANDGAP_HAS(bgp, COUNTER))
			rval->bg_counter = ti_bandgap_readl(bgp,
							tsr->bgap_counter);
		if (TI_BANDGAP_HAS(bgp, TALERT)) {
			rval->bg_threshold = ti_bandgap_readl(bgp,
							tsr->bgap_threshold);
			rval->bg_ctrl = ti_bandgap_readl(bgp,
							tsr->bgap_mask_ctrl);
		}

		if (TI_BANDGAP_HAS(bgp, TSHUT_CONFIG))
			rval->tshut_threshold = ti_bandgap_readl(bgp,
							tsr->tshut_threshold);
	}

	return 0;
}

/* Restore register state saved by ti_bandgap_save_ctxt() */
static int ti_bandgap_restore_ctxt(struct ti_bandgap *bgp)
{
	int i;

	for (i = 0; i < bgp->conf->sensor_count; i++) {
		struct temp_sensor_registers *tsr;
		struct temp_sensor_regval *rval;

		rval = &bgp->regval[i];
		tsr = bgp->conf->sensors[i].registers;

		if (TI_BANDGAP_HAS(bgp, TSHUT_CONFIG))
			ti_bandgap_writel(bgp, rval->tshut_threshold,
					  tsr->tshut_threshold);

		/* Force immediate temperature measurement and update
		 * of the DTEMP field
		 */
		ti_bandgap_force_single_read(bgp, i);
		if (TI_BANDGAP_HAS(bgp, COUNTER))
			ti_bandgap_writel(bgp, rval->bg_counter,
					  tsr->bgap_counter);
		if (TI_BANDGAP_HAS(bgp, MODE_CONFIG))
			ti_bandgap_writel(bgp, rval->bg_mode_ctrl,
					  tsr->bgap_mode_ctrl);
		if (TI_BANDGAP_HAS(bgp, TALERT)) {
			ti_bandgap_writel(bgp, rval->bg_threshold,
					  tsr->bgap_threshold);
			ti_bandgap_writel(bgp, rval->bg_ctrl,
					  tsr->bgap_mask_ctrl);
		}
	}

	return 0;
}

static int ti_bandgap_suspend(struct device *dev)
{
	struct ti_bandgap *bgp = dev_get_drvdata(dev);
	int err;

	err = ti_bandgap_save_ctxt(bgp);
	ti_bandgap_power(bgp, false);

	if (TI_BANDGAP_HAS(bgp, CLK_CTRL))
		clk_disable_unprepare(bgp->fclock);

	/* flag prevents the CPU PM notifier from double save/restore */
	bgp->is_suspended = true;

	return err;
}

static int bandgap_omap_cpu_notifier(struct notifier_block *nb,
				     unsigned long cmd, void *v)
{
	struct ti_bandgap *bgp;

	bgp = container_of(nb, struct ti_bandgap, nb);

	spin_lock(&bgp->lock);
	switch (cmd) {
	case CPU_CLUSTER_PM_ENTER:
		if (bgp->is_suspended)
			break;
		ti_bandgap_save_ctxt(bgp);
		ti_bandgap_power(bgp, false);
		if (TI_BANDGAP_HAS(bgp, CLK_CTRL))
			clk_disable(bgp->fclock);
		break;
	case CPU_CLUSTER_PM_ENTER_FAILED:
	case CPU_CLUSTER_PM_EXIT:
		if (bgp->is_suspended)
			break;
		if (TI_BANDGAP_HAS(bgp, CLK_CTRL))
			clk_enable(bgp->fclock);
		ti_bandgap_power(bgp, true);
		ti_bandgap_restore_ctxt(bgp);
		break;
	}
	spin_unlock(&bgp->lock);

	return NOTIFY_OK;
}

static int ti_bandgap_resume(struct device *dev)
{
	struct ti_bandgap *bgp = dev_get_drvdata(dev);

	if (TI_BANDGAP_HAS(bgp, CLK_CTRL))
		clk_prepare_enable(bgp->fclock);

	ti_bandgap_power(bgp, true);
	bgp->is_suspended = false;

	return ti_bandgap_restore_ctxt(bgp);
}

static SIMPLE_DEV_PM_OPS(ti_bandgap_dev_pm_ops, ti_bandgap_suspend,
			 ti_bandgap_resume);

#define DEV_PM_OPS	(&ti_bandgap_dev_pm_ops)
#else
#define DEV_PM_OPS	NULL
#endif

static const struct of_device_id of_ti_bandgap_match[] = {
#ifdef CONFIG_OMAP3_THERMAL
	{
		.compatible = "ti,omap34xx-bandgap",
		.data = (void *)&omap34xx_data,
	},
	{
		.compatible = "ti,omap36xx-bandgap",
		.data = (void *)&omap36xx_data,
	},
#endif
#ifdef CONFIG_OMAP4_THERMAL
	{
		.compatible = "ti,omap4430-bandgap",
		.data = (void *)&omap4430_data,
	},
	{
		.compatible = "ti,omap4460-bandgap",
		.data = (void *)&omap4460_data,
	},
	{
		.compatible = "ti,omap4470-bandgap",
		.data = (void *)&omap4470_data,
	},
#endif
#ifdef CONFIG_OMAP5_THERMAL
	{
		.compatible = "ti,omap5430-bandgap",
		.data = (void *)&omap5430_data,
	},
#endif
#ifdef CONFIG_DRA752_THERMAL
	{
		.compatible = "ti,dra752-bandgap",
		.data = (void *)&dra752_data,
	},
#endif
	/* Sentinel */
	{ },
};
MODULE_DEVICE_TABLE(of, of_ti_bandgap_match);

static struct platform_driver ti_bandgap_sensor_driver = {
	.probe = ti_bandgap_probe,
	.remove = ti_bandgap_remove,
	.driver = {
			.name = "ti-soc-thermal",
			.pm = DEV_PM_OPS,
			.of_match_table	= of_ti_bandgap_match,
	},
};

module_platform_driver(ti_bandgap_sensor_driver);

MODULE_DESCRIPTION("OMAP4+ bandgap temperature sensor driver");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:ti-soc-thermal");
MODULE_AUTHOR("Texas Instrument Inc.");
/* ===== end of drivers/thermal/ti-soc-thermal/ti-bandgap.c (repo: linux-master) ===== */
// SPDX-License-Identifier: GPL-2.0-only /* * OMAP4 thermal driver. * * Copyright (C) 2011-2012 Texas Instruments Inc. * Contact: * Eduardo Valentin <[email protected]> */ #include "ti-thermal.h" #include "ti-bandgap.h" #include "omap4xxx-bandgap.h" /* * OMAP4430 has one instance of thermal sensor for MPU * need to describe the individual bit fields */ static struct temp_sensor_registers omap4430_mpu_temp_sensor_registers = { .temp_sensor_ctrl = OMAP4430_TEMP_SENSOR_CTRL_OFFSET, .bgap_tempsoff_mask = OMAP4430_BGAP_TEMPSOFF_MASK, .bgap_soc_mask = OMAP4430_BGAP_TEMP_SENSOR_SOC_MASK, .bgap_eocz_mask = OMAP4430_BGAP_TEMP_SENSOR_EOCZ_MASK, .bgap_dtemp_mask = OMAP4430_BGAP_TEMP_SENSOR_DTEMP_MASK, .bgap_mode_ctrl = OMAP4430_TEMP_SENSOR_CTRL_OFFSET, .mode_ctrl_mask = OMAP4430_CONTINUOUS_MODE_MASK, .bgap_efuse = OMAP4430_FUSE_OPP_BGAP, }; /* Thresholds and limits for OMAP4430 MPU temperature sensor */ static struct temp_sensor_data omap4430_mpu_temp_sensor_data = { .min_freq = OMAP4430_MIN_FREQ, .max_freq = OMAP4430_MAX_FREQ, }; /* * Temperature values in milli degree celsius * ADC code values from 13 to 107, see TRM * "18.4.10.2.3 ADC Codes Versus Temperature". 
*/ static const int omap4430_adc_to_temp[OMAP4430_ADC_END_VALUE - OMAP4430_ADC_START_VALUE + 1] = { -40000, -38000, -35000, -34000, -32000, -30000, -28000, -26000, -24000, -22000, -20000, -18500, -17000, -15000, -13500, -12000, -10000, -8000, -6500, -5000, -3500, -1500, 0, 2000, 3500, 5000, 6500, 8500, 10000, 12000, 13500, 15000, 17000, 19000, 21000, 23000, 25000, 27000, 28500, 30000, 32000, 33500, 35000, 37000, 38500, 40000, 42000, 43500, 45000, 47000, 48500, 50000, 52000, 53500, 55000, 57000, 58500, 60000, 62000, 64000, 66000, 68000, 70000, 71500, 73500, 75000, 77000, 78500, 80000, 82000, 83500, 85000, 87000, 88500, 90000, 92000, 93500, 95000, 97000, 98500, 100000, 102000, 103500, 105000, 107000, 109000, 111000, 113000, 115000, 117000, 118500, 120000, 122000, 123500, 125000, }; /* OMAP4430 data */ const struct ti_bandgap_data omap4430_data = { .features = TI_BANDGAP_FEATURE_MODE_CONFIG | TI_BANDGAP_FEATURE_CLK_CTRL | TI_BANDGAP_FEATURE_POWER_SWITCH | TI_BANDGAP_FEATURE_CONT_MODE_ONLY, .fclock_name = "bandgap_fclk", .div_ck_name = "bandgap_fclk", .conv_table = omap4430_adc_to_temp, .adc_start_val = OMAP4430_ADC_START_VALUE, .adc_end_val = OMAP4430_ADC_END_VALUE, .expose_sensor = ti_thermal_expose_sensor, .remove_sensor = ti_thermal_remove_sensor, .sensors = { { .registers = &omap4430_mpu_temp_sensor_registers, .ts_data = &omap4430_mpu_temp_sensor_data, .domain = "cpu", .slope_pcb = OMAP_GRADIENT_SLOPE_W_PCB_4430, .constant_pcb = OMAP_GRADIENT_CONST_W_PCB_4430, .register_cooling = ti_thermal_register_cpu_cooling, .unregister_cooling = ti_thermal_unregister_cpu_cooling, }, }, .sensor_count = 1, }; /* * OMAP4460 has one instance of thermal sensor for MPU * need to describe the individual bit fields */ static struct temp_sensor_registers omap4460_mpu_temp_sensor_registers = { .temp_sensor_ctrl = OMAP4460_TEMP_SENSOR_CTRL_OFFSET, .bgap_tempsoff_mask = OMAP4460_BGAP_TEMPSOFF_MASK, .bgap_soc_mask = OMAP4460_BGAP_TEMP_SENSOR_SOC_MASK, .bgap_eocz_mask = 
OMAP4460_BGAP_TEMP_SENSOR_EOCZ_MASK, .bgap_dtemp_mask = OMAP4460_BGAP_TEMP_SENSOR_DTEMP_MASK, .bgap_mask_ctrl = OMAP4460_BGAP_CTRL_OFFSET, .mask_hot_mask = OMAP4460_MASK_HOT_MASK, .mask_cold_mask = OMAP4460_MASK_COLD_MASK, .bgap_mode_ctrl = OMAP4460_BGAP_CTRL_OFFSET, .mode_ctrl_mask = OMAP4460_CONTINUOUS_MODE_MASK, .bgap_counter = OMAP4460_BGAP_COUNTER_OFFSET, .counter_mask = OMAP4460_COUNTER_MASK, .bgap_threshold = OMAP4460_BGAP_THRESHOLD_OFFSET, .threshold_thot_mask = OMAP4460_T_HOT_MASK, .threshold_tcold_mask = OMAP4460_T_COLD_MASK, .tshut_threshold = OMAP4460_BGAP_TSHUT_OFFSET, .tshut_hot_mask = OMAP4460_TSHUT_HOT_MASK, .tshut_cold_mask = OMAP4460_TSHUT_COLD_MASK, .bgap_status = OMAP4460_BGAP_STATUS_OFFSET, .status_hot_mask = OMAP4460_HOT_FLAG_MASK, .status_cold_mask = OMAP4460_COLD_FLAG_MASK, .bgap_efuse = OMAP4460_FUSE_OPP_BGAP, }; /* Thresholds and limits for OMAP4460 MPU temperature sensor */ static struct temp_sensor_data omap4460_mpu_temp_sensor_data = { .tshut_hot = OMAP4460_TSHUT_HOT, .tshut_cold = OMAP4460_TSHUT_COLD, .t_hot = OMAP4460_T_HOT, .t_cold = OMAP4460_T_COLD, .min_freq = OMAP4460_MIN_FREQ, .max_freq = OMAP4460_MAX_FREQ, }; /* * Temperature values in milli degree celsius * ADC code values from 530 to 923 */ static const int omap4460_adc_to_temp[OMAP4460_ADC_END_VALUE - OMAP4460_ADC_START_VALUE + 1] = { -40000, -40000, -40000, -40000, -39800, -39400, -39000, -38600, -38200, -37800, -37300, -36800, -36400, -36000, -35600, -35200, -34800, -34300, -33800, -33400, -33000, -32600, -32200, -31800, -31300, -30800, -30400, -30000, -29600, -29200, -28700, -28200, -27800, -27400, -27000, -26600, -26200, -25700, -25200, -24800, -24400, -24000, -23600, -23200, -22700, -22200, -21800, -21400, -21000, -20600, -20200, -19700, -19200, -18800, -18400, -18000, -17600, -17200, -16700, -16200, -15800, -15400, -15000, -14600, -14200, -13700, -13200, -12800, -12400, -12000, -11600, -11200, -10700, -10200, -9800, -9400, -9000, -8600, -8200, -7700, -7200, -6800, 
-6400, -6000, -5600, -5200, -4800, -4300, -3800, -3400, -3000, -2600, -2200, -1800, -1300, -800, -400, 0, 400, 800, 1200, 1600, 2100, 2600, 3000, 3400, 3800, 4200, 4600, 5100, 5600, 6000, 6400, 6800, 7200, 7600, 8000, 8500, 9000, 9400, 9800, 10200, 10600, 11000, 11400, 11900, 12400, 12800, 13200, 13600, 14000, 14400, 14800, 15300, 15800, 16200, 16600, 17000, 17400, 17800, 18200, 18700, 19200, 19600, 20000, 20400, 20800, 21200, 21600, 22100, 22600, 23000, 23400, 23800, 24200, 24600, 25000, 25400, 25900, 26400, 26800, 27200, 27600, 28000, 28400, 28800, 29300, 29800, 30200, 30600, 31000, 31400, 31800, 32200, 32600, 33100, 33600, 34000, 34400, 34800, 35200, 35600, 36000, 36400, 36800, 37300, 37800, 38200, 38600, 39000, 39400, 39800, 40200, 40600, 41100, 41600, 42000, 42400, 42800, 43200, 43600, 44000, 44400, 44800, 45300, 45800, 46200, 46600, 47000, 47400, 47800, 48200, 48600, 49000, 49500, 50000, 50400, 50800, 51200, 51600, 52000, 52400, 52800, 53200, 53700, 54200, 54600, 55000, 55400, 55800, 56200, 56600, 57000, 57400, 57800, 58200, 58700, 59200, 59600, 60000, 60400, 60800, 61200, 61600, 62000, 62400, 62800, 63300, 63800, 64200, 64600, 65000, 65400, 65800, 66200, 66600, 67000, 67400, 67800, 68200, 68700, 69200, 69600, 70000, 70400, 70800, 71200, 71600, 72000, 72400, 72800, 73200, 73600, 74100, 74600, 75000, 75400, 75800, 76200, 76600, 77000, 77400, 77800, 78200, 78600, 79000, 79400, 79800, 80300, 80800, 81200, 81600, 82000, 82400, 82800, 83200, 83600, 84000, 84400, 84800, 85200, 85600, 86000, 86400, 86800, 87300, 87800, 88200, 88600, 89000, 89400, 89800, 90200, 90600, 91000, 91400, 91800, 92200, 92600, 93000, 93400, 93800, 94200, 94600, 95000, 95500, 96000, 96400, 96800, 97200, 97600, 98000, 98400, 98800, 99200, 99600, 100000, 100400, 100800, 101200, 101600, 102000, 102400, 102800, 103200, 103600, 104000, 104400, 104800, 105200, 105600, 106100, 106600, 107000, 107400, 107800, 108200, 108600, 109000, 109400, 109800, 110200, 110600, 111000, 111400, 111800, 112200, 
112600, 113000, 113400, 113800, 114200, 114600, 115000, 115400, 115800, 116200, 116600, 117000, 117400, 117800, 118200, 118600, 119000, 119400, 119800, 120200, 120600, 121000, 121400, 121800, 122200, 122600, 123000, 123400, 123800, 124200, 124600, 124900, 125000, 125000, 125000, 125000 }; /* OMAP4460 data */ const struct ti_bandgap_data omap4460_data = { .features = TI_BANDGAP_FEATURE_TSHUT | TI_BANDGAP_FEATURE_TSHUT_CONFIG | TI_BANDGAP_FEATURE_TALERT | TI_BANDGAP_FEATURE_MODE_CONFIG | TI_BANDGAP_FEATURE_POWER_SWITCH | TI_BANDGAP_FEATURE_CLK_CTRL | TI_BANDGAP_FEATURE_COUNTER, .fclock_name = "bandgap_ts_fclk", .div_ck_name = "div_ts_ck", .conv_table = omap4460_adc_to_temp, .adc_start_val = OMAP4460_ADC_START_VALUE, .adc_end_val = OMAP4460_ADC_END_VALUE, .expose_sensor = ti_thermal_expose_sensor, .remove_sensor = ti_thermal_remove_sensor, .report_temperature = ti_thermal_report_sensor_temperature, .sensors = { { .registers = &omap4460_mpu_temp_sensor_registers, .ts_data = &omap4460_mpu_temp_sensor_data, .domain = "cpu", .slope_pcb = OMAP_GRADIENT_SLOPE_W_PCB_4460, .constant_pcb = OMAP_GRADIENT_CONST_W_PCB_4460, .register_cooling = ti_thermal_register_cpu_cooling, .unregister_cooling = ti_thermal_unregister_cpu_cooling, }, }, .sensor_count = 1, }; /* OMAP4470 data */ const struct ti_bandgap_data omap4470_data = { .features = TI_BANDGAP_FEATURE_TSHUT | TI_BANDGAP_FEATURE_TSHUT_CONFIG | TI_BANDGAP_FEATURE_TALERT | TI_BANDGAP_FEATURE_MODE_CONFIG | TI_BANDGAP_FEATURE_POWER_SWITCH | TI_BANDGAP_FEATURE_CLK_CTRL | TI_BANDGAP_FEATURE_COUNTER, .fclock_name = "bandgap_ts_fclk", .div_ck_name = "div_ts_ck", .conv_table = omap4460_adc_to_temp, .adc_start_val = OMAP4460_ADC_START_VALUE, .adc_end_val = OMAP4460_ADC_END_VALUE, .expose_sensor = ti_thermal_expose_sensor, .remove_sensor = ti_thermal_remove_sensor, .report_temperature = ti_thermal_report_sensor_temperature, .sensors = { { .registers = &omap4460_mpu_temp_sensor_registers, .ts_data = &omap4460_mpu_temp_sensor_data, .domain 
= "cpu", .slope_pcb = OMAP_GRADIENT_SLOPE_W_PCB_4470, .constant_pcb = OMAP_GRADIENT_CONST_W_PCB_4470, .register_cooling = ti_thermal_register_cpu_cooling, .unregister_cooling = ti_thermal_unregister_cpu_cooling, }, }, .sensor_count = 1, };
linux-master
drivers/thermal/ti-soc-thermal/omap4-thermal-data.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * OMAP thermal driver interface
 *
 * Glue between the TI bandgap sensor driver and the Linux thermal
 * framework: registers DT thermal zones, reports temperatures (with
 * optional PCB-sensor extrapolation), and hooks up cpufreq cooling.
 *
 * Copyright (C) 2012 Texas Instruments Incorporated - http://www.ti.com/
 * Contact:
 *   Eduardo Valentin <[email protected]>
 */

#include <linux/device.h>
#include <linux/err.h>
#include <linux/mutex.h>
#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/workqueue.h>
#include <linux/thermal.h>
#include <linux/cpufreq.h>
#include <linux/cpumask.h>
#include <linux/cpu_cooling.h>
#include <linux/of.h>

#include "ti-thermal.h"
#include "ti-bandgap.h"
#include "../thermal_hwmon.h"

/* polling interval programmed into the bandgap IP, in milliseconds */
#define TI_BANDGAP_UPDATE_INTERVAL_MS 250

/* common data structures */
struct ti_thermal_data {
	struct cpufreq_policy *policy;		/* cpufreq policy used for cooling (CPU0) */
	struct thermal_zone_device *ti_thermal;	/* zone registered for this sensor */
	struct thermal_zone_device *pcb_tz;	/* optional "pcb" zone, or PTR_ERR() */
	struct thermal_cooling_device *cool_dev;/* cpufreq cooling device, if registered */
	struct ti_bandgap *bgp;			/* owning bandgap instance */
	enum thermal_device_mode mode;
	struct work_struct thermal_wq;		/* defers zone update out of IRQ context */
	int sensor_id;				/* index into bgp->conf->sensors[] */
	bool our_zone;				/* true if we (not DT core) own ti_thermal */
};

/*
 * Workqueue handler: pushes a thermal zone update triggered by a bandgap
 * temperature alert (scheduled from ti_thermal_report_sensor_temperature()).
 */
static void ti_thermal_work(struct work_struct *work)
{
	struct ti_thermal_data *data = container_of(work,
					struct ti_thermal_data, thermal_wq);

	thermal_zone_device_update(data->ti_thermal, THERMAL_EVENT_UNSPECIFIED);

	dev_dbg(data->bgp->dev, "updated thermal zone %s\n",
		thermal_zone_device_type(data->ti_thermal));
}

/**
 * ti_thermal_hotspot_temperature - returns sensor extrapolated temperature
 * @t:	omap sensor temperature
 * @s:	omap sensor slope value
 * @c:	omap sensor const value
 *
 * Hotspot extrapolation: t + (t * s / 1000 + c), with the correction
 * term clamped at zero so the estimate never drops below the raw
 * sensor reading.  Slope is in units of 1/1000 per the /1000 divide.
 */
static inline int ti_thermal_hotspot_temperature(int t, int s, int c)
{
	int delta = t * s / 1000 + c;

	if (delta < 0)
		delta = 0;

	return t + delta;
}

/* thermal zone ops */
/* Get temperature callback function for thermal zone */
static inline int __ti_thermal_get_temp(struct thermal_zone_device *tz, int *temp)
{
	struct thermal_zone_device *pcb_tz = NULL;
	struct ti_thermal_data *data = thermal_zone_device_priv(tz);
	struct ti_bandgap *bgp;
	const struct ti_temp_sensor *s;
	int ret, tmp, slope, constant;
	int pcb_temp;

	/* zone may be probed before sensor data is attached */
	if (!data)
		return 0;

	bgp = data->bgp;
	s = &bgp->conf->sensors[data->sensor_id];

	ret = ti_bandgap_read_temperature(bgp, data->sensor_id, &tmp);
	if (ret)
		return ret;

	/* Default constants */
	slope = thermal_zone_get_slope(tz);
	constant = thermal_zone_get_offset(tz);

	pcb_tz = data->pcb_tz;
	/* In case pcb zone is available, use the extrapolation rule with it */
	if (!IS_ERR(pcb_tz)) {
		ret = thermal_zone_get_temp(pcb_tz, &pcb_temp);
		if (!ret) {
			tmp -= pcb_temp; /* got a valid PCB temp */
			slope = s->slope_pcb;
			constant = s->constant_pcb;
		} else {
			/* best-effort: fall back to the zone's own slope/offset */
			dev_err(bgp->dev,
				"Failed to read PCB state. Using defaults\n");
			ret = 0;
		}
	}
	*temp = ti_thermal_hotspot_temperature(tmp, slope, constant);

	return ret;
}

/*
 * Trend callback: maps the bandgap's signed temperature gradient into
 * the framework's RAISING/DROPPING/STABLE enum.
 */
static int __ti_thermal_get_trend(struct thermal_zone_device *tz,
				  const struct thermal_trip *trip,
				  enum thermal_trend *trend)
{
	struct ti_thermal_data *data = thermal_zone_device_priv(tz);
	struct ti_bandgap *bgp;
	int id, tr, ret = 0;

	bgp = data->bgp;
	id = data->sensor_id;

	ret = ti_bandgap_get_trend(bgp, id, &tr);
	if (ret)
		return ret;

	if (tr > 0)
		*trend = THERMAL_TREND_RAISING;
	else if (tr < 0)
		*trend = THERMAL_TREND_DROPPING;
	else
		*trend = THERMAL_TREND_STABLE;

	return 0;
}

static const struct thermal_zone_device_ops ti_of_thermal_ops = {
	.get_temp = __ti_thermal_get_temp,
	.get_trend = __ti_thermal_get_trend,
};

/*
 * Allocate and initialize the per-sensor private data (devm-managed).
 * Returns NULL on allocation failure.  pcb_tz lookup may legitimately
 * fail; callers test it with IS_ERR() before use.
 */
static struct ti_thermal_data
*ti_thermal_build_data(struct ti_bandgap *bgp, int id)
{
	struct ti_thermal_data *data;

	data = devm_kzalloc(bgp->dev, sizeof(*data), GFP_KERNEL);
	if (!data) {
		dev_err(bgp->dev, "kzalloc fail\n");
		return NULL;
	}
	data->sensor_id = id;
	data->bgp = bgp;
	data->mode = THERMAL_DEVICE_ENABLED;
	/* pcb_tz will be either valid or PTR_ERR() */
	data->pcb_tz = thermal_zone_get_zone_by_name("pcb");
	INIT_WORK(&data->thermal_wq, ti_thermal_work);

	return data;
}

/*
 * Register a DT-described thermal zone for sensor @id and attach the
 * private data to the bandgap core.  Also programs the hardware update
 * interval and exposes the zone via hwmon sysfs.
 * Returns 0 on success or a negative errno.
 */
int ti_thermal_expose_sensor(struct ti_bandgap *bgp, int id,
			     char *domain)
{
	struct ti_thermal_data *data;

	data = ti_bandgap_get_sensor_data(bgp, id);

	/* data may already exist if register_cpu_cooling() ran first */
	if (IS_ERR_OR_NULL(data))
		data = ti_thermal_build_data(bgp, id);

	if (!data)
		return -EINVAL;

	/* in case this is specified by DT */
	data->ti_thermal = devm_thermal_of_zone_register(bgp->dev, id, data,
					&ti_of_thermal_ops);
	if (IS_ERR(data->ti_thermal)) {
		dev_err(bgp->dev, "thermal zone device is NULL\n");
		return PTR_ERR(data->ti_thermal);
	}

	ti_bandgap_set_sensor_data(bgp, id, data);
	ti_bandgap_write_update_interval(bgp, data->sensor_id,
					TI_BANDGAP_UPDATE_INTERVAL_MS);

	devm_thermal_add_hwmon_sysfs(bgp->dev, data->ti_thermal);

	return 0;
}

/*
 * Tear down the zone for sensor @id, but only if we registered it
 * ourselves (our_zone); devm-registered DT zones are left to devres.
 */
int ti_thermal_remove_sensor(struct ti_bandgap *bgp, int id)
{
	struct ti_thermal_data *data;

	data = ti_bandgap_get_sensor_data(bgp, id);

	if (!IS_ERR_OR_NULL(data) && data->ti_thermal) {
		if (data->our_zone)
			thermal_zone_device_unregister(data->ti_thermal);
	}

	return 0;
}

/*
 * Called from the bandgap alert path: kicks the workqueue so the zone
 * update runs in process context.
 *
 * NOTE(review): data is dereferenced without an IS_ERR_OR_NULL() check,
 * unlike the other entry points here.  Presumably safe because the
 * bandgap core only wires up report_temperature after expose_sensor()
 * has attached the data — confirm against the caller.
 */
int ti_thermal_report_sensor_temperature(struct ti_bandgap *bgp, int id)
{
	struct ti_thermal_data *data;

	data = ti_bandgap_get_sensor_data(bgp, id);

	schedule_work(&data->thermal_wq);

	return 0;
}

/*
 * Register a cpufreq cooling device bound to CPU0's policy, unless the
 * zone is described in DT (then cpufreq itself provides cooling).
 * Returns 0, -EPROBE_DEFER while cpufreq is not ready, or a negative
 * errno from cpufreq_cooling_register().
 */
int ti_thermal_register_cpu_cooling(struct ti_bandgap *bgp, int id)
{
	struct ti_thermal_data *data;
	struct device_node *np = bgp->dev->of_node;

	/*
	 * We are assuming here that if one deploys the zone
	 * using DT, then it must be aware that the cooling device
	 * loading has to happen via cpufreq driver.
	 */
	if (of_property_present(np, "#thermal-sensor-cells"))
		return 0;

	/* NOTE(review): inverted-order equivalent of IS_ERR_OR_NULL(data)
	 * used elsewhere in this file — consider unifying the idiom. */
	data = ti_bandgap_get_sensor_data(bgp, id);
	if (!data || IS_ERR(data))
		data = ti_thermal_build_data(bgp, id);

	if (!data)
		return -EINVAL;

	data->policy = cpufreq_cpu_get(0);
	if (!data->policy) {
		pr_debug("%s: CPUFreq policy not found\n", __func__);
		return -EPROBE_DEFER;
	}

	/* Register cooling device */
	data->cool_dev = cpufreq_cooling_register(data->policy);
	if (IS_ERR(data->cool_dev)) {
		int ret = PTR_ERR(data->cool_dev);
		dev_err(bgp->dev, "Failed to register cpu cooling device %d\n",
			ret);
		/* drop the policy reference taken above */
		cpufreq_cpu_put(data->policy);

		return ret;
	}
	ti_bandgap_set_sensor_data(bgp, id, data);

	return 0;
}

/*
 * Unregister the cooling device and release the cpufreq policy
 * reference acquired in ti_thermal_register_cpu_cooling().
 */
int ti_thermal_unregister_cpu_cooling(struct ti_bandgap *bgp, int id)
{
	struct ti_thermal_data *data;

	data = ti_bandgap_get_sensor_data(bgp, id);

	if (!IS_ERR_OR_NULL(data)) {
		cpufreq_cooling_unregister(data->cool_dev);
		if (data->policy)
			cpufreq_cpu_put(data->policy);
	}

	return 0;
}
linux-master
drivers/thermal/ti-soc-thermal/ti-thermal-common.c
// SPDX-License-Identifier: GPL-2.0-only /* * OMAP3 thermal driver. * * Copyright (C) 2011-2012 Texas Instruments Inc. * Copyright (C) 2014 Pavel Machek <[email protected]> * * Note * http://www.ti.com/lit/er/sprz278f/sprz278f.pdf "Advisory * 3.1.1.186 MMC OCP Clock Not Gated When Thermal Sensor Is Used" * * Also TI says: * Just be careful when you try to make thermal policy like decisions * based on this sensor. Placement of the sensor w.r.t the actual logic * generating heat has to be a factor as well. If you are just looking * for an approximation temperature (thermometerish kind), you might be * ok with this. I am not sure we'd find any TI data around this.. just a * heads up. */ #include "ti-thermal.h" #include "ti-bandgap.h" /* * OMAP34XX has one instance of thermal sensor for MPU * need to describe the individual bit fields */ static struct temp_sensor_registers omap34xx_mpu_temp_sensor_registers = { .temp_sensor_ctrl = 0, .bgap_soc_mask = BIT(8), .bgap_eocz_mask = BIT(7), .bgap_dtemp_mask = 0x7f, .bgap_mode_ctrl = 0, .mode_ctrl_mask = BIT(9), }; /* Thresholds and limits for OMAP34XX MPU temperature sensor */ static struct temp_sensor_data omap34xx_mpu_temp_sensor_data = { .min_freq = 32768, .max_freq = 32768, }; /* * Temperature values in milli degree celsius */ static const int omap34xx_adc_to_temp[128] = { -40000, -40000, -40000, -40000, -40000, -39000, -38000, -36000, -34000, -32000, -31000, -29000, -28000, -26000, -25000, -24000, -22000, -21000, -19000, -18000, -17000, -15000, -14000, -12000, -11000, -9000, -8000, -7000, -5000, -4000, -2000, -1000, 0000, 1000, 3000, 4000, 5000, 7000, 8000, 10000, 11000, 13000, 14000, 15000, 17000, 18000, 20000, 21000, 22000, 24000, 25000, 27000, 28000, 30000, 31000, 32000, 34000, 35000, 37000, 38000, 39000, 41000, 42000, 44000, 45000, 47000, 48000, 49000, 51000, 52000, 53000, 55000, 56000, 58000, 59000, 60000, 62000, 63000, 65000, 66000, 67000, 69000, 70000, 72000, 73000, 74000, 76000, 77000, 79000, 80000, 81000, 83000, 
84000, 85000, 87000, 88000, 89000, 91000, 92000, 94000, 95000, 96000, 98000, 99000, 100000, 102000, 103000, 105000, 106000, 107000, 109000, 110000, 111000, 113000, 114000, 116000, 117000, 118000, 120000, 121000, 122000, 124000, 124000, 125000, 125000, 125000, 125000, 125000 }; /* OMAP34XX data */ const struct ti_bandgap_data omap34xx_data = { .features = TI_BANDGAP_FEATURE_CLK_CTRL | TI_BANDGAP_FEATURE_UNRELIABLE, .fclock_name = "ts_fck", .div_ck_name = "ts_fck", .conv_table = omap34xx_adc_to_temp, .adc_start_val = 0, .adc_end_val = 127, .expose_sensor = ti_thermal_expose_sensor, .remove_sensor = ti_thermal_remove_sensor, .sensors = { { .registers = &omap34xx_mpu_temp_sensor_registers, .ts_data = &omap34xx_mpu_temp_sensor_data, .domain = "cpu", .slope_pcb = 0, .constant_pcb = 20000, .register_cooling = NULL, .unregister_cooling = NULL, }, }, .sensor_count = 1, }; /* * OMAP36XX has one instance of thermal sensor for MPU * need to describe the individual bit fields */ static struct temp_sensor_registers omap36xx_mpu_temp_sensor_registers = { .temp_sensor_ctrl = 0, .bgap_soc_mask = BIT(9), .bgap_eocz_mask = BIT(8), .bgap_dtemp_mask = 0xFF, .bgap_mode_ctrl = 0, .mode_ctrl_mask = BIT(10), }; /* Thresholds and limits for OMAP36XX MPU temperature sensor */ static struct temp_sensor_data omap36xx_mpu_temp_sensor_data = { .min_freq = 32768, .max_freq = 32768, }; /* * Temperature values in milli degree celsius */ static const int omap36xx_adc_to_temp[128] = { -40000, -40000, -40000, -40000, -40000, -40000, -40000, -40000, -40000, -40000, -40000, -40000, -40000, -38000, -35000, -34000, -32000, -30000, -28000, -26000, -24000, -22000, -20000, -18500, -17000, -15000, -13500, -12000, -10000, -8000, -6500, -5000, -3500, -1500, 0, 2000, 3500, 5000, 6500, 8500, 10000, 12000, 13500, 15000, 17000, 19000, 21000, 23000, 25000, 27000, 28500, 30000, 32000, 33500, 35000, 37000, 38500, 40000, 42000, 43500, 45000, 47000, 48500, 50000, 52000, 53500, 55000, 57000, 58500, 60000, 62000, 64000, 
66000, 68000, 70000, 71500, 73500, 75000, 77000, 78500, 80000, 82000, 83500, 85000, 87000, 88500, 90000, 92000, 93500, 95000, 97000, 98500, 100000, 102000, 103500, 105000, 107000, 109000, 111000, 113000, 115000, 117000, 118500, 120000, 122000, 123500, 125000, 125000, 125000, 125000, 125000, 125000, 125000, 125000, 125000, 125000, 125000, 125000, 125000, 125000, 125000, 125000, 125000, 125000, 125000, 125000, 125000, 125000 }; /* OMAP36XX data */ const struct ti_bandgap_data omap36xx_data = { .features = TI_BANDGAP_FEATURE_CLK_CTRL | TI_BANDGAP_FEATURE_UNRELIABLE, .fclock_name = "ts_fck", .div_ck_name = "ts_fck", .conv_table = omap36xx_adc_to_temp, .adc_start_val = 0, .adc_end_val = 127, .expose_sensor = ti_thermal_expose_sensor, .remove_sensor = ti_thermal_remove_sensor, .sensors = { { .registers = &omap36xx_mpu_temp_sensor_registers, .ts_data = &omap36xx_mpu_temp_sensor_data, .domain = "cpu", .slope_pcb = 0, .constant_pcb = 20000, .register_cooling = NULL, .unregister_cooling = NULL, }, }, .sensor_count = 1, };
linux-master
drivers/thermal/ti-soc-thermal/omap3-thermal-data.c
// SPDX-License-Identifier: GPL-2.0-only /* * OMAP5 thermal driver. * * Copyright (C) 2011-2012 Texas Instruments Inc. * Contact: * Eduardo Valentin <[email protected]> */ #include "ti-thermal.h" #include "ti-bandgap.h" #include "omap5xxx-bandgap.h" /* * OMAP5430 has three instances of thermal sensor for MPU, GPU & CORE, * need to describe the individual registers and bit fields. */ /* * OMAP5430 MPU thermal sensor register offset and bit-fields */ static struct temp_sensor_registers omap5430_mpu_temp_sensor_registers = { .temp_sensor_ctrl = OMAP5430_TEMP_SENSOR_MPU_OFFSET, .bgap_tempsoff_mask = OMAP5430_BGAP_TEMPSOFF_MASK, .bgap_eocz_mask = OMAP5430_BGAP_TEMP_SENSOR_EOCZ_MASK, .bgap_dtemp_mask = OMAP5430_BGAP_TEMP_SENSOR_DTEMP_MASK, .bgap_mask_ctrl = OMAP5430_BGAP_CTRL_OFFSET, .mask_hot_mask = OMAP5430_MASK_HOT_MPU_MASK, .mask_cold_mask = OMAP5430_MASK_COLD_MPU_MASK, .mask_counter_delay_mask = OMAP5430_MASK_COUNTER_DELAY_MASK, .mask_freeze_mask = OMAP5430_MASK_FREEZE_MPU_MASK, .bgap_counter = OMAP5430_BGAP_CTRL_OFFSET, .counter_mask = OMAP5430_COUNTER_MASK, .bgap_threshold = OMAP5430_BGAP_THRESHOLD_MPU_OFFSET, .threshold_thot_mask = OMAP5430_T_HOT_MASK, .threshold_tcold_mask = OMAP5430_T_COLD_MASK, .tshut_threshold = OMAP5430_BGAP_TSHUT_MPU_OFFSET, .tshut_hot_mask = OMAP5430_TSHUT_HOT_MASK, .tshut_cold_mask = OMAP5430_TSHUT_COLD_MASK, .bgap_status = OMAP5430_BGAP_STATUS_OFFSET, .status_hot_mask = OMAP5430_HOT_MPU_FLAG_MASK, .status_cold_mask = OMAP5430_COLD_MPU_FLAG_MASK, .ctrl_dtemp_1 = OMAP5430_BGAP_DTEMP_MPU_1_OFFSET, .ctrl_dtemp_2 = OMAP5430_BGAP_DTEMP_MPU_2_OFFSET, .bgap_efuse = OMAP5430_FUSE_OPP_BGAP_MPU, }; /* * OMAP5430 GPU thermal sensor register offset and bit-fields */ static struct temp_sensor_registers omap5430_gpu_temp_sensor_registers = { .temp_sensor_ctrl = OMAP5430_TEMP_SENSOR_GPU_OFFSET, .bgap_tempsoff_mask = OMAP5430_BGAP_TEMPSOFF_MASK, .bgap_eocz_mask = OMAP5430_BGAP_TEMP_SENSOR_EOCZ_MASK, .bgap_dtemp_mask = 
OMAP5430_BGAP_TEMP_SENSOR_DTEMP_MASK, .bgap_mask_ctrl = OMAP5430_BGAP_CTRL_OFFSET, .mask_hot_mask = OMAP5430_MASK_HOT_GPU_MASK, .mask_cold_mask = OMAP5430_MASK_COLD_GPU_MASK, .mask_counter_delay_mask = OMAP5430_MASK_COUNTER_DELAY_MASK, .mask_freeze_mask = OMAP5430_MASK_FREEZE_GPU_MASK, .bgap_counter = OMAP5430_BGAP_CTRL_OFFSET, .counter_mask = OMAP5430_COUNTER_MASK, .bgap_threshold = OMAP5430_BGAP_THRESHOLD_GPU_OFFSET, .threshold_thot_mask = OMAP5430_T_HOT_MASK, .threshold_tcold_mask = OMAP5430_T_COLD_MASK, .tshut_threshold = OMAP5430_BGAP_TSHUT_GPU_OFFSET, .tshut_hot_mask = OMAP5430_TSHUT_HOT_MASK, .tshut_cold_mask = OMAP5430_TSHUT_COLD_MASK, .bgap_status = OMAP5430_BGAP_STATUS_OFFSET, .status_hot_mask = OMAP5430_HOT_GPU_FLAG_MASK, .status_cold_mask = OMAP5430_COLD_GPU_FLAG_MASK, .ctrl_dtemp_1 = OMAP5430_BGAP_DTEMP_GPU_1_OFFSET, .ctrl_dtemp_2 = OMAP5430_BGAP_DTEMP_GPU_2_OFFSET, .bgap_efuse = OMAP5430_FUSE_OPP_BGAP_GPU, }; /* * OMAP5430 CORE thermal sensor register offset and bit-fields */ static struct temp_sensor_registers omap5430_core_temp_sensor_registers = { .temp_sensor_ctrl = OMAP5430_TEMP_SENSOR_CORE_OFFSET, .bgap_tempsoff_mask = OMAP5430_BGAP_TEMPSOFF_MASK, .bgap_eocz_mask = OMAP5430_BGAP_TEMP_SENSOR_EOCZ_MASK, .bgap_dtemp_mask = OMAP5430_BGAP_TEMP_SENSOR_DTEMP_MASK, .bgap_mask_ctrl = OMAP5430_BGAP_CTRL_OFFSET, .mask_hot_mask = OMAP5430_MASK_HOT_CORE_MASK, .mask_cold_mask = OMAP5430_MASK_COLD_CORE_MASK, .mask_counter_delay_mask = OMAP5430_MASK_COUNTER_DELAY_MASK, .mask_freeze_mask = OMAP5430_MASK_FREEZE_CORE_MASK, .bgap_counter = OMAP5430_BGAP_CTRL_OFFSET, .counter_mask = OMAP5430_COUNTER_MASK, .bgap_threshold = OMAP5430_BGAP_THRESHOLD_CORE_OFFSET, .threshold_thot_mask = OMAP5430_T_HOT_MASK, .threshold_tcold_mask = OMAP5430_T_COLD_MASK, .tshut_threshold = OMAP5430_BGAP_TSHUT_CORE_OFFSET, .tshut_hot_mask = OMAP5430_TSHUT_HOT_MASK, .tshut_cold_mask = OMAP5430_TSHUT_COLD_MASK, .bgap_status = OMAP5430_BGAP_STATUS_OFFSET, .status_hot_mask = 
OMAP5430_HOT_CORE_FLAG_MASK, .status_cold_mask = OMAP5430_COLD_CORE_FLAG_MASK, .ctrl_dtemp_1 = OMAP5430_BGAP_DTEMP_CORE_1_OFFSET, .ctrl_dtemp_2 = OMAP5430_BGAP_DTEMP_CORE_2_OFFSET, .bgap_efuse = OMAP5430_FUSE_OPP_BGAP_CORE, }; /* Thresholds and limits for OMAP5430 MPU temperature sensor */ static struct temp_sensor_data omap5430_mpu_temp_sensor_data = { .tshut_hot = OMAP5430_MPU_TSHUT_HOT, .tshut_cold = OMAP5430_MPU_TSHUT_COLD, .t_hot = OMAP5430_MPU_T_HOT, .t_cold = OMAP5430_MPU_T_COLD, .min_freq = OMAP5430_MPU_MIN_FREQ, .max_freq = OMAP5430_MPU_MAX_FREQ, }; /* Thresholds and limits for OMAP5430 GPU temperature sensor */ static struct temp_sensor_data omap5430_gpu_temp_sensor_data = { .tshut_hot = OMAP5430_GPU_TSHUT_HOT, .tshut_cold = OMAP5430_GPU_TSHUT_COLD, .t_hot = OMAP5430_GPU_T_HOT, .t_cold = OMAP5430_GPU_T_COLD, .min_freq = OMAP5430_GPU_MIN_FREQ, .max_freq = OMAP5430_GPU_MAX_FREQ, }; /* Thresholds and limits for OMAP5430 CORE temperature sensor */ static struct temp_sensor_data omap5430_core_temp_sensor_data = { .tshut_hot = OMAP5430_CORE_TSHUT_HOT, .tshut_cold = OMAP5430_CORE_TSHUT_COLD, .t_hot = OMAP5430_CORE_T_HOT, .t_cold = OMAP5430_CORE_T_COLD, .min_freq = OMAP5430_CORE_MIN_FREQ, .max_freq = OMAP5430_CORE_MAX_FREQ, }; /* * OMAP54xx ES2.0 : Temperature values in milli degree celsius * ADC code values from 540 to 945 */ static int omap5430_adc_to_temp[ OMAP5430_ADC_END_VALUE - OMAP5430_ADC_START_VALUE + 1] = { /* Index 540 - 549 */ -40000, -40000, -40000, -40000, -39800, -39400, -39000, -38600, -38200, -37800, /* Index 550 - 559 */ -37400, -37000, -36600, -36200, -35800, -35300, -34700, -34200, -33800, -33400, /* Index 560 - 569 */ -33000, -32600, -32200, -31800, -31400, -31000, -30600, -30200, -29800, -29400, /* Index 570 - 579 */ -29000, -28600, -28200, -27700, -27100, -26600, -26200, -25800, -25400, -25000, /* Index 580 - 589 */ -24600, -24200, -23800, -23400, -23000, -22600, -22200, -21600, -21400, -21000, /* Index 590 - 599 */ -20500, -19900, -19400, 
-19000, -18600, -18200, -17800, -17400, -17000, -16600, /* Index 600 - 609 */ -16200, -15800, -15400, -15000, -14600, -14200, -13800, -13400, -13000, -12500, /* Index 610 - 619 */ -11900, -11400, -11000, -10600, -10200, -9800, -9400, -9000, -8600, -8200, /* Index 620 - 629 */ -7800, -7400, -7000, -6600, -6200, -5800, -5400, -5000, -4500, -3900, /* Index 630 - 639 */ -3400, -3000, -2600, -2200, -1800, -1400, -1000, -600, -200, 200, /* Index 640 - 649 */ 600, 1000, 1400, 1800, 2200, 2600, 3000, 3400, 3900, 4500, /* Index 650 - 659 */ 5000, 5400, 5800, 6200, 6600, 7000, 7400, 7800, 8200, 8600, /* Index 660 - 669 */ 9000, 9400, 9800, 10200, 10600, 11000, 11400, 11800, 12200, 12700, /* Index 670 - 679 */ 13300, 13800, 14200, 14600, 15000, 15400, 15800, 16200, 16600, 17000, /* Index 680 - 689 */ 17400, 17800, 18200, 18600, 19000, 19400, 19800, 20200, 20600, 21100, /* Index 690 - 699 */ 21400, 21900, 22500, 23000, 23400, 23800, 24200, 24600, 25000, 25400, /* Index 700 - 709 */ 25800, 26200, 26600, 27000, 27400, 27800, 28200, 28600, 29000, 29400, /* Index 710 - 719 */ 29800, 30200, 30600, 31000, 31400, 31900, 32500, 33000, 33400, 33800, /* Index 720 - 729 */ 34200, 34600, 35000, 35400, 35800, 36200, 36600, 37000, 37400, 37800, /* Index 730 - 739 */ 38200, 38600, 39000, 39400, 39800, 40200, 40600, 41000, 41400, 41800, /* Index 740 - 749 */ 42200, 42600, 43100, 43700, 44200, 44600, 45000, 45400, 45800, 46200, /* Index 750 - 759 */ 46600, 47000, 47400, 47800, 48200, 48600, 49000, 49400, 49800, 50200, /* Index 760 - 769 */ 50600, 51000, 51400, 51800, 52200, 52600, 53000, 53400, 53800, 54200, /* Index 770 - 779 */ 54600, 55000, 55400, 55900, 56500, 57000, 57400, 57800, 58200, 58600, /* Index 780 - 789 */ 59000, 59400, 59800, 60200, 60600, 61000, 61400, 61800, 62200, 62600, /* Index 790 - 799 */ 63000, 63400, 63800, 64200, 64600, 65000, 65400, 65800, 66200, 66600, /* Index 800 - 809 */ 67000, 67400, 67800, 68200, 68600, 69000, 69400, 69800, 70200, 70600, /* Index 810 - 819 */ 
71000, 71500, 72100, 72600, 73000, 73400, 73800, 74200, 74600, 75000, /* Index 820 - 829 */ 75400, 75800, 76200, 76600, 77000, 77400, 77800, 78200, 78600, 79000, /* Index 830 - 839 */ 79400, 79800, 80200, 80600, 81000, 81400, 81800, 82200, 82600, 83000, /* Index 840 - 849 */ 83400, 83800, 84200, 84600, 85000, 85400, 85800, 86200, 86600, 87000, /* Index 850 - 859 */ 87400, 87800, 88200, 88600, 89000, 89400, 89800, 90200, 90600, 91000, /* Index 860 - 869 */ 91400, 91800, 92200, 92600, 93000, 93400, 93800, 94200, 94600, 95000, /* Index 870 - 879 */ 95400, 95800, 96200, 96600, 97000, 97500, 98100, 98600, 99000, 99400, /* Index 880 - 889 */ 99800, 100200, 100600, 101000, 101400, 101800, 102200, 102600, 103000, 103400, /* Index 890 - 899 */ 103800, 104200, 104600, 105000, 105400, 105800, 106200, 106600, 107000, 107400, /* Index 900 - 909 */ 107800, 108200, 108600, 109000, 109400, 109800, 110200, 110600, 111000, 111400, /* Index 910 - 919 */ 111800, 112200, 112600, 113000, 113400, 113800, 114200, 114600, 115000, 115400, /* Index 920 - 929 */ 115800, 116200, 116600, 117000, 117400, 117800, 118200, 118600, 119000, 119400, /* Index 930 - 939 */ 119800, 120200, 120600, 121000, 121400, 121800, 122400, 122600, 123000, 123400, /* Index 940 - 945 */ 123800, 124200, 124600, 124900, 125000, 125000, }; /* OMAP54xx ES2.0 data */ const struct ti_bandgap_data omap5430_data = { .features = TI_BANDGAP_FEATURE_TSHUT_CONFIG | TI_BANDGAP_FEATURE_FREEZE_BIT | TI_BANDGAP_FEATURE_TALERT | TI_BANDGAP_FEATURE_COUNTER_DELAY | TI_BANDGAP_FEATURE_HISTORY_BUFFER, .fclock_name = "l3instr_ts_gclk_div", .div_ck_name = "l3instr_ts_gclk_div", .conv_table = omap5430_adc_to_temp, .adc_start_val = OMAP5430_ADC_START_VALUE, .adc_end_val = OMAP5430_ADC_END_VALUE, .expose_sensor = ti_thermal_expose_sensor, .remove_sensor = ti_thermal_remove_sensor, .report_temperature = ti_thermal_report_sensor_temperature, .sensors = { { .registers = &omap5430_mpu_temp_sensor_registers, .ts_data = 
&omap5430_mpu_temp_sensor_data, .domain = "cpu", .register_cooling = ti_thermal_register_cpu_cooling, .unregister_cooling = ti_thermal_unregister_cpu_cooling, .slope_pcb = OMAP_GRADIENT_SLOPE_W_PCB_5430_CPU, .constant_pcb = OMAP_GRADIENT_CONST_W_PCB_5430_CPU, }, { .registers = &omap5430_gpu_temp_sensor_registers, .ts_data = &omap5430_gpu_temp_sensor_data, .domain = "gpu", .slope_pcb = OMAP_GRADIENT_SLOPE_W_PCB_5430_GPU, .constant_pcb = OMAP_GRADIENT_CONST_W_PCB_5430_GPU, }, { .registers = &omap5430_core_temp_sensor_registers, .ts_data = &omap5430_core_temp_sensor_data, .domain = "core", }, }, .sensor_count = 3, };
linux-master
drivers/thermal/ti-soc-thermal/omap5-thermal-data.c
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2021, Linaro Limited. All rights reserved. */ #include <linux/module.h> #include <linux/interrupt.h> #include <linux/irqdomain.h> #include <linux/err.h> #include <linux/platform_device.h> #include <linux/of_platform.h> #include <linux/slab.h> #include <linux/firmware/qcom/qcom_scm.h> #define LMH_NODE_DCVS 0x44435653 #define LMH_CLUSTER0_NODE_ID 0x6370302D #define LMH_CLUSTER1_NODE_ID 0x6370312D #define LMH_SUB_FN_THERMAL 0x54484D4C #define LMH_SUB_FN_CRNT 0x43524E54 #define LMH_SUB_FN_REL 0x52454C00 #define LMH_SUB_FN_BCL 0x42434C00 #define LMH_ALGO_MODE_ENABLE 0x454E424C #define LMH_TH_HI_THRESHOLD 0x48494748 #define LMH_TH_LOW_THRESHOLD 0x4C4F5700 #define LMH_TH_ARM_THRESHOLD 0x41524D00 #define LMH_REG_DCVS_INTR_CLR 0x8 #define LMH_ENABLE_ALGOS 1 struct lmh_hw_data { void __iomem *base; struct irq_domain *domain; int irq; }; static irqreturn_t lmh_handle_irq(int hw_irq, void *data) { struct lmh_hw_data *lmh_data = data; int irq = irq_find_mapping(lmh_data->domain, 0); /* Call the cpufreq driver to handle the interrupt */ if (irq) generic_handle_irq(irq); return IRQ_HANDLED; } static void lmh_enable_interrupt(struct irq_data *d) { struct lmh_hw_data *lmh_data = irq_data_get_irq_chip_data(d); /* Clear the existing interrupt */ writel(0xff, lmh_data->base + LMH_REG_DCVS_INTR_CLR); enable_irq(lmh_data->irq); } static void lmh_disable_interrupt(struct irq_data *d) { struct lmh_hw_data *lmh_data = irq_data_get_irq_chip_data(d); disable_irq_nosync(lmh_data->irq); } static struct irq_chip lmh_irq_chip = { .name = "lmh", .irq_enable = lmh_enable_interrupt, .irq_disable = lmh_disable_interrupt }; static int lmh_irq_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hw) { struct lmh_hw_data *lmh_data = d->host_data; irq_set_chip_and_handler(irq, &lmh_irq_chip, handle_simple_irq); irq_set_chip_data(irq, lmh_data); return 0; } static const struct irq_domain_ops lmh_irq_ops = { .map = lmh_irq_map, .xlate = 
irq_domain_xlate_onecell, }; static int lmh_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct device_node *np = dev->of_node; struct device_node *cpu_node; struct lmh_hw_data *lmh_data; int temp_low, temp_high, temp_arm, cpu_id, ret; unsigned int enable_alg; u32 node_id; lmh_data = devm_kzalloc(dev, sizeof(*lmh_data), GFP_KERNEL); if (!lmh_data) return -ENOMEM; lmh_data->base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(lmh_data->base)) return PTR_ERR(lmh_data->base); cpu_node = of_parse_phandle(np, "cpus", 0); if (!cpu_node) return -EINVAL; cpu_id = of_cpu_node_to_id(cpu_node); of_node_put(cpu_node); ret = of_property_read_u32(np, "qcom,lmh-temp-high-millicelsius", &temp_high); if (ret) { dev_err(dev, "missing qcom,lmh-temp-high-millicelsius property\n"); return ret; } ret = of_property_read_u32(np, "qcom,lmh-temp-low-millicelsius", &temp_low); if (ret) { dev_err(dev, "missing qcom,lmh-temp-low-millicelsius property\n"); return ret; } ret = of_property_read_u32(np, "qcom,lmh-temp-arm-millicelsius", &temp_arm); if (ret) { dev_err(dev, "missing qcom,lmh-temp-arm-millicelsius property\n"); return ret; } /* * Only sdm845 has lmh hardware currently enabled from hlos. If this is needed * for other platforms, revisit this to check if the <cpu-id, node-id> should be part * of a dt match table. 
*/ if (cpu_id == 0) { node_id = LMH_CLUSTER0_NODE_ID; } else if (cpu_id == 4) { node_id = LMH_CLUSTER1_NODE_ID; } else { dev_err(dev, "Wrong CPU id associated with LMh node\n"); return -EINVAL; } if (!qcom_scm_lmh_dcvsh_available()) return -EINVAL; enable_alg = (uintptr_t)of_device_get_match_data(dev); if (enable_alg) { ret = qcom_scm_lmh_dcvsh(LMH_SUB_FN_CRNT, LMH_ALGO_MODE_ENABLE, 1, LMH_NODE_DCVS, node_id, 0); if (ret) dev_err(dev, "Error %d enabling current subfunction\n", ret); ret = qcom_scm_lmh_dcvsh(LMH_SUB_FN_REL, LMH_ALGO_MODE_ENABLE, 1, LMH_NODE_DCVS, node_id, 0); if (ret) dev_err(dev, "Error %d enabling reliability subfunction\n", ret); ret = qcom_scm_lmh_dcvsh(LMH_SUB_FN_BCL, LMH_ALGO_MODE_ENABLE, 1, LMH_NODE_DCVS, node_id, 0); if (ret) dev_err(dev, "Error %d enabling BCL subfunction\n", ret); ret = qcom_scm_lmh_dcvsh(LMH_SUB_FN_THERMAL, LMH_ALGO_MODE_ENABLE, 1, LMH_NODE_DCVS, node_id, 0); if (ret) { dev_err(dev, "Error %d enabling thermal subfunction\n", ret); return ret; } ret = qcom_scm_lmh_profile_change(0x1); if (ret) { dev_err(dev, "Error %d changing profile\n", ret); return ret; } } /* Set default thermal trips */ ret = qcom_scm_lmh_dcvsh(LMH_SUB_FN_THERMAL, LMH_TH_ARM_THRESHOLD, temp_arm, LMH_NODE_DCVS, node_id, 0); if (ret) { dev_err(dev, "Error setting thermal ARM threshold%d\n", ret); return ret; } ret = qcom_scm_lmh_dcvsh(LMH_SUB_FN_THERMAL, LMH_TH_HI_THRESHOLD, temp_high, LMH_NODE_DCVS, node_id, 0); if (ret) { dev_err(dev, "Error setting thermal HI threshold%d\n", ret); return ret; } ret = qcom_scm_lmh_dcvsh(LMH_SUB_FN_THERMAL, LMH_TH_LOW_THRESHOLD, temp_low, LMH_NODE_DCVS, node_id, 0); if (ret) { dev_err(dev, "Error setting thermal ARM threshold%d\n", ret); return ret; } lmh_data->irq = platform_get_irq(pdev, 0); lmh_data->domain = irq_domain_add_linear(np, 1, &lmh_irq_ops, lmh_data); if (!lmh_data->domain) { dev_err(dev, "Error adding irq_domain\n"); return -EINVAL; } /* Disable the irq and let cpufreq enable it when ready to handle the 
interrupt */ irq_set_status_flags(lmh_data->irq, IRQ_NOAUTOEN); ret = devm_request_irq(dev, lmh_data->irq, lmh_handle_irq, IRQF_ONESHOT | IRQF_NO_SUSPEND, "lmh-irq", lmh_data); if (ret) { dev_err(dev, "Error %d registering irq %x\n", ret, lmh_data->irq); irq_domain_remove(lmh_data->domain); return ret; } return 0; } static const struct of_device_id lmh_table[] = { { .compatible = "qcom,sc8180x-lmh", }, { .compatible = "qcom,sdm845-lmh", .data = (void *)LMH_ENABLE_ALGOS}, { .compatible = "qcom,sm8150-lmh", }, {} }; MODULE_DEVICE_TABLE(of, lmh_table); static struct platform_driver lmh_driver = { .probe = lmh_probe, .driver = { .name = "qcom-lmh", .of_match_table = lmh_table, .suppress_bind_attrs = true, }, }; module_platform_driver(lmh_driver); MODULE_LICENSE("GPL v2"); MODULE_DESCRIPTION("QCOM LMh driver");
linux-master
drivers/thermal/qcom/lmh.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2015, The Linux Foundation. All rights reserved.
 * Copyright (c) 2018, Linaro Limited
 */

#include <linux/bitops.h>
#include <linux/regmap.h>
#include "tsens.h"

/* ----- SROT ------ */
#define SROT_HW_VER_OFF			0x0000
#define SROT_CTRL_OFF			0x0004

/* ----- TM ------ */
#define TM_INT_EN_OFF			0x0004
#define TM_UPPER_LOWER_INT_STATUS_OFF	0x0008
#define TM_UPPER_LOWER_INT_CLEAR_OFF	0x000c
#define TM_UPPER_LOWER_INT_MASK_OFF	0x0010
#define TM_CRITICAL_INT_STATUS_OFF	0x0014
#define TM_CRITICAL_INT_CLEAR_OFF	0x0018
#define TM_CRITICAL_INT_MASK_OFF	0x001c
#define TM_Sn_UPPER_LOWER_THRESHOLD_OFF	0x0020
#define TM_Sn_CRITICAL_THRESHOLD_OFF	0x0060
#define TM_Sn_STATUS_OFF		0x00a0
#define TM_TRDY_OFF			0x00e4
#define TM_WDOG_LOG_OFF			0x013c

/* v2.x: 8996, 8998, sdm845 */

/* Feature set shared by generic v2.x TSENS IP blocks. */
static struct tsens_features tsens_v2_feat = {
	.ver_major	= VER_2_X,
	.crit_int	= 1,
	.combo_int	= 0,
	.adc		= 0,
	.srot_split	= 1,
	.max_sensors	= 16,
	.trip_min_temp	= -40000,
	.trip_max_temp	= 120000,
};

/* IPQ8074 variant: combined interrupt line and a wider trip range. */
static struct tsens_features ipq8074_feat = {
	.ver_major	= VER_2_X,
	.crit_int	= 1,
	.combo_int	= 1,
	.adc		= 0,
	.srot_split	= 1,
	.max_sensors	= 16,
	.trip_min_temp	= 0,
	.trip_max_temp	= 204000,
};

/* Register bit-field layout of the v2.x TSENS block. */
static const struct reg_field tsens_v2_regfields[MAX_REGFIELDS] = {
	/* ----- SROT ------ */
	/* VERSION */
	[VER_MAJOR] = REG_FIELD(SROT_HW_VER_OFF, 28, 31),
	[VER_MINOR] = REG_FIELD(SROT_HW_VER_OFF, 16, 27),
	[VER_STEP]  = REG_FIELD(SROT_HW_VER_OFF,  0, 15),

	/* CTRL_OFF */
	[TSENS_EN]     = REG_FIELD(SROT_CTRL_OFF, 0, 0),
	[TSENS_SW_RST] = REG_FIELD(SROT_CTRL_OFF, 1, 1),

	/* ----- TM ------ */
	/* INTERRUPT ENABLE */
	/* v2 has separate enables for UPPER/LOWER/CRITICAL interrupts */
	[INT_EN] = REG_FIELD(TM_INT_EN_OFF, 0, 2),

	/* TEMPERATURE THRESHOLDS */
	REG_FIELD_FOR_EACH_SENSOR16(LOW_THRESH,  TM_Sn_UPPER_LOWER_THRESHOLD_OFF,  0, 11),
	REG_FIELD_FOR_EACH_SENSOR16(UP_THRESH,   TM_Sn_UPPER_LOWER_THRESHOLD_OFF, 12, 23),
	REG_FIELD_FOR_EACH_SENSOR16(CRIT_THRESH, TM_Sn_CRITICAL_THRESHOLD_OFF,     0, 11),

	/* INTERRUPTS [CLEAR/STATUS/MASK] */
	REG_FIELD_SPLIT_BITS_0_15(LOW_INT_STATUS,  TM_UPPER_LOWER_INT_STATUS_OFF),
	REG_FIELD_SPLIT_BITS_0_15(LOW_INT_CLEAR,   TM_UPPER_LOWER_INT_CLEAR_OFF),
	REG_FIELD_SPLIT_BITS_0_15(LOW_INT_MASK,    TM_UPPER_LOWER_INT_MASK_OFF),
	REG_FIELD_SPLIT_BITS_16_31(UP_INT_STATUS,  TM_UPPER_LOWER_INT_STATUS_OFF),
	REG_FIELD_SPLIT_BITS_16_31(UP_INT_CLEAR,   TM_UPPER_LOWER_INT_CLEAR_OFF),
	REG_FIELD_SPLIT_BITS_16_31(UP_INT_MASK,    TM_UPPER_LOWER_INT_MASK_OFF),
	REG_FIELD_SPLIT_BITS_0_15(CRIT_INT_STATUS, TM_CRITICAL_INT_STATUS_OFF),
	REG_FIELD_SPLIT_BITS_0_15(CRIT_INT_CLEAR,  TM_CRITICAL_INT_CLEAR_OFF),
	REG_FIELD_SPLIT_BITS_0_15(CRIT_INT_MASK,   TM_CRITICAL_INT_MASK_OFF),

	/* WATCHDOG on v2.3 or later */
	[WDOG_BARK_STATUS] = REG_FIELD(TM_CRITICAL_INT_STATUS_OFF, 31, 31),
	[WDOG_BARK_CLEAR]  = REG_FIELD(TM_CRITICAL_INT_CLEAR_OFF,  31, 31),
	[WDOG_BARK_MASK]   = REG_FIELD(TM_CRITICAL_INT_MASK_OFF,   31, 31),
	[CC_MON_STATUS]    = REG_FIELD(TM_CRITICAL_INT_STATUS_OFF, 30, 30),
	[CC_MON_CLEAR]     = REG_FIELD(TM_CRITICAL_INT_CLEAR_OFF,  30, 30),
	[CC_MON_MASK]      = REG_FIELD(TM_CRITICAL_INT_MASK_OFF,   30, 30),
	[WDOG_BARK_COUNT]  = REG_FIELD(TM_WDOG_LOG_OFF, 0, 7),

	/* Sn_STATUS */
	REG_FIELD_FOR_EACH_SENSOR16(LAST_TEMP, TM_Sn_STATUS_OFF,  0, 11),
	REG_FIELD_FOR_EACH_SENSOR16(VALID,     TM_Sn_STATUS_OFF, 21, 21),

	/* xxx_STATUS bits: 1 == threshold violated */
	REG_FIELD_FOR_EACH_SENSOR16(MIN_STATUS,      TM_Sn_STATUS_OFF, 16, 16),
	REG_FIELD_FOR_EACH_SENSOR16(LOWER_STATUS,    TM_Sn_STATUS_OFF, 17, 17),
	REG_FIELD_FOR_EACH_SENSOR16(UPPER_STATUS,    TM_Sn_STATUS_OFF, 18, 18),
	REG_FIELD_FOR_EACH_SENSOR16(CRITICAL_STATUS, TM_Sn_STATUS_OFF, 19, 19),
	REG_FIELD_FOR_EACH_SENSOR16(MAX_STATUS,      TM_Sn_STATUS_OFF, 20, 20),

	/* TRDY: 1=ready, 0=in progress */
	[TRDY] = REG_FIELD(TM_TRDY_OFF, 0, 0),
};

static const struct tsens_ops ops_generic_v2 = {
	.init		= init_common,
	.get_temp	= get_temp_tsens_valid,
};

struct tsens_plat_data data_tsens_v2 = {
	.ops		= &ops_generic_v2,
	.feat		= &tsens_v2_feat,
	.fields	= tsens_v2_regfields,
};

struct tsens_plat_data data_ipq8074 = {
	.ops		= &ops_generic_v2,
	.feat		= &ipq8074_feat,
	.fields	= tsens_v2_regfields,
};

/* Kept around for backward compatibility with old msm8996.dtsi */
struct tsens_plat_data data_8996 = {
	.num_sensors	= 13,
	.ops		= &ops_generic_v2,
	.feat		= &tsens_v2_feat,
	.fields	= tsens_v2_regfields,
};
linux-master
drivers/thermal/qcom/tsens-v2.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2015, The Linux Foundation. All rights reserved.
 */

#include <linux/bitfield.h>
#include <linux/nvmem-consumer.h>
#include <linux/platform_device.h>
#include "tsens.h"

/* ----- SROT ------ */
#define SROT_CTRL_OFF		0x0000

/* ----- TM ------ */
#define TM_INT_EN_OFF				0x0000
#define TM_Sn_UPPER_LOWER_STATUS_CTRL_OFF	0x0004
#define TM_Sn_STATUS_OFF			0x0030
#define TM_TRDY_OFF				0x005c

/* extra data for 8974 */
#define BKP_SEL			0x3
#define BKP_REDUN_SEL		0xe0000000

#define BIT_APPEND		0x3

/*
 * Legacy qfprom fuse layouts: location of the calibration-mode flag,
 * base values and per-sensor point offsets inside the raw fuse words.
 * NOTE(review): exact field semantics assumed from
 * struct tsens_legacy_calibration_format in tsens.h — confirm there.
 */
static struct tsens_legacy_calibration_format tsens_8916_nvmem = {
	.base_len = 7,
	.base_shift = 3,
	.sp_len = 5,
	.mode = { 0, 29, 1 },
	.invalid = { 0, 31, 1 },
	.base = { { 0, 0 }, { 1, 25 } },
	.sp = {
		{ { 0, 7 },	{ 0, 12 } },
		{ { 0, 17 },	{ 0, 22 } },
		{ { 0, 27 },	{ 1, 0 } },
		{ { 1, 5 },	{ 1, 10 } },
		{ { 1, 15 },	{ 1, 20 } },
	},
};

static struct tsens_legacy_calibration_format tsens_8974_nvmem = {
	.base_len = 8,
	.base_shift = 2,
	.sp_len = 6,
	.mode = { 1, 30 },
	.invalid = { 3, 30 },
	.base = { { 0, 0 }, { 2, 12 } },
	.sp = {
		{ { 0, 8 },	{ 2, 20 } },
		{ { 0, 14 },	{ 2, 26 } },
		{ { 0, 20 },	{ 3, 0 } },
		{ { 0, 26 },	{ 3, 6 } },
		{ { 1, 0 },	{ 3, 12 } },
		{ { 1, 6 },	{ 3, 18 } },
		{ { 1, 12 },	{ 3, 24 } },
		{ { 1, 18 },	{ 4, 0 } },
		{ { 1, 24 },	{ 4, 6 } },
		{ { 2, 0 },	{ 4, 12 } },
		{ { 2, 6 },	{ 4, 18 } },
	},
};

/* Backup copy of the 8974 calibration fuses, selected via BKP_SEL. */
static struct tsens_legacy_calibration_format tsens_8974_backup_nvmem = {
	.base_len = 8,
	.base_shift = 2,
	.sp_len = 6,
	.mode = { 4, 30, 1 },
	.invalid = { 5, 30, 1 },
	.base = { { 0, 0 }, { 2, 18 } },
	.sp = {
		{ { 0, 8 },	{ 2, 26 } },
		{ { 0, 14 },	{ 3, 0 } },
		{ { 0, 20 },	{ 3, 6 } },
		{ { 0, 26 },	{ 3, 12 } },
		{ { 1, 0 },	{ 3, 18 } },
		{ { 1, 6 },	{ 3, 24, 1 } },
		{ { 1, 12 },	{ 4, 0, 1 } },
		{ { 1, 18 },	{ 4, 6, 1 } },
		{ { 2, 0 },	{ 4, 12, 1 } },
		{ { 2, 6 },	{ 4, 18, 1 } },
		{ { 2, 12 },	{ 4, 24, 1 } },
	},
};

/*
 * 8916 calibration: try the modern per-cell nvmem path first; fall back
 * to parsing the raw legacy "calib"/"calib_sel" qfprom blobs.
 */
static int calibrate_8916(struct tsens_priv *priv)
{
	u32 p1[5], p2[5];
	u32 *qfprom_cdata, *qfprom_csel;
	int mode, ret;

	ret = tsens_calibrate_nvmem(priv, 3);
	if (!ret)
		return 0;

	qfprom_cdata = (u32 *)qfprom_read(priv->dev, "calib");
	if (IS_ERR(qfprom_cdata))
		return PTR_ERR(qfprom_cdata);

	qfprom_csel = (u32 *)qfprom_read(priv->dev, "calib_sel");
	if (IS_ERR(qfprom_csel)) {
		kfree(qfprom_cdata);
		return PTR_ERR(qfprom_csel);
	}

	mode = tsens_read_calibration_legacy(priv, &tsens_8916_nvmem, p1, p2,
					     qfprom_cdata, qfprom_csel);

	compute_intercept_slope(priv, p1, p2, mode);
	kfree(qfprom_cdata);
	kfree(qfprom_csel);

	return 0;
}

/* Apply 8974-specific fixed offsets to the decoded calibration points. */
static void fixup_8974_points(int mode, u32 *p1, u32 *p2)
{
	int i;

	if (mode == NO_PT_CALIB) {
		p1[0] += 2;
		p1[1] += 9;
		p1[2] += 3;
		p1[3] += 9;
		p1[4] += 5;
		p1[5] += 9;
		p1[6] += 7;
		p1[7] += 10;
		p1[8] += 8;
		p1[9] += 9;
		p1[10] += 8;
	} else {
		for (i = 0; i < 11; i++) {
			/*
			 * ONE_PT_CALIB requires using addition here instead of
			 * using OR operation.
			 */
			p1[i] += BIT_APPEND;
			p2[i] += BIT_APPEND;
		}
	}
}

/* 8974 calibration via dedicated nvmem cells (preferred path). */
static int calibrate_8974_nvmem(struct tsens_priv *priv)
{
	u32 p1[11], p2[11];
	u32 backup;
	int ret, mode;

	ret = nvmem_cell_read_variable_le_u32(priv->dev, "use_backup", &backup);
	if (ret == -ENOENT)
		dev_warn(priv->dev, "Please migrate to separate nvmem cells for calibration data\n");
	if (ret < 0)
		return ret;

	mode = tsens_read_calibration(priv, 2, p1, p2, backup == BKP_SEL);
	if (mode < 0)
		return mode;

	fixup_8974_points(mode, p1, p2);

	compute_intercept_slope(priv, p1, p2, mode);

	return 0;
}

/*
 * 8974 calibration: nvmem cells first, otherwise parse raw "calib" and
 * "calib_backup" blobs; BKP_REDUN_SEL in the backup word selects which
 * copy of the fuses is authoritative.
 */
static int calibrate_8974(struct tsens_priv *priv)
{
	u32 p1[11], p2[11];
	u32 *calib, *bkp;
	u32 calib_redun_sel;
	int mode, ret;

	ret = calibrate_8974_nvmem(priv);
	if (ret == 0)
		return 0;

	calib = (u32 *)qfprom_read(priv->dev, "calib");
	if (IS_ERR(calib))
		return PTR_ERR(calib);

	bkp = (u32 *)qfprom_read(priv->dev, "calib_backup");
	if (IS_ERR(bkp)) {
		kfree(calib);
		return PTR_ERR(bkp);
	}

	calib_redun_sel = FIELD_GET(BKP_REDUN_SEL, bkp[1]);

	if (calib_redun_sel == BKP_SEL)
		mode = tsens_read_calibration_legacy(priv, &tsens_8974_backup_nvmem,
						     p1, p2, bkp, calib);
	else
		mode = tsens_read_calibration_legacy(priv, &tsens_8974_nvmem,
						     p1, p2, calib, NULL);

	fixup_8974_points(mode, p1, p2);

	compute_intercept_slope(priv, p1, p2, mode);
	kfree(calib);
	kfree(bkp);

	return 0;
}

/* Per-SoC characterised sensor slopes, then common init. */
static int __init init_8226(struct tsens_priv *priv)
{
	priv->sensor[0].slope = 2901;
	priv->sensor[1].slope = 2846;
	priv->sensor[2].slope = 3038;
	priv->sensor[3].slope = 2955;
	priv->sensor[4].slope = 2901;
	priv->sensor[5].slope = 2846;

	return init_common(priv);
}

/* Uniform slope plus per-sensor calibration-point offsets for 8909. */
static int __init init_8909(struct tsens_priv *priv)
{
	int i;

	for (i = 0; i < priv->num_sensors; ++i)
		priv->sensor[i].slope = 3000;

	priv->sensor[0].p1_calib_offset = 0;
	priv->sensor[0].p2_calib_offset = 0;
	priv->sensor[1].p1_calib_offset = -10;
	priv->sensor[1].p2_calib_offset = -6;
	priv->sensor[2].p1_calib_offset = 0;
	priv->sensor[2].p2_calib_offset = 0;
	priv->sensor[3].p1_calib_offset = -9;
	priv->sensor[3].p2_calib_offset = -9;
	priv->sensor[4].p1_calib_offset = -8;
	priv->sensor[4].p2_calib_offset = -10;

	return init_common(priv);
}

static int __init init_8939(struct tsens_priv *priv)
{
	priv->sensor[0].slope = 2911;
	priv->sensor[1].slope = 2789;
	priv->sensor[2].slope = 2906;
	priv->sensor[3].slope = 2763;
	priv->sensor[4].slope = 2922;
	priv->sensor[5].slope = 2867;
	priv->sensor[6].slope = 2833;
	priv->sensor[7].slope = 2838;
	priv->sensor[8].slope = 2840;
	/* priv->sensor[9].slope = 2852; */

	return init_common(priv);
}

/* Uniform slope plus per-sensor calibration-point offsets for 9607. */
static int __init init_9607(struct tsens_priv *priv)
{
	int i;

	for (i = 0; i < priv->num_sensors; ++i)
		priv->sensor[i].slope = 3000;

	priv->sensor[0].p1_calib_offset = 1;
	priv->sensor[0].p2_calib_offset = 1;
	priv->sensor[1].p1_calib_offset = -4;
	priv->sensor[1].p2_calib_offset = -2;
	priv->sensor[2].p1_calib_offset = 4;
	priv->sensor[2].p2_calib_offset = 8;
	priv->sensor[3].p1_calib_offset = -3;
	priv->sensor[3].p2_calib_offset = -5;
	priv->sensor[4].p1_calib_offset = -4;
	priv->sensor[4].p2_calib_offset = -4;

	return init_common(priv);
}

/* v0.1: 8226, 8909, 8916, 8939, 8974, 9607 */

static struct tsens_features tsens_v0_1_feat = {
	.ver_major	= VER_0_1,
	.crit_int	= 0,
	.combo_int	= 0,
	.adc		= 1,
	.srot_split	= 1,
	.max_sensors	= 11,
	.trip_min_temp	= -40000,
	.trip_max_temp	= 120000,
};

/* Register bit-field layout of the v0.1 TSENS block. */
static const struct reg_field tsens_v0_1_regfields[MAX_REGFIELDS] = {
	/* ----- SROT ------ */
	/* No VERSION information */

	/* CTRL_OFFSET */
	[TSENS_EN]     = REG_FIELD(SROT_CTRL_OFF, 0, 0),
	[TSENS_SW_RST] = REG_FIELD(SROT_CTRL_OFF, 1, 1),

	/* ----- TM ------ */
	/* INTERRUPT ENABLE */
	[INT_EN] = REG_FIELD(TM_INT_EN_OFF, 0, 0),

	/* UPPER/LOWER TEMPERATURE THRESHOLDS */
	REG_FIELD_FOR_EACH_SENSOR11(LOW_THRESH, TM_Sn_UPPER_LOWER_STATUS_CTRL_OFF,  0,  9),
	REG_FIELD_FOR_EACH_SENSOR11(UP_THRESH,  TM_Sn_UPPER_LOWER_STATUS_CTRL_OFF, 10, 19),

	/* UPPER/LOWER INTERRUPTS [CLEAR/STATUS] */
	REG_FIELD_FOR_EACH_SENSOR11(LOW_INT_CLEAR, TM_Sn_UPPER_LOWER_STATUS_CTRL_OFF, 20, 20),
	REG_FIELD_FOR_EACH_SENSOR11(UP_INT_CLEAR,  TM_Sn_UPPER_LOWER_STATUS_CTRL_OFF, 21, 21),

	/* NO CRITICAL INTERRUPT SUPPORT on v0.1 */

	/* Sn_STATUS */
	REG_FIELD_FOR_EACH_SENSOR11(LAST_TEMP, TM_Sn_STATUS_OFF, 0, 9),
	/* No VALID field on v0.1 */

	/* xxx_STATUS bits: 1 == threshold violated */
	REG_FIELD_FOR_EACH_SENSOR11(MIN_STATUS,   TM_Sn_STATUS_OFF, 10, 10),
	REG_FIELD_FOR_EACH_SENSOR11(LOWER_STATUS, TM_Sn_STATUS_OFF, 11, 11),
	REG_FIELD_FOR_EACH_SENSOR11(UPPER_STATUS, TM_Sn_STATUS_OFF, 12, 12),
	/* No CRITICAL field on v0.1 */
	REG_FIELD_FOR_EACH_SENSOR11(MAX_STATUS,   TM_Sn_STATUS_OFF, 13, 13),

	/* TRDY: 1=ready, 0=in progress */
	[TRDY] = REG_FIELD(TM_TRDY_OFF, 0, 0),
};

static const struct tsens_ops ops_v0_1 = {
	.init		= init_common,
	.calibrate	= tsens_calibrate_common,
	.get_temp	= get_temp_common,
};

static const struct tsens_ops ops_8226 = {
	.init		= init_8226,
	.calibrate	= tsens_calibrate_common,
	.get_temp	= get_temp_common,
};

struct tsens_plat_data data_8226 = {
	.num_sensors	= 6,
	.ops		= &ops_8226,
	.feat		= &tsens_v0_1_feat,
	.fields	= tsens_v0_1_regfields,
};

static const struct tsens_ops ops_8909 = {
	.init		= init_8909,
	.calibrate	= tsens_calibrate_common,
	.get_temp	= get_temp_common,
};

struct tsens_plat_data data_8909 = {
	.num_sensors	= 5,
	.ops		= &ops_8909,
	.feat		= &tsens_v0_1_feat,
	.fields	= tsens_v0_1_regfields,
};

static const struct tsens_ops ops_8916 = {
	.init		= init_common,
	.calibrate	= calibrate_8916,
	.get_temp	= get_temp_common,
};

struct tsens_plat_data data_8916 = {
	.num_sensors	= 5,
	.ops		= &ops_8916,
	.hw_ids		= (unsigned int []){0, 1, 2, 4, 5 },
	.feat		= &tsens_v0_1_feat,
	.fields	= tsens_v0_1_regfields,
};

static const struct tsens_ops ops_8939 = {
	.init		= init_8939,
	.calibrate	= tsens_calibrate_common,
	.get_temp	= get_temp_common,
};

struct tsens_plat_data data_8939 = {
	.num_sensors	= 9,
	.ops		= &ops_8939,
	.hw_ids		= (unsigned int []){ 0, 1, 2, 3, 5, 6, 7, 8, 9, /* 10 */ },
	.feat		= &tsens_v0_1_feat,
	.fields	= tsens_v0_1_regfields,
};

static const struct tsens_ops ops_8974 = {
	.init		= init_common,
	.calibrate	= calibrate_8974,
	.get_temp	= get_temp_common,
};

struct tsens_plat_data data_8974 = {
	.num_sensors	= 11,
	.ops		= &ops_8974,
	.feat		= &tsens_v0_1_feat,
	.fields	= tsens_v0_1_regfields,
};

static const struct tsens_ops ops_9607 = {
	.init		= init_9607,
	.calibrate	= tsens_calibrate_common,
	.get_temp	= get_temp_common,
};

struct tsens_plat_data data_9607 = {
	.num_sensors	= 5,
	.ops		= &ops_9607,
	.feat		= &tsens_v0_1_feat,
	.fields	= tsens_v0_1_regfields,
};
linux-master
drivers/thermal/qcom/tsens-v0_1.c
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (c) 2020 Linaro Limited * * Based on original driver: * Copyright (c) 2012-2020, The Linux Foundation. All rights reserved. * * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved. */ #include <linux/bitfield.h> #include <linux/iio/adc/qcom-vadc-common.h> #include <linux/iio/consumer.h> #include <linux/interrupt.h> #include <linux/module.h> #include <linux/of.h> #include <linux/platform_device.h> #include <linux/regmap.h> #include <linux/thermal.h> #include <asm/unaligned.h> #include "../thermal_hwmon.h" /* * Thermal monitoring block consists of 8 (ADC_TM5_NUM_CHANNELS) channels. Each * channel is programmed to use one of ADC channels for voltage comparison. * Voltages are programmed using ADC codes, so we have to convert temp to * voltage and then to ADC code value. * * Configuration of TM channels must match configuration of corresponding ADC * channels. */ #define ADC5_MAX_CHANNEL 0xc0 #define ADC_TM5_NUM_CHANNELS 8 #define ADC_TM5_STATUS_LOW 0x0a #define ADC_TM5_STATUS_HIGH 0x0b #define ADC_TM5_NUM_BTM 0x0f #define ADC_TM5_ADC_DIG_PARAM 0x42 #define ADC_TM5_FAST_AVG_CTL (ADC_TM5_ADC_DIG_PARAM + 1) #define ADC_TM5_FAST_AVG_EN BIT(7) #define ADC_TM5_MEAS_INTERVAL_CTL (ADC_TM5_ADC_DIG_PARAM + 2) #define ADC_TM5_TIMER1 3 /* 3.9ms */ #define ADC_TM5_MEAS_INTERVAL_CTL2 (ADC_TM5_ADC_DIG_PARAM + 3) #define ADC_TM5_MEAS_INTERVAL_CTL2_MASK 0xf0 #define ADC_TM5_TIMER2 10 /* 1 second */ #define ADC_TM5_MEAS_INTERVAL_CTL3_MASK 0xf #define ADC_TM5_TIMER3 4 /* 4 second */ #define ADC_TM_EN_CTL1 0x46 #define ADC_TM_EN BIT(7) #define ADC_TM_CONV_REQ 0x47 #define ADC_TM_CONV_REQ_EN BIT(7) #define ADC_TM5_M_CHAN_BASE 0x60 #define ADC_TM5_M_ADC_CH_SEL_CTL(n) (ADC_TM5_M_CHAN_BASE + ((n) * 8) + 0) #define ADC_TM5_M_LOW_THR0(n) (ADC_TM5_M_CHAN_BASE + ((n) * 8) + 1) #define ADC_TM5_M_LOW_THR1(n) (ADC_TM5_M_CHAN_BASE + ((n) * 8) + 2) #define ADC_TM5_M_HIGH_THR0(n) (ADC_TM5_M_CHAN_BASE + ((n) * 8) + 3) #define 
ADC_TM5_M_HIGH_THR1(n) (ADC_TM5_M_CHAN_BASE + ((n) * 8) + 4) #define ADC_TM5_M_MEAS_INTERVAL_CTL(n) (ADC_TM5_M_CHAN_BASE + ((n) * 8) + 5) #define ADC_TM5_M_CTL(n) (ADC_TM5_M_CHAN_BASE + ((n) * 8) + 6) #define ADC_TM5_M_CTL_HW_SETTLE_DELAY_MASK 0xf #define ADC_TM5_M_CTL_CAL_SEL_MASK 0x30 #define ADC_TM5_M_CTL_CAL_VAL 0x40 #define ADC_TM5_M_EN(n) (ADC_TM5_M_CHAN_BASE + ((n) * 8) + 7) #define ADC_TM5_M_MEAS_EN BIT(7) #define ADC_TM5_M_HIGH_THR_INT_EN BIT(1) #define ADC_TM5_M_LOW_THR_INT_EN BIT(0) #define ADC_TM_GEN2_STATUS1 0x08 #define ADC_TM_GEN2_STATUS_LOW_SET 0x09 #define ADC_TM_GEN2_STATUS_LOW_CLR 0x0a #define ADC_TM_GEN2_STATUS_HIGH_SET 0x0b #define ADC_TM_GEN2_STATUS_HIGH_CLR 0x0c #define ADC_TM_GEN2_CFG_HS_SET 0x0d #define ADC_TM_GEN2_CFG_HS_FLAG BIT(0) #define ADC_TM_GEN2_CFG_HS_CLR 0x0e #define ADC_TM_GEN2_SID 0x40 #define ADC_TM_GEN2_CH_CTL 0x41 #define ADC_TM_GEN2_TM_CH_SEL GENMASK(7, 5) #define ADC_TM_GEN2_MEAS_INT_SEL GENMASK(3, 2) #define ADC_TM_GEN2_ADC_DIG_PARAM 0x42 #define ADC_TM_GEN2_CTL_CAL_SEL GENMASK(5, 4) #define ADC_TM_GEN2_CTL_DEC_RATIO_MASK GENMASK(3, 2) #define ADC_TM_GEN2_FAST_AVG_CTL 0x43 #define ADC_TM_GEN2_FAST_AVG_EN BIT(7) #define ADC_TM_GEN2_ADC_CH_SEL_CTL 0x44 #define ADC_TM_GEN2_DELAY_CTL 0x45 #define ADC_TM_GEN2_HW_SETTLE_DELAY GENMASK(3, 0) #define ADC_TM_GEN2_EN_CTL1 0x46 #define ADC_TM_GEN2_EN BIT(7) #define ADC_TM_GEN2_CONV_REQ 0x47 #define ADC_TM_GEN2_CONV_REQ_EN BIT(7) #define ADC_TM_GEN2_LOW_THR0 0x49 #define ADC_TM_GEN2_LOW_THR1 0x4a #define ADC_TM_GEN2_HIGH_THR0 0x4b #define ADC_TM_GEN2_HIGH_THR1 0x4c #define ADC_TM_GEN2_LOWER_MASK(n) ((n) & GENMASK(7, 0)) #define ADC_TM_GEN2_UPPER_MASK(n) (((n) & GENMASK(15, 8)) >> 8) #define ADC_TM_GEN2_MEAS_IRQ_EN 0x4d #define ADC_TM_GEN2_MEAS_EN BIT(7) #define ADC_TM5_GEN2_HIGH_THR_INT_EN BIT(1) #define ADC_TM5_GEN2_LOW_THR_INT_EN BIT(0) #define ADC_TM_GEN2_MEAS_INT_LSB 0x50 #define ADC_TM_GEN2_MEAS_INT_MSB 0x51 #define ADC_TM_GEN2_MEAS_INT_MODE 0x52 #define ADC_TM_GEN2_Mn_DATA0(n) 
((n * 2) + 0xa0) #define ADC_TM_GEN2_Mn_DATA1(n) ((n * 2) + 0xa1) #define ADC_TM_GEN2_DATA_SHIFT 8 enum adc5_timer_select { ADC5_TIMER_SEL_1 = 0, ADC5_TIMER_SEL_2, ADC5_TIMER_SEL_3, ADC5_TIMER_SEL_NONE, }; enum adc5_gen { ADC_TM5, ADC_TM_HC, ADC_TM5_GEN2, ADC_TM5_MAX }; enum adc_tm5_cal_method { ADC_TM5_NO_CAL = 0, ADC_TM5_RATIOMETRIC_CAL, ADC_TM5_ABSOLUTE_CAL }; enum adc_tm_gen2_time_select { MEAS_INT_50MS = 0, MEAS_INT_100MS, MEAS_INT_1S, MEAS_INT_SET, MEAS_INT_NONE, }; struct adc_tm5_chip; struct adc_tm5_channel; struct adc_tm5_data { const u32 full_scale_code_volt; unsigned int *decimation; unsigned int *hw_settle; int (*disable_channel)(struct adc_tm5_channel *channel); int (*configure)(struct adc_tm5_channel *channel, int low, int high); irqreturn_t (*isr)(int irq, void *data); int (*init)(struct adc_tm5_chip *chip); char *irq_name; int gen; }; /** * struct adc_tm5_channel - ADC Thermal Monitoring channel data. * @channel: channel number. * @adc_channel: corresponding ADC channel number. * @cal_method: calibration method. * @prescale: channel scaling performed on the input signal. * @hw_settle_time: the time between AMUX being configured and the * start of conversion. * @decimation: sampling rate supported for the channel. * @avg_samples: ability to provide single result from the ADC * that is an average of multiple measurements. * @high_thr_en: channel upper voltage threshold enable state. * @low_thr_en: channel lower voltage threshold enable state. * @meas_en: recurring measurement enable state * @iio: IIO channel instance used by this channel. * @chip: ADC TM chip instance. * @tzd: thermal zone device used by this channel. 
*/ struct adc_tm5_channel { unsigned int channel; unsigned int adc_channel; enum adc_tm5_cal_method cal_method; unsigned int prescale; unsigned int hw_settle_time; unsigned int decimation; /* For Gen2 ADC_TM */ unsigned int avg_samples; /* For Gen2 ADC_TM */ bool high_thr_en; /* For Gen2 ADC_TM */ bool low_thr_en; /* For Gen2 ADC_TM */ bool meas_en; /* For Gen2 ADC_TM */ struct iio_channel *iio; struct adc_tm5_chip *chip; struct thermal_zone_device *tzd; }; /** * struct adc_tm5_chip - ADC Thermal Monitoring properties * @regmap: SPMI ADC5 Thermal Monitoring peripheral register map field. * @dev: SPMI ADC5 device. * @data: software configuration data. * @channels: array of ADC TM channel data. * @nchannels: amount of channels defined/allocated * @decimation: sampling rate supported for the channel. * Applies to all channels, used only on Gen1 ADC_TM. * @avg_samples: ability to provide single result from the ADC * that is an average of multiple measurements. Applies to all * channels, used only on Gen1 ADC_TM. * @base: base address of TM registers. * @adc_mutex_lock: ADC_TM mutex lock, used only on Gen2 ADC_TM. * It is used to ensure only one ADC channel configuration * is done at a time using the shared set of configuration * registers. 
*/ struct adc_tm5_chip { struct regmap *regmap; struct device *dev; const struct adc_tm5_data *data; struct adc_tm5_channel *channels; unsigned int nchannels; unsigned int decimation; unsigned int avg_samples; u16 base; struct mutex adc_mutex_lock; }; static int adc_tm5_read(struct adc_tm5_chip *adc_tm, u16 offset, u8 *data, int len) { return regmap_bulk_read(adc_tm->regmap, adc_tm->base + offset, data, len); } static int adc_tm5_write(struct adc_tm5_chip *adc_tm, u16 offset, u8 *data, int len) { return regmap_bulk_write(adc_tm->regmap, adc_tm->base + offset, data, len); } static int adc_tm5_reg_update(struct adc_tm5_chip *adc_tm, u16 offset, u8 mask, u8 val) { return regmap_write_bits(adc_tm->regmap, adc_tm->base + offset, mask, val); } static irqreturn_t adc_tm5_isr(int irq, void *data) { struct adc_tm5_chip *chip = data; u8 status_low, status_high, ctl; int ret, i; ret = adc_tm5_read(chip, ADC_TM5_STATUS_LOW, &status_low, sizeof(status_low)); if (unlikely(ret)) { dev_err(chip->dev, "read status low failed: %d\n", ret); return IRQ_HANDLED; } ret = adc_tm5_read(chip, ADC_TM5_STATUS_HIGH, &status_high, sizeof(status_high)); if (unlikely(ret)) { dev_err(chip->dev, "read status high failed: %d\n", ret); return IRQ_HANDLED; } for (i = 0; i < chip->nchannels; i++) { bool upper_set = false, lower_set = false; unsigned int ch = chip->channels[i].channel; /* No TZD, we warned at the boot time */ if (!chip->channels[i].tzd) continue; ret = adc_tm5_read(chip, ADC_TM5_M_EN(ch), &ctl, sizeof(ctl)); if (unlikely(ret)) { dev_err(chip->dev, "ctl read failed: %d, channel %d\n", ret, i); continue; } if (!(ctl & ADC_TM5_M_MEAS_EN)) continue; lower_set = (status_low & BIT(ch)) && (ctl & ADC_TM5_M_LOW_THR_INT_EN); upper_set = (status_high & BIT(ch)) && (ctl & ADC_TM5_M_HIGH_THR_INT_EN); if (upper_set || lower_set) thermal_zone_device_update(chip->channels[i].tzd, THERMAL_EVENT_UNSPECIFIED); } return IRQ_HANDLED; } static irqreturn_t adc_tm5_gen2_isr(int irq, void *data) { struct 
adc_tm5_chip *chip = data; u8 status_low, status_high; int ret, i; ret = adc_tm5_read(chip, ADC_TM_GEN2_STATUS_LOW_CLR, &status_low, sizeof(status_low)); if (ret) { dev_err(chip->dev, "read status_low failed: %d\n", ret); return IRQ_HANDLED; } ret = adc_tm5_read(chip, ADC_TM_GEN2_STATUS_HIGH_CLR, &status_high, sizeof(status_high)); if (ret) { dev_err(chip->dev, "read status_high failed: %d\n", ret); return IRQ_HANDLED; } ret = adc_tm5_write(chip, ADC_TM_GEN2_STATUS_LOW_CLR, &status_low, sizeof(status_low)); if (ret < 0) { dev_err(chip->dev, "clear status low failed with %d\n", ret); return IRQ_HANDLED; } ret = adc_tm5_write(chip, ADC_TM_GEN2_STATUS_HIGH_CLR, &status_high, sizeof(status_high)); if (ret < 0) { dev_err(chip->dev, "clear status high failed with %d\n", ret); return IRQ_HANDLED; } for (i = 0; i < chip->nchannels; i++) { bool upper_set = false, lower_set = false; unsigned int ch = chip->channels[i].channel; /* No TZD, we warned at the boot time */ if (!chip->channels[i].tzd) continue; if (!chip->channels[i].meas_en) continue; lower_set = (status_low & BIT(ch)) && (chip->channels[i].low_thr_en); upper_set = (status_high & BIT(ch)) && (chip->channels[i].high_thr_en); if (upper_set || lower_set) thermal_zone_device_update(chip->channels[i].tzd, THERMAL_EVENT_UNSPECIFIED); } return IRQ_HANDLED; } static int adc_tm5_get_temp(struct thermal_zone_device *tz, int *temp) { struct adc_tm5_channel *channel = thermal_zone_device_priv(tz); int ret; if (!channel || !channel->iio) return -EINVAL; ret = iio_read_channel_processed(channel->iio, temp); if (ret < 0) return ret; if (ret != IIO_VAL_INT) return -EINVAL; return 0; } static int adc_tm5_disable_channel(struct adc_tm5_channel *channel) { struct adc_tm5_chip *chip = channel->chip; unsigned int reg = ADC_TM5_M_EN(channel->channel); return adc_tm5_reg_update(chip, reg, ADC_TM5_M_MEAS_EN | ADC_TM5_M_HIGH_THR_INT_EN | ADC_TM5_M_LOW_THR_INT_EN, 0); } #define ADC_TM_GEN2_POLL_DELAY_MIN_US 100 #define 
ADC_TM_GEN2_POLL_DELAY_MAX_US 110 #define ADC_TM_GEN2_POLL_RETRY_COUNT 3 static int32_t adc_tm5_gen2_conv_req(struct adc_tm5_chip *chip) { int ret; u8 data; unsigned int count; data = ADC_TM_GEN2_EN; ret = adc_tm5_write(chip, ADC_TM_GEN2_EN_CTL1, &data, 1); if (ret < 0) { dev_err(chip->dev, "adc-tm enable failed with %d\n", ret); return ret; } data = ADC_TM_GEN2_CFG_HS_FLAG; ret = adc_tm5_write(chip, ADC_TM_GEN2_CFG_HS_SET, &data, 1); if (ret < 0) { dev_err(chip->dev, "adc-tm handshake failed with %d\n", ret); return ret; } data = ADC_TM_GEN2_CONV_REQ_EN; ret = adc_tm5_write(chip, ADC_TM_GEN2_CONV_REQ, &data, 1); if (ret < 0) { dev_err(chip->dev, "adc-tm request conversion failed with %d\n", ret); return ret; } /* * SW sets a handshake bit and waits for PBS to clear it * before the next conversion request can be queued. */ for (count = 0; count < ADC_TM_GEN2_POLL_RETRY_COUNT; count++) { ret = adc_tm5_read(chip, ADC_TM_GEN2_CFG_HS_SET, &data, sizeof(data)); if (ret < 0) { dev_err(chip->dev, "adc-tm read failed with %d\n", ret); return ret; } if (!(data & ADC_TM_GEN2_CFG_HS_FLAG)) return ret; usleep_range(ADC_TM_GEN2_POLL_DELAY_MIN_US, ADC_TM_GEN2_POLL_DELAY_MAX_US); } dev_err(chip->dev, "adc-tm conversion request handshake timed out\n"); return -ETIMEDOUT; } static int adc_tm5_gen2_disable_channel(struct adc_tm5_channel *channel) { struct adc_tm5_chip *chip = channel->chip; int ret; u8 val; mutex_lock(&chip->adc_mutex_lock); channel->meas_en = false; channel->high_thr_en = false; channel->low_thr_en = false; ret = adc_tm5_read(chip, ADC_TM_GEN2_CH_CTL, &val, sizeof(val)); if (ret < 0) { dev_err(chip->dev, "adc-tm block read failed with %d\n", ret); goto disable_fail; } val &= ~ADC_TM_GEN2_TM_CH_SEL; val |= FIELD_PREP(ADC_TM_GEN2_TM_CH_SEL, channel->channel); ret = adc_tm5_write(chip, ADC_TM_GEN2_CH_CTL, &val, 1); if (ret < 0) { dev_err(chip->dev, "adc-tm channel disable failed with %d\n", ret); goto disable_fail; } val = 0; ret = adc_tm5_write(chip, 
ADC_TM_GEN2_MEAS_IRQ_EN, &val, 1); if (ret < 0) { dev_err(chip->dev, "adc-tm interrupt disable failed with %d\n", ret); goto disable_fail; } ret = adc_tm5_gen2_conv_req(channel->chip); if (ret < 0) dev_err(chip->dev, "adc-tm channel configure failed with %d\n", ret); disable_fail: mutex_unlock(&chip->adc_mutex_lock); return ret; } static int adc_tm5_enable(struct adc_tm5_chip *chip) { int ret; u8 data; data = ADC_TM_EN; ret = adc_tm5_write(chip, ADC_TM_EN_CTL1, &data, sizeof(data)); if (ret < 0) { dev_err(chip->dev, "adc-tm enable failed\n"); return ret; } data = ADC_TM_CONV_REQ_EN; ret = adc_tm5_write(chip, ADC_TM_CONV_REQ, &data, sizeof(data)); if (ret < 0) { dev_err(chip->dev, "adc-tm request conversion failed\n"); return ret; } return 0; } static int adc_tm5_configure(struct adc_tm5_channel *channel, int low, int high) { struct adc_tm5_chip *chip = channel->chip; u8 buf[8]; u16 reg = ADC_TM5_M_ADC_CH_SEL_CTL(channel->channel); int ret; ret = adc_tm5_read(chip, reg, buf, sizeof(buf)); if (ret) { dev_err(chip->dev, "channel %d params read failed: %d\n", channel->channel, ret); return ret; } buf[0] = channel->adc_channel; /* High temperature corresponds to low voltage threshold */ if (high != INT_MAX) { u16 adc_code = qcom_adc_tm5_temp_volt_scale(channel->prescale, chip->data->full_scale_code_volt, high); put_unaligned_le16(adc_code, &buf[1]); buf[7] |= ADC_TM5_M_LOW_THR_INT_EN; } else { buf[7] &= ~ADC_TM5_M_LOW_THR_INT_EN; } /* Low temperature corresponds to high voltage threshold */ if (low != -INT_MAX) { u16 adc_code = qcom_adc_tm5_temp_volt_scale(channel->prescale, chip->data->full_scale_code_volt, low); put_unaligned_le16(adc_code, &buf[3]); buf[7] |= ADC_TM5_M_HIGH_THR_INT_EN; } else { buf[7] &= ~ADC_TM5_M_HIGH_THR_INT_EN; } buf[5] = ADC5_TIMER_SEL_2; /* Set calibration select, hw_settle delay */ buf[6] &= ~ADC_TM5_M_CTL_HW_SETTLE_DELAY_MASK; buf[6] |= FIELD_PREP(ADC_TM5_M_CTL_HW_SETTLE_DELAY_MASK, channel->hw_settle_time); buf[6] &= 
~ADC_TM5_M_CTL_CAL_SEL_MASK; buf[6] |= FIELD_PREP(ADC_TM5_M_CTL_CAL_SEL_MASK, channel->cal_method); buf[7] |= ADC_TM5_M_MEAS_EN; ret = adc_tm5_write(chip, reg, buf, sizeof(buf)); if (ret) { dev_err(chip->dev, "channel %d params write failed: %d\n", channel->channel, ret); return ret; } return adc_tm5_enable(chip); } static int adc_tm5_gen2_configure(struct adc_tm5_channel *channel, int low, int high) { struct adc_tm5_chip *chip = channel->chip; int ret; u8 buf[14]; u16 adc_code; mutex_lock(&chip->adc_mutex_lock); channel->meas_en = true; ret = adc_tm5_read(chip, ADC_TM_GEN2_SID, buf, sizeof(buf)); if (ret < 0) { dev_err(chip->dev, "adc-tm block read failed with %d\n", ret); goto config_fail; } /* Set SID from virtual channel number */ buf[0] = channel->adc_channel >> 8; /* Set TM channel number used and measurement interval */ buf[1] &= ~ADC_TM_GEN2_TM_CH_SEL; buf[1] |= FIELD_PREP(ADC_TM_GEN2_TM_CH_SEL, channel->channel); buf[1] &= ~ADC_TM_GEN2_MEAS_INT_SEL; buf[1] |= FIELD_PREP(ADC_TM_GEN2_MEAS_INT_SEL, MEAS_INT_1S); buf[2] &= ~ADC_TM_GEN2_CTL_DEC_RATIO_MASK; buf[2] |= FIELD_PREP(ADC_TM_GEN2_CTL_DEC_RATIO_MASK, channel->decimation); buf[2] &= ~ADC_TM_GEN2_CTL_CAL_SEL; buf[2] |= FIELD_PREP(ADC_TM_GEN2_CTL_CAL_SEL, channel->cal_method); buf[3] = channel->avg_samples | ADC_TM_GEN2_FAST_AVG_EN; buf[4] = channel->adc_channel & 0xff; buf[5] = channel->hw_settle_time & ADC_TM_GEN2_HW_SETTLE_DELAY; /* High temperature corresponds to low voltage threshold */ if (high != INT_MAX) { channel->low_thr_en = true; adc_code = qcom_adc_tm5_gen2_temp_res_scale(high); put_unaligned_le16(adc_code, &buf[9]); } else { channel->low_thr_en = false; } /* Low temperature corresponds to high voltage threshold */ if (low != -INT_MAX) { channel->high_thr_en = true; adc_code = qcom_adc_tm5_gen2_temp_res_scale(low); put_unaligned_le16(adc_code, &buf[11]); } else { channel->high_thr_en = false; } buf[13] = ADC_TM_GEN2_MEAS_EN; if (channel->high_thr_en) buf[13] |= ADC_TM5_GEN2_HIGH_THR_INT_EN; if 
(channel->low_thr_en) buf[13] |= ADC_TM5_GEN2_LOW_THR_INT_EN; ret = adc_tm5_write(chip, ADC_TM_GEN2_SID, buf, sizeof(buf)); if (ret) { dev_err(chip->dev, "channel %d params write failed: %d\n", channel->channel, ret); goto config_fail; } ret = adc_tm5_gen2_conv_req(channel->chip); if (ret < 0) dev_err(chip->dev, "adc-tm channel configure failed with %d\n", ret); config_fail: mutex_unlock(&chip->adc_mutex_lock); return ret; } static int adc_tm5_set_trips(struct thermal_zone_device *tz, int low, int high) { struct adc_tm5_channel *channel = thermal_zone_device_priv(tz); struct adc_tm5_chip *chip; int ret; if (!channel) return -EINVAL; chip = channel->chip; dev_dbg(chip->dev, "%d:low(mdegC):%d, high(mdegC):%d\n", channel->channel, low, high); if (high == INT_MAX && low <= -INT_MAX) ret = chip->data->disable_channel(channel); else ret = chip->data->configure(channel, low, high); return ret; } static const struct thermal_zone_device_ops adc_tm5_thermal_ops = { .get_temp = adc_tm5_get_temp, .set_trips = adc_tm5_set_trips, }; static int adc_tm5_register_tzd(struct adc_tm5_chip *adc_tm) { unsigned int i; struct thermal_zone_device *tzd; for (i = 0; i < adc_tm->nchannels; i++) { adc_tm->channels[i].chip = adc_tm; tzd = devm_thermal_of_zone_register(adc_tm->dev, adc_tm->channels[i].channel, &adc_tm->channels[i], &adc_tm5_thermal_ops); if (IS_ERR(tzd)) { if (PTR_ERR(tzd) == -ENODEV) { dev_dbg(adc_tm->dev, "thermal sensor on channel %d is not used\n", adc_tm->channels[i].channel); continue; } dev_err(adc_tm->dev, "Error registering TZ zone for channel %d: %ld\n", adc_tm->channels[i].channel, PTR_ERR(tzd)); return PTR_ERR(tzd); } adc_tm->channels[i].tzd = tzd; devm_thermal_add_hwmon_sysfs(adc_tm->dev, tzd); } return 0; } static int adc_tm_hc_init(struct adc_tm5_chip *chip) { unsigned int i; u8 buf[2]; int ret; for (i = 0; i < chip->nchannels; i++) { if (chip->channels[i].channel >= ADC_TM5_NUM_CHANNELS) { dev_err(chip->dev, "Invalid channel %d\n", chip->channels[i].channel); 
return -EINVAL; } } buf[0] = chip->decimation; buf[1] = chip->avg_samples | ADC_TM5_FAST_AVG_EN; ret = adc_tm5_write(chip, ADC_TM5_ADC_DIG_PARAM, buf, sizeof(buf)); if (ret) dev_err(chip->dev, "block write failed: %d\n", ret); return ret; } static int adc_tm5_init(struct adc_tm5_chip *chip) { u8 buf[4], channels_available; int ret; unsigned int i; ret = adc_tm5_read(chip, ADC_TM5_NUM_BTM, &channels_available, sizeof(channels_available)); if (ret) { dev_err(chip->dev, "read failed for BTM channels\n"); return ret; } for (i = 0; i < chip->nchannels; i++) { if (chip->channels[i].channel >= channels_available) { dev_err(chip->dev, "Invalid channel %d\n", chip->channels[i].channel); return -EINVAL; } } buf[0] = chip->decimation; buf[1] = chip->avg_samples | ADC_TM5_FAST_AVG_EN; buf[2] = ADC_TM5_TIMER1; buf[3] = FIELD_PREP(ADC_TM5_MEAS_INTERVAL_CTL2_MASK, ADC_TM5_TIMER2) | FIELD_PREP(ADC_TM5_MEAS_INTERVAL_CTL3_MASK, ADC_TM5_TIMER3); ret = adc_tm5_write(chip, ADC_TM5_ADC_DIG_PARAM, buf, sizeof(buf)); if (ret) { dev_err(chip->dev, "block write failed: %d\n", ret); return ret; } return ret; } static int adc_tm5_gen2_init(struct adc_tm5_chip *chip) { u8 channels_available; int ret; unsigned int i; ret = adc_tm5_read(chip, ADC_TM5_NUM_BTM, &channels_available, sizeof(channels_available)); if (ret) { dev_err(chip->dev, "read failed for BTM channels\n"); return ret; } for (i = 0; i < chip->nchannels; i++) { if (chip->channels[i].channel >= channels_available) { dev_err(chip->dev, "Invalid channel %d\n", chip->channels[i].channel); return -EINVAL; } } mutex_init(&chip->adc_mutex_lock); return ret; } static int adc_tm5_get_dt_channel_data(struct adc_tm5_chip *adc_tm, struct adc_tm5_channel *channel, struct device_node *node) { const char *name = node->name; u32 chan, value, adc_channel, varr[2]; int ret; struct device *dev = adc_tm->dev; struct of_phandle_args args; ret = of_property_read_u32(node, "reg", &chan); if (ret) { dev_err(dev, "%s: invalid channel number %d\n", name, 
ret); return ret; } if (chan >= ADC_TM5_NUM_CHANNELS) { dev_err(dev, "%s: channel number too big: %d\n", name, chan); return -EINVAL; } channel->channel = chan; /* * We are tied to PMIC's ADC controller, which always use single * argument for channel number. So don't bother parsing * #io-channel-cells, just enforce cell_count = 1. */ ret = of_parse_phandle_with_fixed_args(node, "io-channels", 1, 0, &args); if (ret < 0) { dev_err(dev, "%s: error parsing ADC channel number %d: %d\n", name, chan, ret); return ret; } of_node_put(args.np); if (args.args_count != 1) { dev_err(dev, "%s: invalid args count for ADC channel %d\n", name, chan); return -EINVAL; } adc_channel = args.args[0]; if (adc_tm->data->gen == ADC_TM5_GEN2) adc_channel &= 0xff; if (adc_channel >= ADC5_MAX_CHANNEL) { dev_err(dev, "%s: invalid ADC channel number %d\n", name, chan); return -EINVAL; } channel->adc_channel = args.args[0]; channel->iio = devm_fwnode_iio_channel_get_by_name(adc_tm->dev, of_fwnode_handle(node), NULL); if (IS_ERR(channel->iio)) { ret = PTR_ERR(channel->iio); if (ret != -EPROBE_DEFER) dev_err(dev, "%s: error getting channel: %d\n", name, ret); return ret; } ret = of_property_read_u32_array(node, "qcom,pre-scaling", varr, 2); if (!ret) { ret = qcom_adc5_prescaling_from_dt(varr[0], varr[1]); if (ret < 0) { dev_err(dev, "%s: invalid pre-scaling <%d %d>\n", name, varr[0], varr[1]); return ret; } channel->prescale = ret; } else { /* 1:1 prescale is index 0 */ channel->prescale = 0; } ret = of_property_read_u32(node, "qcom,hw-settle-time-us", &value); if (!ret) { ret = qcom_adc5_hw_settle_time_from_dt(value, adc_tm->data->hw_settle); if (ret < 0) { dev_err(dev, "%s invalid hw-settle-time-us %d us\n", name, value); return ret; } channel->hw_settle_time = ret; } else { channel->hw_settle_time = VADC_DEF_HW_SETTLE_TIME; } if (of_property_read_bool(node, "qcom,ratiometric")) channel->cal_method = ADC_TM5_RATIOMETRIC_CAL; else channel->cal_method = ADC_TM5_ABSOLUTE_CAL; if (adc_tm->data->gen 
== ADC_TM5_GEN2) { ret = of_property_read_u32(node, "qcom,decimation", &value); if (!ret) { ret = qcom_adc5_decimation_from_dt(value, adc_tm->data->decimation); if (ret < 0) { dev_err(dev, "invalid decimation %d\n", value); return ret; } channel->decimation = ret; } else { channel->decimation = ADC5_DECIMATION_DEFAULT; } ret = of_property_read_u32(node, "qcom,avg-samples", &value); if (!ret) { ret = qcom_adc5_avg_samples_from_dt(value); if (ret < 0) { dev_err(dev, "invalid avg-samples %d\n", value); return ret; } channel->avg_samples = ret; } else { channel->avg_samples = VADC_DEF_AVG_SAMPLES; } } return 0; } static const struct adc_tm5_data adc_tm5_data_pmic = { .full_scale_code_volt = 0x70e4, .decimation = (unsigned int []) { 250, 420, 840 }, .hw_settle = (unsigned int []) { 15, 100, 200, 300, 400, 500, 600, 700, 1000, 2000, 4000, 8000, 16000, 32000, 64000, 128000 }, .disable_channel = adc_tm5_disable_channel, .configure = adc_tm5_configure, .isr = adc_tm5_isr, .init = adc_tm5_init, .irq_name = "pm-adc-tm5", .gen = ADC_TM5, }; static const struct adc_tm5_data adc_tm_hc_data_pmic = { .full_scale_code_volt = 0x70e4, .decimation = (unsigned int []) { 256, 512, 1024 }, .hw_settle = (unsigned int []) { 0, 100, 200, 300, 400, 500, 600, 700, 1000, 2000, 4000, 6000, 8000, 10000 }, .disable_channel = adc_tm5_disable_channel, .configure = adc_tm5_configure, .isr = adc_tm5_isr, .init = adc_tm_hc_init, .irq_name = "pm-adc-tm5", .gen = ADC_TM_HC, }; static const struct adc_tm5_data adc_tm5_gen2_data_pmic = { .full_scale_code_volt = 0x70e4, .decimation = (unsigned int []) { 85, 340, 1360 }, .hw_settle = (unsigned int []) { 15, 100, 200, 300, 400, 500, 600, 700, 1000, 2000, 4000, 8000, 16000, 32000, 64000, 128000 }, .disable_channel = adc_tm5_gen2_disable_channel, .configure = adc_tm5_gen2_configure, .isr = adc_tm5_gen2_isr, .init = adc_tm5_gen2_init, .irq_name = "pm-adc-tm5-gen2", .gen = ADC_TM5_GEN2, }; static int adc_tm5_get_dt_data(struct adc_tm5_chip *adc_tm, struct 
device_node *node) { struct adc_tm5_channel *channels; struct device_node *child; u32 value; int ret; struct device *dev = adc_tm->dev; adc_tm->nchannels = of_get_available_child_count(node); if (!adc_tm->nchannels) return -EINVAL; adc_tm->channels = devm_kcalloc(dev, adc_tm->nchannels, sizeof(*adc_tm->channels), GFP_KERNEL); if (!adc_tm->channels) return -ENOMEM; channels = adc_tm->channels; adc_tm->data = of_device_get_match_data(dev); if (!adc_tm->data) adc_tm->data = &adc_tm5_data_pmic; ret = of_property_read_u32(node, "qcom,decimation", &value); if (!ret) { ret = qcom_adc5_decimation_from_dt(value, adc_tm->data->decimation); if (ret < 0) { dev_err(dev, "invalid decimation %d\n", value); return ret; } adc_tm->decimation = ret; } else { adc_tm->decimation = ADC5_DECIMATION_DEFAULT; } ret = of_property_read_u32(node, "qcom,avg-samples", &value); if (!ret) { ret = qcom_adc5_avg_samples_from_dt(value); if (ret < 0) { dev_err(dev, "invalid avg-samples %d\n", value); return ret; } adc_tm->avg_samples = ret; } else { adc_tm->avg_samples = VADC_DEF_AVG_SAMPLES; } for_each_available_child_of_node(node, child) { ret = adc_tm5_get_dt_channel_data(adc_tm, channels, child); if (ret) { of_node_put(child); return ret; } channels++; } return 0; } static int adc_tm5_probe(struct platform_device *pdev) { struct device_node *node = pdev->dev.of_node; struct device *dev = &pdev->dev; struct adc_tm5_chip *adc_tm; struct regmap *regmap; int ret, irq; u32 reg; regmap = dev_get_regmap(dev->parent, NULL); if (!regmap) return -ENODEV; ret = of_property_read_u32(node, "reg", &reg); if (ret) return ret; adc_tm = devm_kzalloc(&pdev->dev, sizeof(*adc_tm), GFP_KERNEL); if (!adc_tm) return -ENOMEM; adc_tm->regmap = regmap; adc_tm->dev = dev; adc_tm->base = reg; irq = platform_get_irq(pdev, 0); if (irq < 0) return irq; ret = adc_tm5_get_dt_data(adc_tm, node); if (ret) return dev_err_probe(dev, ret, "get dt data failed\n"); ret = adc_tm->data->init(adc_tm); if (ret) { dev_err(dev, "adc-tm init 
failed\n"); return ret; } ret = adc_tm5_register_tzd(adc_tm); if (ret) { dev_err(dev, "tzd register failed\n"); return ret; } return devm_request_threaded_irq(dev, irq, NULL, adc_tm->data->isr, IRQF_ONESHOT, adc_tm->data->irq_name, adc_tm); } static const struct of_device_id adc_tm5_match_table[] = { { .compatible = "qcom,spmi-adc-tm5", .data = &adc_tm5_data_pmic, }, { .compatible = "qcom,spmi-adc-tm-hc", .data = &adc_tm_hc_data_pmic, }, { .compatible = "qcom,spmi-adc-tm5-gen2", .data = &adc_tm5_gen2_data_pmic, }, { } }; MODULE_DEVICE_TABLE(of, adc_tm5_match_table); static struct platform_driver adc_tm5_driver = { .driver = { .name = "qcom-spmi-adc-tm5", .of_match_table = adc_tm5_match_table, }, .probe = adc_tm5_probe, }; module_platform_driver(adc_tm5_driver); MODULE_DESCRIPTION("SPMI PMIC Thermal Monitor ADC driver"); MODULE_LICENSE("GPL v2");
linux-master
drivers/thermal/qcom/qcom-spmi-adc-tm5.c
// SPDX-License-Identifier: GPL-2.0 /* * Copyright (c) 2019, Linaro Limited */ #include <linux/bitops.h> #include <linux/regmap.h> #include <linux/delay.h> #include <linux/slab.h> #include "tsens.h" /* ----- SROT ------ */ #define SROT_HW_VER_OFF 0x0000 #define SROT_CTRL_OFF 0x0004 /* ----- TM ------ */ #define TM_INT_EN_OFF 0x0000 #define TM_Sn_UPPER_LOWER_STATUS_CTRL_OFF 0x0004 #define TM_Sn_STATUS_OFF 0x0044 #define TM_TRDY_OFF 0x0084 #define TM_HIGH_LOW_INT_STATUS_OFF 0x0088 #define TM_HIGH_LOW_Sn_INT_THRESHOLD_OFF 0x0090 static struct tsens_legacy_calibration_format tsens_qcs404_nvmem = { .base_len = 8, .base_shift = 2, .sp_len = 6, .mode = { 4, 0 }, .invalid = { 4, 2 }, .base = { { 4, 3 }, { 4, 11 } }, .sp = { { { 0, 0 }, { 0, 6 } }, { { 0, 12 }, { 0, 18 } }, { { 0, 24 }, { 0, 30 } }, { { 1, 4 }, { 1, 10 } }, { { 1, 16 }, { 1, 22 } }, { { 2, 0 }, { 2, 6 } }, { { 2, 12 }, { 2, 18 } }, { { 2, 24 }, { 2, 30 } }, { { 3, 4 }, { 3, 10 } }, { { 3, 16 }, { 3, 22 } }, }, }; static int calibrate_v1(struct tsens_priv *priv) { u32 p1[10], p2[10]; u32 *qfprom_cdata; int mode, ret; ret = tsens_calibrate_common(priv); if (!ret) return 0; qfprom_cdata = (u32 *)qfprom_read(priv->dev, "calib"); if (IS_ERR(qfprom_cdata)) return PTR_ERR(qfprom_cdata); mode = tsens_read_calibration_legacy(priv, &tsens_qcs404_nvmem, p1, p2, qfprom_cdata, NULL); compute_intercept_slope(priv, p1, p2, mode); kfree(qfprom_cdata); return 0; } /* v1.x: msm8956,8976,qcs404,405 */ static struct tsens_features tsens_v1_feat = { .ver_major = VER_1_X, .crit_int = 0, .combo_int = 0, .adc = 1, .srot_split = 1, .max_sensors = 11, .trip_min_temp = -40000, .trip_max_temp = 120000, }; static const struct reg_field tsens_v1_regfields[MAX_REGFIELDS] = { /* ----- SROT ------ */ /* VERSION */ [VER_MAJOR] = REG_FIELD(SROT_HW_VER_OFF, 28, 31), [VER_MINOR] = REG_FIELD(SROT_HW_VER_OFF, 16, 27), [VER_STEP] = REG_FIELD(SROT_HW_VER_OFF, 0, 15), /* CTRL_OFFSET */ [TSENS_EN] = REG_FIELD(SROT_CTRL_OFF, 0, 0), [TSENS_SW_RST] = 
REG_FIELD(SROT_CTRL_OFF, 1, 1), [SENSOR_EN] = REG_FIELD(SROT_CTRL_OFF, 3, 13), /* ----- TM ------ */ /* INTERRUPT ENABLE */ [INT_EN] = REG_FIELD(TM_INT_EN_OFF, 0, 0), /* UPPER/LOWER TEMPERATURE THRESHOLDS */ REG_FIELD_FOR_EACH_SENSOR11(LOW_THRESH, TM_Sn_UPPER_LOWER_STATUS_CTRL_OFF, 0, 9), REG_FIELD_FOR_EACH_SENSOR11(UP_THRESH, TM_Sn_UPPER_LOWER_STATUS_CTRL_OFF, 10, 19), /* UPPER/LOWER INTERRUPTS [CLEAR/STATUS] */ REG_FIELD_FOR_EACH_SENSOR11(LOW_INT_CLEAR, TM_Sn_UPPER_LOWER_STATUS_CTRL_OFF, 20, 20), REG_FIELD_FOR_EACH_SENSOR11(UP_INT_CLEAR, TM_Sn_UPPER_LOWER_STATUS_CTRL_OFF, 21, 21), [LOW_INT_STATUS_0] = REG_FIELD(TM_HIGH_LOW_INT_STATUS_OFF, 0, 0), [LOW_INT_STATUS_1] = REG_FIELD(TM_HIGH_LOW_INT_STATUS_OFF, 1, 1), [LOW_INT_STATUS_2] = REG_FIELD(TM_HIGH_LOW_INT_STATUS_OFF, 2, 2), [LOW_INT_STATUS_3] = REG_FIELD(TM_HIGH_LOW_INT_STATUS_OFF, 3, 3), [LOW_INT_STATUS_4] = REG_FIELD(TM_HIGH_LOW_INT_STATUS_OFF, 4, 4), [LOW_INT_STATUS_5] = REG_FIELD(TM_HIGH_LOW_INT_STATUS_OFF, 5, 5), [LOW_INT_STATUS_6] = REG_FIELD(TM_HIGH_LOW_INT_STATUS_OFF, 6, 6), [LOW_INT_STATUS_7] = REG_FIELD(TM_HIGH_LOW_INT_STATUS_OFF, 7, 7), [UP_INT_STATUS_0] = REG_FIELD(TM_HIGH_LOW_INT_STATUS_OFF, 8, 8), [UP_INT_STATUS_1] = REG_FIELD(TM_HIGH_LOW_INT_STATUS_OFF, 9, 9), [UP_INT_STATUS_2] = REG_FIELD(TM_HIGH_LOW_INT_STATUS_OFF, 10, 10), [UP_INT_STATUS_3] = REG_FIELD(TM_HIGH_LOW_INT_STATUS_OFF, 11, 11), [UP_INT_STATUS_4] = REG_FIELD(TM_HIGH_LOW_INT_STATUS_OFF, 12, 12), [UP_INT_STATUS_5] = REG_FIELD(TM_HIGH_LOW_INT_STATUS_OFF, 13, 13), [UP_INT_STATUS_6] = REG_FIELD(TM_HIGH_LOW_INT_STATUS_OFF, 14, 14), [UP_INT_STATUS_7] = REG_FIELD(TM_HIGH_LOW_INT_STATUS_OFF, 15, 15), /* NO CRITICAL INTERRUPT SUPPORT on v1 */ /* Sn_STATUS */ REG_FIELD_FOR_EACH_SENSOR11(LAST_TEMP, TM_Sn_STATUS_OFF, 0, 9), REG_FIELD_FOR_EACH_SENSOR11(VALID, TM_Sn_STATUS_OFF, 14, 14), /* xxx_STATUS bits: 1 == threshold violated */ REG_FIELD_FOR_EACH_SENSOR11(MIN_STATUS, TM_Sn_STATUS_OFF, 10, 10), REG_FIELD_FOR_EACH_SENSOR11(LOWER_STATUS, 
TM_Sn_STATUS_OFF, 11, 11), REG_FIELD_FOR_EACH_SENSOR11(UPPER_STATUS, TM_Sn_STATUS_OFF, 12, 12), /* No CRITICAL field on v1.x */ REG_FIELD_FOR_EACH_SENSOR11(MAX_STATUS, TM_Sn_STATUS_OFF, 13, 13), /* TRDY: 1=ready, 0=in progress */ [TRDY] = REG_FIELD(TM_TRDY_OFF, 0, 0), }; static int __init init_8956(struct tsens_priv *priv) { priv->sensor[0].slope = 3313; priv->sensor[1].slope = 3275; priv->sensor[2].slope = 3320; priv->sensor[3].slope = 3246; priv->sensor[4].slope = 3279; priv->sensor[5].slope = 3257; priv->sensor[6].slope = 3234; priv->sensor[7].slope = 3269; priv->sensor[8].slope = 3255; priv->sensor[9].slope = 3239; priv->sensor[10].slope = 3286; return init_common(priv); } static const struct tsens_ops ops_generic_v1 = { .init = init_common, .calibrate = calibrate_v1, .get_temp = get_temp_tsens_valid, }; struct tsens_plat_data data_tsens_v1 = { .ops = &ops_generic_v1, .feat = &tsens_v1_feat, .fields = tsens_v1_regfields, }; static const struct tsens_ops ops_8956 = { .init = init_8956, .calibrate = tsens_calibrate_common, .get_temp = get_temp_tsens_valid, }; struct tsens_plat_data data_8956 = { .num_sensors = 11, .ops = &ops_8956, .feat = &tsens_v1_feat, .fields = tsens_v1_regfields, }; static const struct tsens_ops ops_8976 = { .init = init_common, .calibrate = tsens_calibrate_common, .get_temp = get_temp_tsens_valid, }; struct tsens_plat_data data_8976 = { .num_sensors = 11, .ops = &ops_8976, .feat = &tsens_v1_feat, .fields = tsens_v1_regfields, };
linux-master
drivers/thermal/qcom/tsens-v1.c
// SPDX-License-Identifier: GPL-2.0 /* * Copyright (c) 2015, The Linux Foundation. All rights reserved. * Copyright (c) 2019, 2020, Linaro Ltd. */ #include <linux/debugfs.h> #include <linux/err.h> #include <linux/io.h> #include <linux/module.h> #include <linux/nvmem-consumer.h> #include <linux/of.h> #include <linux/of_address.h> #include <linux/of_platform.h> #include <linux/mfd/syscon.h> #include <linux/platform_device.h> #include <linux/pm.h> #include <linux/regmap.h> #include <linux/slab.h> #include <linux/thermal.h> #include "../thermal_hwmon.h" #include "tsens.h" /** * struct tsens_irq_data - IRQ status and temperature violations * @up_viol: upper threshold violated * @up_thresh: upper threshold temperature value * @up_irq_mask: mask register for upper threshold irqs * @up_irq_clear: clear register for uppper threshold irqs * @low_viol: lower threshold violated * @low_thresh: lower threshold temperature value * @low_irq_mask: mask register for lower threshold irqs * @low_irq_clear: clear register for lower threshold irqs * @crit_viol: critical threshold violated * @crit_thresh: critical threshold temperature value * @crit_irq_mask: mask register for critical threshold irqs * @crit_irq_clear: clear register for critical threshold irqs * * Structure containing data about temperature threshold settings and * irq status if they were violated. 
*/ struct tsens_irq_data { u32 up_viol; int up_thresh; u32 up_irq_mask; u32 up_irq_clear; u32 low_viol; int low_thresh; u32 low_irq_mask; u32 low_irq_clear; u32 crit_viol; u32 crit_thresh; u32 crit_irq_mask; u32 crit_irq_clear; }; char *qfprom_read(struct device *dev, const char *cname) { struct nvmem_cell *cell; ssize_t data; char *ret; cell = nvmem_cell_get(dev, cname); if (IS_ERR(cell)) return ERR_CAST(cell); ret = nvmem_cell_read(cell, &data); nvmem_cell_put(cell); return ret; } int tsens_read_calibration(struct tsens_priv *priv, int shift, u32 *p1, u32 *p2, bool backup) { u32 mode; u32 base1, base2; char name[] = "sXX_pY_backup"; /* s10_p1_backup */ int i, ret; if (priv->num_sensors > MAX_SENSORS) return -EINVAL; ret = snprintf(name, sizeof(name), "mode%s", backup ? "_backup" : ""); if (ret < 0) return ret; ret = nvmem_cell_read_variable_le_u32(priv->dev, name, &mode); if (ret == -ENOENT) dev_warn(priv->dev, "Please migrate to separate nvmem cells for calibration data\n"); if (ret < 0) return ret; dev_dbg(priv->dev, "calibration mode is %d\n", mode); ret = snprintf(name, sizeof(name), "base1%s", backup ? "_backup" : ""); if (ret < 0) return ret; ret = nvmem_cell_read_variable_le_u32(priv->dev, name, &base1); if (ret < 0) return ret; ret = snprintf(name, sizeof(name), "base2%s", backup ? "_backup" : ""); if (ret < 0) return ret; ret = nvmem_cell_read_variable_le_u32(priv->dev, name, &base2); if (ret < 0) return ret; for (i = 0; i < priv->num_sensors; i++) { ret = snprintf(name, sizeof(name), "s%d_p1%s", priv->sensor[i].hw_id, backup ? "_backup" : ""); if (ret < 0) return ret; ret = nvmem_cell_read_variable_le_u32(priv->dev, name, &p1[i]); if (ret) return ret; ret = snprintf(name, sizeof(name), "s%d_p2%s", priv->sensor[i].hw_id, backup ? 
"_backup" : ""); if (ret < 0) return ret; ret = nvmem_cell_read_variable_le_u32(priv->dev, name, &p2[i]); if (ret) return ret; } switch (mode) { case ONE_PT_CALIB: for (i = 0; i < priv->num_sensors; i++) p1[i] = p1[i] + (base1 << shift); break; case TWO_PT_CALIB: case TWO_PT_CALIB_NO_OFFSET: for (i = 0; i < priv->num_sensors; i++) p2[i] = (p2[i] + base2) << shift; fallthrough; case ONE_PT_CALIB2: case ONE_PT_CALIB2_NO_OFFSET: for (i = 0; i < priv->num_sensors; i++) p1[i] = (p1[i] + base1) << shift; break; default: dev_dbg(priv->dev, "calibrationless mode\n"); for (i = 0; i < priv->num_sensors; i++) { p1[i] = 500; p2[i] = 780; } } /* Apply calibration offset workaround except for _NO_OFFSET modes */ switch (mode) { case TWO_PT_CALIB: for (i = 0; i < priv->num_sensors; i++) p2[i] += priv->sensor[i].p2_calib_offset; fallthrough; case ONE_PT_CALIB2: for (i = 0; i < priv->num_sensors; i++) p1[i] += priv->sensor[i].p1_calib_offset; break; } return mode; } int tsens_calibrate_nvmem(struct tsens_priv *priv, int shift) { u32 p1[MAX_SENSORS], p2[MAX_SENSORS]; int mode; mode = tsens_read_calibration(priv, shift, p1, p2, false); if (mode < 0) return mode; compute_intercept_slope(priv, p1, p2, mode); return 0; } int tsens_calibrate_common(struct tsens_priv *priv) { return tsens_calibrate_nvmem(priv, 2); } static u32 tsens_read_cell(const struct tsens_single_value *cell, u8 len, u32 *data0, u32 *data1) { u32 val; u32 *data = cell->blob ? 
data1 : data0; if (cell->shift + len <= 32) { val = data[cell->idx] >> cell->shift; } else { u8 part = 32 - cell->shift; val = data[cell->idx] >> cell->shift; val |= data[cell->idx + 1] << part; } return val & ((1 << len) - 1); } int tsens_read_calibration_legacy(struct tsens_priv *priv, const struct tsens_legacy_calibration_format *format, u32 *p1, u32 *p2, u32 *cdata0, u32 *cdata1) { u32 mode, invalid; u32 base1, base2; int i; mode = tsens_read_cell(&format->mode, 2, cdata0, cdata1); invalid = tsens_read_cell(&format->invalid, 1, cdata0, cdata1); if (invalid) mode = NO_PT_CALIB; dev_dbg(priv->dev, "calibration mode is %d\n", mode); base1 = tsens_read_cell(&format->base[0], format->base_len, cdata0, cdata1); base2 = tsens_read_cell(&format->base[1], format->base_len, cdata0, cdata1); for (i = 0; i < priv->num_sensors; i++) { p1[i] = tsens_read_cell(&format->sp[i][0], format->sp_len, cdata0, cdata1); p2[i] = tsens_read_cell(&format->sp[i][1], format->sp_len, cdata0, cdata1); } switch (mode) { case ONE_PT_CALIB: for (i = 0; i < priv->num_sensors; i++) p1[i] = p1[i] + (base1 << format->base_shift); break; case TWO_PT_CALIB: for (i = 0; i < priv->num_sensors; i++) p2[i] = (p2[i] + base2) << format->base_shift; fallthrough; case ONE_PT_CALIB2: for (i = 0; i < priv->num_sensors; i++) p1[i] = (p1[i] + base1) << format->base_shift; break; default: dev_dbg(priv->dev, "calibrationless mode\n"); for (i = 0; i < priv->num_sensors; i++) { p1[i] = 500; p2[i] = 780; } } return mode; } /* * Use this function on devices where slope and offset calculations * depend on calibration data read from qfprom. On others the slope * and offset values are derived from tz->tzp->slope and tz->tzp->offset * resp. 
*/ void compute_intercept_slope(struct tsens_priv *priv, u32 *p1, u32 *p2, u32 mode) { int i; int num, den; for (i = 0; i < priv->num_sensors; i++) { dev_dbg(priv->dev, "%s: sensor%d - data_point1:%#x data_point2:%#x\n", __func__, i, p1[i], p2[i]); if (!priv->sensor[i].slope) priv->sensor[i].slope = SLOPE_DEFAULT; if (mode == TWO_PT_CALIB || mode == TWO_PT_CALIB_NO_OFFSET) { /* * slope (m) = adc_code2 - adc_code1 (y2 - y1)/ * temp_120_degc - temp_30_degc (x2 - x1) */ num = p2[i] - p1[i]; num *= SLOPE_FACTOR; den = CAL_DEGC_PT2 - CAL_DEGC_PT1; priv->sensor[i].slope = num / den; } priv->sensor[i].offset = (p1[i] * SLOPE_FACTOR) - (CAL_DEGC_PT1 * priv->sensor[i].slope); dev_dbg(priv->dev, "%s: offset:%d\n", __func__, priv->sensor[i].offset); } } static inline u32 degc_to_code(int degc, const struct tsens_sensor *s) { u64 code = div_u64(((u64)degc * s->slope + s->offset), SLOPE_FACTOR); pr_debug("%s: raw_code: 0x%llx, degc:%d\n", __func__, code, degc); return clamp_val(code, THRESHOLD_MIN_ADC_CODE, THRESHOLD_MAX_ADC_CODE); } static inline int code_to_degc(u32 adc_code, const struct tsens_sensor *s) { int degc, num, den; num = (adc_code * SLOPE_FACTOR) - s->offset; den = s->slope; if (num > 0) degc = num + (den / 2); else if (num < 0) degc = num - (den / 2); else degc = num; degc /= den; return degc; } /** * tsens_hw_to_mC - Return sign-extended temperature in mCelsius. * @s: Pointer to sensor struct * @field: Index into regmap_field array pointing to temperature data * * This function handles temperature returned in ADC code or deciCelsius * depending on IP version. 
* * Return: Temperature in milliCelsius on success, a negative errno will * be returned in error cases */ static int tsens_hw_to_mC(const struct tsens_sensor *s, int field) { struct tsens_priv *priv = s->priv; u32 resolution; u32 temp = 0; int ret; resolution = priv->fields[LAST_TEMP_0].msb - priv->fields[LAST_TEMP_0].lsb; ret = regmap_field_read(priv->rf[field], &temp); if (ret) return ret; /* Convert temperature from ADC code to milliCelsius */ if (priv->feat->adc) return code_to_degc(temp, s) * 1000; /* deciCelsius -> milliCelsius along with sign extension */ return sign_extend32(temp, resolution) * 100; } /** * tsens_mC_to_hw - Convert temperature to hardware register value * @s: Pointer to sensor struct * @temp: temperature in milliCelsius to be programmed to hardware * * This function outputs the value to be written to hardware in ADC code * or deciCelsius depending on IP version. * * Return: ADC code or temperature in deciCelsius. */ static int tsens_mC_to_hw(const struct tsens_sensor *s, int temp) { struct tsens_priv *priv = s->priv; /* milliC to adc code */ if (priv->feat->adc) return degc_to_code(temp / 1000, s); /* milliC to deciC */ return temp / 100; } static inline enum tsens_ver tsens_version(struct tsens_priv *priv) { return priv->feat->ver_major; } static void tsens_set_interrupt_v1(struct tsens_priv *priv, u32 hw_id, enum tsens_irq_type irq_type, bool enable) { u32 index = 0; switch (irq_type) { case UPPER: index = UP_INT_CLEAR_0 + hw_id; break; case LOWER: index = LOW_INT_CLEAR_0 + hw_id; break; case CRITICAL: /* No critical interrupts before v2 */ return; } regmap_field_write(priv->rf[index], enable ? 
0 : 1); } static void tsens_set_interrupt_v2(struct tsens_priv *priv, u32 hw_id, enum tsens_irq_type irq_type, bool enable) { u32 index_mask = 0, index_clear = 0; /* * To enable the interrupt flag for a sensor: * - clear the mask bit * To disable the interrupt flag for a sensor: * - Mask further interrupts for this sensor * - Write 1 followed by 0 to clear the interrupt */ switch (irq_type) { case UPPER: index_mask = UP_INT_MASK_0 + hw_id; index_clear = UP_INT_CLEAR_0 + hw_id; break; case LOWER: index_mask = LOW_INT_MASK_0 + hw_id; index_clear = LOW_INT_CLEAR_0 + hw_id; break; case CRITICAL: index_mask = CRIT_INT_MASK_0 + hw_id; index_clear = CRIT_INT_CLEAR_0 + hw_id; break; } if (enable) { regmap_field_write(priv->rf[index_mask], 0); } else { regmap_field_write(priv->rf[index_mask], 1); regmap_field_write(priv->rf[index_clear], 1); regmap_field_write(priv->rf[index_clear], 0); } } /** * tsens_set_interrupt - Set state of an interrupt * @priv: Pointer to tsens controller private data * @hw_id: Hardware ID aka. sensor number * @irq_type: irq_type from enum tsens_irq_type * @enable: false = disable, true = enable * * Call IP-specific function to set state of an interrupt * * Return: void */ static void tsens_set_interrupt(struct tsens_priv *priv, u32 hw_id, enum tsens_irq_type irq_type, bool enable) { dev_dbg(priv->dev, "[%u] %s: %s -> %s\n", hw_id, __func__, irq_type ? ((irq_type == 1) ? "UP" : "CRITICAL") : "LOW", enable ? "en" : "dis"); if (tsens_version(priv) > VER_1_X) tsens_set_interrupt_v2(priv, hw_id, irq_type, enable); else tsens_set_interrupt_v1(priv, hw_id, irq_type, enable); } /** * tsens_threshold_violated - Check if a sensor temperature violated a preset threshold * @priv: Pointer to tsens controller private data * @hw_id: Hardware ID aka. 
sensor number * @d: Pointer to irq state data * * Return: 0 if threshold was not violated, 1 if it was violated and negative * errno in case of errors */ static int tsens_threshold_violated(struct tsens_priv *priv, u32 hw_id, struct tsens_irq_data *d) { int ret; ret = regmap_field_read(priv->rf[UPPER_STATUS_0 + hw_id], &d->up_viol); if (ret) return ret; ret = regmap_field_read(priv->rf[LOWER_STATUS_0 + hw_id], &d->low_viol); if (ret) return ret; if (priv->feat->crit_int) { ret = regmap_field_read(priv->rf[CRITICAL_STATUS_0 + hw_id], &d->crit_viol); if (ret) return ret; } if (d->up_viol || d->low_viol || d->crit_viol) return 1; return 0; } static int tsens_read_irq_state(struct tsens_priv *priv, u32 hw_id, const struct tsens_sensor *s, struct tsens_irq_data *d) { int ret; ret = regmap_field_read(priv->rf[UP_INT_CLEAR_0 + hw_id], &d->up_irq_clear); if (ret) return ret; ret = regmap_field_read(priv->rf[LOW_INT_CLEAR_0 + hw_id], &d->low_irq_clear); if (ret) return ret; if (tsens_version(priv) > VER_1_X) { ret = regmap_field_read(priv->rf[UP_INT_MASK_0 + hw_id], &d->up_irq_mask); if (ret) return ret; ret = regmap_field_read(priv->rf[LOW_INT_MASK_0 + hw_id], &d->low_irq_mask); if (ret) return ret; ret = regmap_field_read(priv->rf[CRIT_INT_CLEAR_0 + hw_id], &d->crit_irq_clear); if (ret) return ret; ret = regmap_field_read(priv->rf[CRIT_INT_MASK_0 + hw_id], &d->crit_irq_mask); if (ret) return ret; d->crit_thresh = tsens_hw_to_mC(s, CRIT_THRESH_0 + hw_id); } else { /* No mask register on older TSENS */ d->up_irq_mask = 0; d->low_irq_mask = 0; d->crit_irq_clear = 0; d->crit_irq_mask = 0; d->crit_thresh = 0; } d->up_thresh = tsens_hw_to_mC(s, UP_THRESH_0 + hw_id); d->low_thresh = tsens_hw_to_mC(s, LOW_THRESH_0 + hw_id); dev_dbg(priv->dev, "[%u] %s%s: status(%u|%u|%u) | clr(%u|%u|%u) | mask(%u|%u|%u)\n", hw_id, __func__, (d->up_viol || d->low_viol || d->crit_viol) ? 
	        "(V)" : "",
		d->low_viol, d->up_viol, d->crit_viol,
		d->low_irq_clear, d->up_irq_clear, d->crit_irq_clear,
		d->low_irq_mask, d->up_irq_mask, d->crit_irq_mask);
	dev_dbg(priv->dev, "[%u] %s%s: thresh: (%d:%d:%d)\n", hw_id, __func__,
		(d->up_viol || d->low_viol || d->crit_viol) ? "(V)" : "",
		d->low_thresh, d->up_thresh, d->crit_thresh);

	return 0;
}

/* Return the mask bit for @hw_id, or 0 on IPs that have no irq mask register */
static inline u32 masked_irq(u32 hw_id, u32 mask, enum tsens_ver ver)
{
	if (ver > VER_1_X)
		return mask & (1 << hw_id);

	/* v1, v0.1 don't have a irq mask register */
	return 0;
}

/**
 * tsens_critical_irq_thread() - Threaded handler for critical interrupts
 * @irq: irq number
 * @data: tsens controller private data
 *
 * Check FSM watchdog bark status and clear if needed.
 * Check all sensors to find ones that violated their critical threshold limits.
 * Clear and then re-enable the interrupt.
 *
 * The level-triggered interrupt might deassert if the temperature returned to
 * within the threshold limits by the time the handler got scheduled. We
 * consider the irq to have been handled in that case.
* * Return: IRQ_HANDLED */ static irqreturn_t tsens_critical_irq_thread(int irq, void *data) { struct tsens_priv *priv = data; struct tsens_irq_data d; int temp, ret, i; u32 wdog_status, wdog_count; if (priv->feat->has_watchdog) { ret = regmap_field_read(priv->rf[WDOG_BARK_STATUS], &wdog_status); if (ret) return ret; if (wdog_status) { /* Clear WDOG interrupt */ regmap_field_write(priv->rf[WDOG_BARK_CLEAR], 1); regmap_field_write(priv->rf[WDOG_BARK_CLEAR], 0); ret = regmap_field_read(priv->rf[WDOG_BARK_COUNT], &wdog_count); if (ret) return ret; if (wdog_count) dev_dbg(priv->dev, "%s: watchdog count: %d\n", __func__, wdog_count); /* Fall through to handle critical interrupts if any */ } } for (i = 0; i < priv->num_sensors; i++) { const struct tsens_sensor *s = &priv->sensor[i]; u32 hw_id = s->hw_id; if (!s->tzd) continue; if (!tsens_threshold_violated(priv, hw_id, &d)) continue; ret = get_temp_tsens_valid(s, &temp); if (ret) { dev_err(priv->dev, "[%u] %s: error reading sensor\n", hw_id, __func__); continue; } tsens_read_irq_state(priv, hw_id, s, &d); if (d.crit_viol && !masked_irq(hw_id, d.crit_irq_mask, tsens_version(priv))) { /* Mask critical interrupts, unused on Linux */ tsens_set_interrupt(priv, hw_id, CRITICAL, false); } } return IRQ_HANDLED; } /** * tsens_irq_thread - Threaded interrupt handler for uplow interrupts * @irq: irq number * @data: tsens controller private data * * Check all sensors to find ones that violated their threshold limits. If the * temperature is still outside the limits, call thermal_zone_device_update() to * update the thresholds, else re-enable the interrupts. * * The level-triggered interrupt might deassert if the temperature returned to * within the threshold limits by the time the handler got scheduled. We * consider the irq to have been handled in that case. 
 *
 * Return: IRQ_HANDLED
 */
static irqreturn_t tsens_irq_thread(int irq, void *data)
{
	struct tsens_priv *priv = data;
	struct tsens_irq_data d;
	int i;

	for (i = 0; i < priv->num_sensors; i++) {
		const struct tsens_sensor *s = &priv->sensor[i];
		u32 hw_id = s->hw_id;

		/* Skip sensors with no registered thermal zone */
		if (!s->tzd)
			continue;
		if (!tsens_threshold_violated(priv, hw_id, &d))
			continue;

		/* Let the thermal core re-evaluate trips and set new limits */
		thermal_zone_device_update(s->tzd, THERMAL_EVENT_UNSPECIFIED);

		if (tsens_version(priv) < VER_0_1) {
			/* Constraint: There is only 1 interrupt control register for all
			 * 11 temperature sensor. So monitoring more than 1 sensor based
			 * on interrupts will yield inconsistent result. To overcome this
			 * issue we will monitor only sensor 0 which is the master sensor.
			 */
			break;
		}
	}

	return IRQ_HANDLED;
}

/**
 * tsens_combined_irq_thread() - Threaded interrupt handler for combined interrupts
 * @irq: irq number
 * @data: tsens controller private data
 *
 * Handle the combined interrupt as if it were 2 separate interrupts, so call the
 * critical handler first and then the up/low one.
 *
 * Return: IRQ_HANDLED
 */
static irqreturn_t tsens_combined_irq_thread(int irq, void *data)
{
	irqreturn_t ret;

	/* Critical events take priority; bail out if that handler failed */
	ret = tsens_critical_irq_thread(irq, data);
	if (ret != IRQ_HANDLED)
		return ret;

	return tsens_irq_thread(irq, data);
}

/*
 * tsens_set_trips() - thermal zone .set_trips callback.
 * @tz:   thermal zone bound to one tsens sensor
 * @low:  proposed lower threshold in mC
 * @high: proposed upper threshold in mC
 *
 * Clamp the requested window to the IP's supported trip range, convert to
 * hardware codes, program the threshold registers under @ul_lock and
 * (re-)enable the up/low interrupts.
 *
 * Return: 0 (always succeeds once the zone exists)
 */
static int tsens_set_trips(struct thermal_zone_device *tz, int low, int high)
{
	struct tsens_sensor *s = thermal_zone_device_priv(tz);
	struct tsens_priv *priv = s->priv;
	struct device *dev = priv->dev;
	struct tsens_irq_data d;
	unsigned long flags;
	int high_val, low_val, cl_high, cl_low;
	u32 hw_id = s->hw_id;

	if (tsens_version(priv) < VER_0_1) {
		/* Pre v0.1 IP had a single register for each type of interrupt
		 * and thresholds
		 */
		hw_id = 0;
	}

	dev_dbg(dev, "[%u] %s: proposed thresholds: (%d:%d)\n",
		hw_id, __func__, low, high);

	cl_high = clamp_val(high, priv->feat->trip_min_temp,
			    priv->feat->trip_max_temp);
	cl_low = clamp_val(low, priv->feat->trip_min_temp,
			   priv->feat->trip_max_temp);

	high_val = tsens_mC_to_hw(s, cl_high);
	low_val = tsens_mC_to_hw(s, cl_low);

	/* Serialize against the irq handlers reading/acking the same regs */
	spin_lock_irqsave(&priv->ul_lock, flags);

	tsens_read_irq_state(priv, hw_id, s, &d);

	/* Write the new thresholds and clear the status */
	regmap_field_write(priv->rf[LOW_THRESH_0 + hw_id], low_val);
	regmap_field_write(priv->rf[UP_THRESH_0 + hw_id], high_val);
	tsens_set_interrupt(priv, hw_id, LOWER, true);
	tsens_set_interrupt(priv, hw_id, UPPER, true);

	spin_unlock_irqrestore(&priv->ul_lock, flags);

	dev_dbg(dev, "[%u] %s: (%d:%d)->(%d:%d)\n",
		hw_id, __func__, d.low_thresh, d.up_thresh, cl_low, cl_high);

	return 0;
}

/* Enable interrupt delivery in INT_EN (7 == three enable bits on >v1, else 1) */
static int tsens_enable_irq(struct tsens_priv *priv)
{
	int ret;
	int val = tsens_version(priv) > VER_1_X ?
7 : 1; ret = regmap_field_write(priv->rf[INT_EN], val); if (ret < 0) dev_err(priv->dev, "%s: failed to enable interrupts\n", __func__); return ret; } static void tsens_disable_irq(struct tsens_priv *priv) { regmap_field_write(priv->rf[INT_EN], 0); } int get_temp_tsens_valid(const struct tsens_sensor *s, int *temp) { struct tsens_priv *priv = s->priv; int hw_id = s->hw_id; u32 temp_idx = LAST_TEMP_0 + hw_id; u32 valid_idx = VALID_0 + hw_id; u32 valid; int ret; /* VER_0 doesn't have VALID bit */ if (tsens_version(priv) == VER_0) goto get_temp; /* Valid bit is 0 for 6 AHB clock cycles. * At 19.2MHz, 1 AHB clock is ~60ns. * We should enter this loop very, very rarely. * Wait 1 us since it's the min of poll_timeout macro. * Old value was 400 ns. */ ret = regmap_field_read_poll_timeout(priv->rf[valid_idx], valid, valid, 1, 20 * USEC_PER_MSEC); if (ret) return ret; get_temp: /* Valid bit is set, OK to read the temperature */ *temp = tsens_hw_to_mC(s, temp_idx); return 0; } int get_temp_common(const struct tsens_sensor *s, int *temp) { struct tsens_priv *priv = s->priv; int hw_id = s->hw_id; int last_temp = 0, ret, trdy; unsigned long timeout; timeout = jiffies + usecs_to_jiffies(TIMEOUT_US); do { if (tsens_version(priv) == VER_0) { ret = regmap_field_read(priv->rf[TRDY], &trdy); if (ret) return ret; if (!trdy) continue; } ret = regmap_field_read(priv->rf[LAST_TEMP_0 + hw_id], &last_temp); if (ret) return ret; *temp = code_to_degc(last_temp, s) * 1000; return 0; } while (time_before(jiffies, timeout)); return -ETIMEDOUT; } #ifdef CONFIG_DEBUG_FS static int dbg_sensors_show(struct seq_file *s, void *data) { struct platform_device *pdev = s->private; struct tsens_priv *priv = platform_get_drvdata(pdev); int i; seq_printf(s, "max: %2d\nnum: %2d\n\n", priv->feat->max_sensors, priv->num_sensors); seq_puts(s, " id slope offset\n--------------------------\n"); for (i = 0; i < priv->num_sensors; i++) { seq_printf(s, "%8d %8d %8d\n", priv->sensor[i].hw_id, priv->sensor[i].slope, 
priv->sensor[i].offset); } return 0; } static int dbg_version_show(struct seq_file *s, void *data) { struct platform_device *pdev = s->private; struct tsens_priv *priv = platform_get_drvdata(pdev); u32 maj_ver, min_ver, step_ver; int ret; if (tsens_version(priv) > VER_0_1) { ret = regmap_field_read(priv->rf[VER_MAJOR], &maj_ver); if (ret) return ret; ret = regmap_field_read(priv->rf[VER_MINOR], &min_ver); if (ret) return ret; ret = regmap_field_read(priv->rf[VER_STEP], &step_ver); if (ret) return ret; seq_printf(s, "%d.%d.%d\n", maj_ver, min_ver, step_ver); } else { seq_printf(s, "0.%d.0\n", priv->feat->ver_major); } return 0; } DEFINE_SHOW_ATTRIBUTE(dbg_version); DEFINE_SHOW_ATTRIBUTE(dbg_sensors); static void tsens_debug_init(struct platform_device *pdev) { struct tsens_priv *priv = platform_get_drvdata(pdev); priv->debug_root = debugfs_lookup("tsens", NULL); if (!priv->debug_root) priv->debug_root = debugfs_create_dir("tsens", NULL); /* A directory for each instance of the TSENS IP */ priv->debug = debugfs_create_dir(dev_name(&pdev->dev), priv->debug_root); debugfs_create_file("version", 0444, priv->debug, pdev, &dbg_version_fops); debugfs_create_file("sensors", 0444, priv->debug, pdev, &dbg_sensors_fops); } #else static inline void tsens_debug_init(struct platform_device *pdev) {} #endif static const struct regmap_config tsens_config = { .name = "tm", .reg_bits = 32, .val_bits = 32, .reg_stride = 4, }; static const struct regmap_config tsens_srot_config = { .name = "srot", .reg_bits = 32, .val_bits = 32, .reg_stride = 4, }; int __init init_common(struct tsens_priv *priv) { void __iomem *tm_base, *srot_base; struct device *dev = priv->dev; u32 ver_minor; struct resource *res; u32 enabled; int ret, i, j; struct platform_device *op = of_find_device_by_node(priv->dev->of_node); if (!op) return -EINVAL; if (op->num_resources > 1) { /* DT with separate SROT and TM address space */ priv->tm_offset = 0; res = platform_get_resource(op, IORESOURCE_MEM, 1); srot_base = 
devm_ioremap_resource(dev, res); if (IS_ERR(srot_base)) { ret = PTR_ERR(srot_base); goto err_put_device; } priv->srot_map = devm_regmap_init_mmio(dev, srot_base, &tsens_srot_config); if (IS_ERR(priv->srot_map)) { ret = PTR_ERR(priv->srot_map); goto err_put_device; } } else { /* old DTs where SROT and TM were in a contiguous 2K block */ priv->tm_offset = 0x1000; } if (tsens_version(priv) >= VER_0_1) { res = platform_get_resource(op, IORESOURCE_MEM, 0); tm_base = devm_ioremap_resource(dev, res); if (IS_ERR(tm_base)) { ret = PTR_ERR(tm_base); goto err_put_device; } priv->tm_map = devm_regmap_init_mmio(dev, tm_base, &tsens_config); } else { /* VER_0 share the same gcc regs using a syscon */ struct device *parent = priv->dev->parent; if (parent) priv->tm_map = syscon_node_to_regmap(parent->of_node); } if (IS_ERR_OR_NULL(priv->tm_map)) { if (!priv->tm_map) ret = -ENODEV; else ret = PTR_ERR(priv->tm_map); goto err_put_device; } /* VER_0 have only tm_map */ if (!priv->srot_map) priv->srot_map = priv->tm_map; if (tsens_version(priv) > VER_0_1) { for (i = VER_MAJOR; i <= VER_STEP; i++) { priv->rf[i] = devm_regmap_field_alloc(dev, priv->srot_map, priv->fields[i]); if (IS_ERR(priv->rf[i])) { ret = PTR_ERR(priv->rf[i]); goto err_put_device; } } ret = regmap_field_read(priv->rf[VER_MINOR], &ver_minor); if (ret) goto err_put_device; } priv->rf[TSENS_EN] = devm_regmap_field_alloc(dev, priv->srot_map, priv->fields[TSENS_EN]); if (IS_ERR(priv->rf[TSENS_EN])) { ret = PTR_ERR(priv->rf[TSENS_EN]); goto err_put_device; } /* in VER_0 TSENS need to be explicitly enabled */ if (tsens_version(priv) == VER_0) regmap_field_write(priv->rf[TSENS_EN], 1); ret = regmap_field_read(priv->rf[TSENS_EN], &enabled); if (ret) goto err_put_device; if (!enabled) { dev_err(dev, "%s: device not enabled\n", __func__); ret = -ENODEV; goto err_put_device; } priv->rf[SENSOR_EN] = devm_regmap_field_alloc(dev, priv->srot_map, priv->fields[SENSOR_EN]); if (IS_ERR(priv->rf[SENSOR_EN])) { ret = 
PTR_ERR(priv->rf[SENSOR_EN]); goto err_put_device; } priv->rf[INT_EN] = devm_regmap_field_alloc(dev, priv->tm_map, priv->fields[INT_EN]); if (IS_ERR(priv->rf[INT_EN])) { ret = PTR_ERR(priv->rf[INT_EN]); goto err_put_device; } priv->rf[TSENS_SW_RST] = devm_regmap_field_alloc(dev, priv->srot_map, priv->fields[TSENS_SW_RST]); if (IS_ERR(priv->rf[TSENS_SW_RST])) { ret = PTR_ERR(priv->rf[TSENS_SW_RST]); goto err_put_device; } priv->rf[TRDY] = devm_regmap_field_alloc(dev, priv->tm_map, priv->fields[TRDY]); if (IS_ERR(priv->rf[TRDY])) { ret = PTR_ERR(priv->rf[TRDY]); goto err_put_device; } /* This loop might need changes if enum regfield_ids is reordered */ for (j = LAST_TEMP_0; j <= UP_THRESH_15; j += 16) { for (i = 0; i < priv->feat->max_sensors; i++) { int idx = j + i; priv->rf[idx] = devm_regmap_field_alloc(dev, priv->tm_map, priv->fields[idx]); if (IS_ERR(priv->rf[idx])) { ret = PTR_ERR(priv->rf[idx]); goto err_put_device; } } } if (priv->feat->crit_int || tsens_version(priv) < VER_0_1) { /* Loop might need changes if enum regfield_ids is reordered */ for (j = CRITICAL_STATUS_0; j <= CRIT_THRESH_15; j += 16) { for (i = 0; i < priv->feat->max_sensors; i++) { int idx = j + i; priv->rf[idx] = devm_regmap_field_alloc(dev, priv->tm_map, priv->fields[idx]); if (IS_ERR(priv->rf[idx])) { ret = PTR_ERR(priv->rf[idx]); goto err_put_device; } } } } if (tsens_version(priv) > VER_1_X && ver_minor > 2) { /* Watchdog is present only on v2.3+ */ priv->feat->has_watchdog = 1; for (i = WDOG_BARK_STATUS; i <= CC_MON_MASK; i++) { priv->rf[i] = devm_regmap_field_alloc(dev, priv->tm_map, priv->fields[i]); if (IS_ERR(priv->rf[i])) { ret = PTR_ERR(priv->rf[i]); goto err_put_device; } } /* * Watchdog is already enabled, unmask the bark. 
* Disable cycle completion monitoring */ regmap_field_write(priv->rf[WDOG_BARK_MASK], 0); regmap_field_write(priv->rf[CC_MON_MASK], 1); } spin_lock_init(&priv->ul_lock); /* VER_0 interrupt doesn't need to be enabled */ if (tsens_version(priv) >= VER_0_1) tsens_enable_irq(priv); err_put_device: put_device(&op->dev); return ret; } static int tsens_get_temp(struct thermal_zone_device *tz, int *temp) { struct tsens_sensor *s = thermal_zone_device_priv(tz); struct tsens_priv *priv = s->priv; return priv->ops->get_temp(s, temp); } static int __maybe_unused tsens_suspend(struct device *dev) { struct tsens_priv *priv = dev_get_drvdata(dev); if (priv->ops && priv->ops->suspend) return priv->ops->suspend(priv); return 0; } static int __maybe_unused tsens_resume(struct device *dev) { struct tsens_priv *priv = dev_get_drvdata(dev); if (priv->ops && priv->ops->resume) return priv->ops->resume(priv); return 0; } static SIMPLE_DEV_PM_OPS(tsens_pm_ops, tsens_suspend, tsens_resume); static const struct of_device_id tsens_table[] = { { .compatible = "qcom,ipq8064-tsens", .data = &data_8960, }, { .compatible = "qcom,ipq8074-tsens", .data = &data_ipq8074, }, { .compatible = "qcom,mdm9607-tsens", .data = &data_9607, }, { .compatible = "qcom,msm8226-tsens", .data = &data_8226, }, { .compatible = "qcom,msm8909-tsens", .data = &data_8909, }, { .compatible = "qcom,msm8916-tsens", .data = &data_8916, }, { .compatible = "qcom,msm8939-tsens", .data = &data_8939, }, { .compatible = "qcom,msm8956-tsens", .data = &data_8956, }, { .compatible = "qcom,msm8960-tsens", .data = &data_8960, }, { .compatible = "qcom,msm8974-tsens", .data = &data_8974, }, { .compatible = "qcom,msm8976-tsens", .data = &data_8976, }, { .compatible = "qcom,msm8996-tsens", .data = &data_8996, }, { .compatible = "qcom,tsens-v1", .data = &data_tsens_v1, }, { .compatible = "qcom,tsens-v2", .data = &data_tsens_v2, }, {} }; MODULE_DEVICE_TABLE(of, tsens_table); static const struct thermal_zone_device_ops tsens_of_ops = { 
.get_temp = tsens_get_temp, .set_trips = tsens_set_trips, }; static int tsens_register_irq(struct tsens_priv *priv, char *irqname, irq_handler_t thread_fn) { struct platform_device *pdev; int ret, irq; pdev = of_find_device_by_node(priv->dev->of_node); if (!pdev) return -ENODEV; irq = platform_get_irq_byname(pdev, irqname); if (irq < 0) { ret = irq; /* For old DTs with no IRQ defined */ if (irq == -ENXIO) ret = 0; } else { /* VER_0 interrupt is TRIGGER_RISING, VER_0_1 and up is ONESHOT */ if (tsens_version(priv) == VER_0) ret = devm_request_threaded_irq(&pdev->dev, irq, thread_fn, NULL, IRQF_TRIGGER_RISING, dev_name(&pdev->dev), priv); else ret = devm_request_threaded_irq(&pdev->dev, irq, NULL, thread_fn, IRQF_ONESHOT, dev_name(&pdev->dev), priv); if (ret) dev_err(&pdev->dev, "%s: failed to get irq\n", __func__); else enable_irq_wake(irq); } put_device(&pdev->dev); return ret; } static int tsens_register(struct tsens_priv *priv) { int i, ret; struct thermal_zone_device *tzd; for (i = 0; i < priv->num_sensors; i++) { priv->sensor[i].priv = priv; tzd = devm_thermal_of_zone_register(priv->dev, priv->sensor[i].hw_id, &priv->sensor[i], &tsens_of_ops); if (IS_ERR(tzd)) continue; priv->sensor[i].tzd = tzd; if (priv->ops->enable) priv->ops->enable(priv, i); devm_thermal_add_hwmon_sysfs(priv->dev, tzd); } /* VER_0 require to set MIN and MAX THRESH * These 2 regs are set using the: * - CRIT_THRESH_0 for MAX THRESH hardcoded to 120°C * - CRIT_THRESH_1 for MIN THRESH hardcoded to 0°C */ if (tsens_version(priv) < VER_0_1) { regmap_field_write(priv->rf[CRIT_THRESH_0], tsens_mC_to_hw(priv->sensor, 120000)); regmap_field_write(priv->rf[CRIT_THRESH_1], tsens_mC_to_hw(priv->sensor, 0)); } if (priv->feat->combo_int) { ret = tsens_register_irq(priv, "combined", tsens_combined_irq_thread); } else { ret = tsens_register_irq(priv, "uplow", tsens_irq_thread); if (ret < 0) return ret; if (priv->feat->crit_int) ret = tsens_register_irq(priv, "critical", tsens_critical_irq_thread); } return 
ret; } static int tsens_probe(struct platform_device *pdev) { int ret, i; struct device *dev; struct device_node *np; struct tsens_priv *priv; const struct tsens_plat_data *data; const struct of_device_id *id; u32 num_sensors; if (pdev->dev.of_node) dev = &pdev->dev; else dev = pdev->dev.parent; np = dev->of_node; id = of_match_node(tsens_table, np); if (id) data = id->data; else data = &data_8960; num_sensors = data->num_sensors; if (np) of_property_read_u32(np, "#qcom,sensors", &num_sensors); if (num_sensors <= 0) { dev_err(dev, "%s: invalid number of sensors\n", __func__); return -EINVAL; } priv = devm_kzalloc(dev, struct_size(priv, sensor, num_sensors), GFP_KERNEL); if (!priv) return -ENOMEM; priv->dev = dev; priv->num_sensors = num_sensors; priv->ops = data->ops; for (i = 0; i < priv->num_sensors; i++) { if (data->hw_ids) priv->sensor[i].hw_id = data->hw_ids[i]; else priv->sensor[i].hw_id = i; } priv->feat = data->feat; priv->fields = data->fields; platform_set_drvdata(pdev, priv); if (!priv->ops || !priv->ops->init || !priv->ops->get_temp) return -EINVAL; ret = priv->ops->init(priv); if (ret < 0) { dev_err(dev, "%s: init failed\n", __func__); return ret; } if (priv->ops->calibrate) { ret = priv->ops->calibrate(priv); if (ret < 0) { if (ret != -EPROBE_DEFER) dev_err(dev, "%s: calibration failed\n", __func__); return ret; } } ret = tsens_register(priv); if (!ret) tsens_debug_init(pdev); return ret; } static int tsens_remove(struct platform_device *pdev) { struct tsens_priv *priv = platform_get_drvdata(pdev); debugfs_remove_recursive(priv->debug_root); tsens_disable_irq(priv); if (priv->ops->disable) priv->ops->disable(priv); return 0; } static struct platform_driver tsens_driver = { .probe = tsens_probe, .remove = tsens_remove, .driver = { .name = "qcom-tsens", .pm = &tsens_pm_ops, .of_match_table = tsens_table, }, }; module_platform_driver(tsens_driver); MODULE_LICENSE("GPL v2"); MODULE_DESCRIPTION("QCOM Temperature Sensor driver"); 
/* Allow module autoloading when the "qcom-tsens" platform device appears */
MODULE_ALIAS("platform:qcom-tsens");
/* source repo: linux-master */
/* source path: drivers/thermal/qcom/tsens.c */
// SPDX-License-Identifier: GPL-2.0 /* * Copyright (c) 2015, The Linux Foundation. All rights reserved. */ #include <linux/platform_device.h> #include <linux/delay.h> #include <linux/bitops.h> #include <linux/regmap.h> #include <linux/thermal.h> #include "tsens.h" #define CONFIG_ADDR 0x3640 #define CONFIG_ADDR_8660 0x3620 /* CONFIG_ADDR bitmasks */ #define CONFIG 0x9b #define CONFIG_MASK 0xf #define CONFIG_8660 1 #define CONFIG_SHIFT_8660 28 #define CONFIG_MASK_8660 (3 << CONFIG_SHIFT_8660) #define CNTL_ADDR 0x3620 /* CNTL_ADDR bitmasks */ #define EN BIT(0) #define SW_RST BIT(1) #define MEASURE_PERIOD BIT(18) #define SLP_CLK_ENA BIT(26) #define SLP_CLK_ENA_8660 BIT(24) #define SENSOR0_SHIFT 3 #define THRESHOLD_ADDR 0x3624 #define INT_STATUS_ADDR 0x363c #define S0_STATUS_OFF 0x3628 #define S1_STATUS_OFF 0x362c #define S2_STATUS_OFF 0x3630 #define S3_STATUS_OFF 0x3634 #define S4_STATUS_OFF 0x3638 #define S5_STATUS_OFF 0x3664 /* Sensors 5-10 found on apq8064/msm8960 */ #define S6_STATUS_OFF 0x3668 #define S7_STATUS_OFF 0x366c #define S8_STATUS_OFF 0x3670 #define S9_STATUS_OFF 0x3674 #define S10_STATUS_OFF 0x3678 /* Original slope - 350 to compensate mC to C inaccuracy */ static u32 tsens_msm8960_slope[] = { 826, 826, 804, 826, 761, 782, 782, 849, 782, 849, 782 }; static int suspend_8960(struct tsens_priv *priv) { int ret; unsigned int mask; struct regmap *map = priv->tm_map; ret = regmap_read(map, THRESHOLD_ADDR, &priv->ctx.threshold); if (ret) return ret; ret = regmap_read(map, CNTL_ADDR, &priv->ctx.control); if (ret) return ret; if (priv->num_sensors > 1) mask = SLP_CLK_ENA | EN; else mask = SLP_CLK_ENA_8660 | EN; ret = regmap_update_bits(map, CNTL_ADDR, mask, 0); if (ret) return ret; return 0; } static int resume_8960(struct tsens_priv *priv) { int ret; struct regmap *map = priv->tm_map; ret = regmap_update_bits(map, CNTL_ADDR, SW_RST, SW_RST); if (ret) return ret; /* * Separate CONFIG restore is not needed only for 8660 as * config is part of CTRL Addr and its 
restored as such */ if (priv->num_sensors > 1) { ret = regmap_update_bits(map, CONFIG_ADDR, CONFIG_MASK, CONFIG); if (ret) return ret; } ret = regmap_write(map, THRESHOLD_ADDR, priv->ctx.threshold); if (ret) return ret; ret = regmap_write(map, CNTL_ADDR, priv->ctx.control); if (ret) return ret; return 0; } static int enable_8960(struct tsens_priv *priv, int id) { int ret; u32 reg, mask = BIT(id); ret = regmap_read(priv->tm_map, CNTL_ADDR, &reg); if (ret) return ret; /* HARDWARE BUG: * On platforms with more than 6 sensors, all remaining sensors * must be enabled together, otherwise undefined results are expected. * (Sensor 6-7 disabled, Sensor 3 disabled...) In the original driver, * all the sensors are enabled in one step hence this bug is not * triggered. */ if (id > 5) mask = GENMASK(10, 6); mask <<= SENSOR0_SHIFT; /* Sensors already enabled. Skip. */ if ((reg & mask) == mask) return 0; ret = regmap_write(priv->tm_map, CNTL_ADDR, reg | SW_RST); if (ret) return ret; reg |= MEASURE_PERIOD; if (priv->num_sensors > 1) reg |= mask | SLP_CLK_ENA | EN; else reg |= mask | SLP_CLK_ENA_8660 | EN; ret = regmap_write(priv->tm_map, CNTL_ADDR, reg); if (ret) return ret; return 0; } static void disable_8960(struct tsens_priv *priv) { int ret; u32 reg_cntl; u32 mask; mask = GENMASK(priv->num_sensors - 1, 0); mask <<= SENSOR0_SHIFT; mask |= EN; ret = regmap_read(priv->tm_map, CNTL_ADDR, &reg_cntl); if (ret) return; reg_cntl &= ~mask; if (priv->num_sensors > 1) reg_cntl &= ~SLP_CLK_ENA; else reg_cntl &= ~SLP_CLK_ENA_8660; regmap_write(priv->tm_map, CNTL_ADDR, reg_cntl); } static int calibrate_8960(struct tsens_priv *priv) { int i; char *data; u32 p1[11]; data = qfprom_read(priv->dev, "calib"); if (IS_ERR(data)) data = qfprom_read(priv->dev, "calib_backup"); if (IS_ERR(data)) return PTR_ERR(data); for (i = 0; i < priv->num_sensors; i++) { p1[i] = data[i]; priv->sensor[i].slope = tsens_msm8960_slope[i]; } compute_intercept_slope(priv, p1, NULL, ONE_PT_CALIB); kfree(data); return 0; 
} static const struct reg_field tsens_8960_regfields[MAX_REGFIELDS] = { /* ----- SROT ------ */ /* No VERSION information */ /* CNTL */ [TSENS_EN] = REG_FIELD(CNTL_ADDR, 0, 0), [TSENS_SW_RST] = REG_FIELD(CNTL_ADDR, 1, 1), /* 8960 has 5 sensors, 8660 has 11, we only handle 5 */ [SENSOR_EN] = REG_FIELD(CNTL_ADDR, 3, 7), /* ----- TM ------ */ /* INTERRUPT ENABLE */ /* NO INTERRUPT ENABLE */ /* Single UPPER/LOWER TEMPERATURE THRESHOLD for all sensors */ [LOW_THRESH_0] = REG_FIELD(THRESHOLD_ADDR, 0, 7), [UP_THRESH_0] = REG_FIELD(THRESHOLD_ADDR, 8, 15), /* MIN_THRESH_0 and MAX_THRESH_0 are not present in the regfield * Recycle CRIT_THRESH_0 and 1 to set the required regs to hardcoded temp * MIN_THRESH_0 -> CRIT_THRESH_1 * MAX_THRESH_0 -> CRIT_THRESH_0 */ [CRIT_THRESH_1] = REG_FIELD(THRESHOLD_ADDR, 16, 23), [CRIT_THRESH_0] = REG_FIELD(THRESHOLD_ADDR, 24, 31), /* UPPER/LOWER INTERRUPT [CLEAR/STATUS] */ /* 1 == clear, 0 == normal operation */ [LOW_INT_CLEAR_0] = REG_FIELD(CNTL_ADDR, 9, 9), [UP_INT_CLEAR_0] = REG_FIELD(CNTL_ADDR, 10, 10), /* NO CRITICAL INTERRUPT SUPPORT on 8960 */ /* Sn_STATUS */ [LAST_TEMP_0] = REG_FIELD(S0_STATUS_OFF, 0, 7), [LAST_TEMP_1] = REG_FIELD(S1_STATUS_OFF, 0, 7), [LAST_TEMP_2] = REG_FIELD(S2_STATUS_OFF, 0, 7), [LAST_TEMP_3] = REG_FIELD(S3_STATUS_OFF, 0, 7), [LAST_TEMP_4] = REG_FIELD(S4_STATUS_OFF, 0, 7), [LAST_TEMP_5] = REG_FIELD(S5_STATUS_OFF, 0, 7), [LAST_TEMP_6] = REG_FIELD(S6_STATUS_OFF, 0, 7), [LAST_TEMP_7] = REG_FIELD(S7_STATUS_OFF, 0, 7), [LAST_TEMP_8] = REG_FIELD(S8_STATUS_OFF, 0, 7), [LAST_TEMP_9] = REG_FIELD(S9_STATUS_OFF, 0, 7), [LAST_TEMP_10] = REG_FIELD(S10_STATUS_OFF, 0, 7), /* No VALID field on 8960 */ /* TSENS_INT_STATUS bits: 1 == threshold violated */ [MIN_STATUS_0] = REG_FIELD(INT_STATUS_ADDR, 0, 0), [LOWER_STATUS_0] = REG_FIELD(INT_STATUS_ADDR, 1, 1), [UPPER_STATUS_0] = REG_FIELD(INT_STATUS_ADDR, 2, 2), /* No CRITICAL field on 8960 */ [MAX_STATUS_0] = REG_FIELD(INT_STATUS_ADDR, 3, 3), /* TRDY: 1=ready, 0=in progress */ [TRDY] 
	= REG_FIELD(INT_STATUS_ADDR, 7, 7),
};

/* IP callbacks for the msm8960/apq8064/msm8660 (VER_0) generation */
static const struct tsens_ops ops_8960 = {
	.init		= init_common,
	.calibrate	= calibrate_8960,
	.get_temp	= get_temp_common,
	.enable		= enable_8960,
	.disable	= disable_8960,
	.suspend	= suspend_8960,
	.resume		= resume_8960,
};

static struct tsens_features tsens_8960_feat = {
	.ver_major	= VER_0,
	.crit_int	= 0,	/* no critical interrupt support on 8960 */
	.combo_int	= 0,
	/* NOTE(review): .adc assumed to mean raw-ADC readout needing
	 * code_to_degc() conversion — confirm against tsens.h */
	.adc		= 1,
	.srot_split	= 0,	/* SROT and TM share one address space */
	.max_sensors	= 11,
	.trip_min_temp	= -40000,	/* mC */
	.trip_max_temp	= 120000,	/* mC */
};

struct tsens_plat_data data_8960 = {
	.num_sensors	= 11,
	.ops		= &ops_8960,
	.feat		= &tsens_8960_feat,
	.fields		= tsens_8960_regfields,
};
/* source repo: linux-master */
/* source path: drivers/thermal/qcom/tsens-8960.c */
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2011-2015, 2017, 2020, The Linux Foundation. All rights reserved.
 */

#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/iio/consumer.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/thermal.h>

#include "../thermal_hwmon.h"

/* Register offsets relative to the peripheral base (chip->base). */
#define QPNP_TM_REG_DIG_MAJOR		0x01
#define QPNP_TM_REG_TYPE		0x04
#define QPNP_TM_REG_SUBTYPE		0x05
#define QPNP_TM_REG_STATUS		0x08
#define QPNP_TM_REG_SHUTDOWN_CTRL1	0x40
#define QPNP_TM_REG_ALARM_CTRL		0x46

#define QPNP_TM_TYPE			0x09
#define QPNP_TM_SUBTYPE_GEN1		0x08
#define QPNP_TM_SUBTYPE_GEN2		0x09

#define STATUS_GEN1_STAGE_MASK		GENMASK(1, 0)
#define STATUS_GEN2_STATE_MASK		GENMASK(6, 4)
#define STATUS_GEN2_STATE_SHIFT		4

#define SHUTDOWN_CTRL1_OVERRIDE_S2	BIT(6)
#define SHUTDOWN_CTRL1_THRESHOLD_MASK	GENMASK(1, 0)

#define SHUTDOWN_CTRL1_RATE_25HZ	BIT(3)

#define ALARM_CTRL_FORCE_ENABLE		BIT(7)

#define THRESH_COUNT			4
#define STAGE_COUNT			3

/* Over-temperature trip point values in mC */
static const long temp_map_gen1[THRESH_COUNT][STAGE_COUNT] = {
	{ 105000, 125000, 145000 },
	{ 110000, 130000, 150000 },
	{ 115000, 135000, 155000 },
	{ 120000, 140000, 160000 },
};

static const long temp_map_gen2_v1[THRESH_COUNT][STAGE_COUNT] = {
	{  90000, 110000, 140000 },
	{  95000, 115000, 145000 },
	{ 100000, 120000, 150000 },
	{ 105000, 125000, 155000 },
};

#define TEMP_THRESH_STEP		5000 /* Threshold step: 5 C */

#define THRESH_MIN			0
#define THRESH_MAX			3

#define TEMP_STAGE_HYSTERESIS		2000

/* Temperature in Milli Celsius reported during stage 0 if no ADC is present */
#define DEFAULT_TEMP			37000

/* Per-device state for one QPNP temperature-alarm peripheral. */
struct qpnp_tm_chip {
	struct regmap			*map;
	struct device			*dev;
	struct thermal_zone_device	*tz_dev;
	unsigned int			subtype;	/* GEN1 or GEN2 */
	long				temp;		/* last reported temp, mC */
	unsigned int			thresh;		/* threshold index 0..3 */
	unsigned int			stage;		/* raw stage/state from STATUS */
	unsigned int			prev_stage;
	unsigned int			base;		/* peripheral base address */
	/* protects .thresh, .stage and chip registers */
	struct mutex			lock;
	bool				initialized;
	struct iio_channel		*adc;		/* optional; NULL when absent */
	const long			(*temp_map)[THRESH_COUNT][STAGE_COUNT];
};

/* This array maps from GEN2 alarm state to GEN1 alarm stage */
static const unsigned int alarm_state_map[8] = {0, 1, 1, 2, 2, 3, 3, 3};

/* Read one 8-bit register at chip->base + addr via the parent regmap. */
static int qpnp_tm_read(struct qpnp_tm_chip *chip, u16 addr, u8 *data)
{
	unsigned int val;
	int ret;

	ret = regmap_read(chip->map, chip->base + addr, &val);
	if (ret < 0)
		return ret;

	*data = val;

	return 0;
}

/* Write one 8-bit register at chip->base + addr. */
static int qpnp_tm_write(struct qpnp_tm_chip *chip, u16 addr, u8 data)
{
	return regmap_write(chip->map, chip->base + addr, data);
}

/**
 * qpnp_tm_decode_temp() - return temperature in mC corresponding to the
 *		specified over-temperature stage
 * @chip: Pointer to the qpnp_tm chip
 * @stage: Over-temperature stage
 *
 * Return: temperature in mC, or 0 for stage 0 / out-of-range inputs
 */
static long qpnp_tm_decode_temp(struct qpnp_tm_chip *chip, unsigned int stage)
{
	if (!chip->temp_map || chip->thresh >= THRESH_COUNT || stage == 0 ||
	    stage > STAGE_COUNT)
		return 0;

	return (*chip->temp_map)[chip->thresh][stage - 1];
}

/**
 * qpnp_tm_get_temp_stage() - return over-temperature stage
 * @chip: Pointer to the qpnp_tm chip
 *
 * Return: stage (GEN1) or state (GEN2) on success, or errno on failure.
 */
static int qpnp_tm_get_temp_stage(struct qpnp_tm_chip *chip)
{
	int ret;
	u8 reg = 0;

	ret = qpnp_tm_read(chip, QPNP_TM_REG_STATUS, &reg);
	if (ret < 0)
		return ret;

	/* GEN1 and GEN2 encode the stage in different bit fields */
	if (chip->subtype == QPNP_TM_SUBTYPE_GEN1)
		ret = reg & STATUS_GEN1_STAGE_MASK;
	else
		ret = (reg & STATUS_GEN2_STATE_MASK) >> STATUS_GEN2_STATE_SHIFT;

	return ret;
}

/*
 * This function updates the internal temp value based on the
 * current thermal stage and threshold as well as the previous stage
 */
static int qpnp_tm_update_temp_no_adc(struct qpnp_tm_chip *chip)
{
	unsigned int stage, stage_new, stage_old;
	int ret;

	/* caller must hold chip->lock */
	WARN_ON(!mutex_is_locked(&chip->lock));

	ret = qpnp_tm_get_temp_stage(chip);
	if (ret < 0)
		return ret;
	stage = ret;

	/* Normalize GEN2 states into GEN1 stages before comparing */
	if (chip->subtype == QPNP_TM_SUBTYPE_GEN1) {
		stage_new = stage;
		stage_old = chip->stage;
	} else {
		stage_new = alarm_state_map[stage];
		stage_old = alarm_state_map[chip->stage];
	}

	if (stage_new > stage_old) {
		/* increasing stage, use lower bound */
		chip->temp = qpnp_tm_decode_temp(chip, stage_new)
				+ TEMP_STAGE_HYSTERESIS;
	} else if (stage_new < stage_old) {
		/* decreasing stage, use upper bound */
		chip->temp = qpnp_tm_decode_temp(chip, stage_new + 1)
				- TEMP_STAGE_HYSTERESIS;
	}

	chip->stage = stage;

	return 0;
}

/* thermal_zone_device_ops.get_temp: report temperature from ADC or stage estimate. */
static int qpnp_tm_get_temp(struct thermal_zone_device *tz, int *temp)
{
	struct qpnp_tm_chip *chip = thermal_zone_device_priv(tz);
	int ret, mili_celsius;

	if (!temp)
		return -EINVAL;

	/* Hardware not set up yet: report a fixed placeholder temperature */
	if (!chip->initialized) {
		*temp = DEFAULT_TEMP;
		return 0;
	}

	if (!chip->adc) {
		mutex_lock(&chip->lock);
		ret = qpnp_tm_update_temp_no_adc(chip);
		mutex_unlock(&chip->lock);
		if (ret < 0)
			return ret;
	} else {
		ret = iio_read_channel_processed(chip->adc, &mili_celsius);
		if (ret < 0)
			return ret;

		chip->temp = mili_celsius;
	}

	*temp = chip->temp;

	return 0;
}

/*
 * Program SHUTDOWN_CTRL1 so the stage-2 threshold tracks the requested
 * critical trip temperature; caller must hold chip->lock.
 */
static int qpnp_tm_update_critical_trip_temp(struct qpnp_tm_chip *chip,
					     int temp)
{
	long stage2_threshold_min = (*chip->temp_map)[THRESH_MIN][1];
	long stage2_threshold_max = (*chip->temp_map)[THRESH_MAX][1];
	bool disable_s2_shutdown = false;
	u8 reg;

	WARN_ON(!mutex_is_locked(&chip->lock));

	/*
	 * Default: S2 and S3 shutdown enabled, thresholds at
	 * lowest threshold set, monitoring at 25Hz
	 */
	reg = SHUTDOWN_CTRL1_RATE_25HZ;

	if (temp == THERMAL_TEMP_INVALID ||
	    temp < stage2_threshold_min) {
		chip->thresh = THRESH_MIN;
		goto skip;
	}

	if (temp <= stage2_threshold_max) {
		/* Pick the highest threshold whose stage-2 value is <= temp */
		chip->thresh = THRESH_MAX -
			((stage2_threshold_max - temp) /
			 TEMP_THRESH_STEP);
		disable_s2_shutdown = true;
	} else {
		chip->thresh = THRESH_MAX;

		if (chip->adc)
			disable_s2_shutdown = true;
		else
			dev_warn(chip->dev,
				 "No ADC is configured and critical temperature %d mC is above the maximum stage 2 threshold of %ld mC! Configuring stage 2 shutdown at %ld mC.\n",
				 temp, stage2_threshold_max, stage2_threshold_max);
	}

skip:
	reg |= chip->thresh;
	if (disable_s2_shutdown)
		reg |= SHUTDOWN_CTRL1_OVERRIDE_S2;

	return qpnp_tm_write(chip, QPNP_TM_REG_SHUTDOWN_CTRL1, reg);
}

/* thermal_zone_device_ops.set_trip_temp: only the critical trip is programmable. */
static int qpnp_tm_set_trip_temp(struct thermal_zone_device *tz, int trip_id, int temp)
{
	struct qpnp_tm_chip *chip = thermal_zone_device_priv(tz);
	struct thermal_trip trip;
	int ret;

	ret = __thermal_zone_get_trip(chip->tz_dev, trip_id, &trip);
	if (ret)
		return ret;

	if (trip.type != THERMAL_TRIP_CRITICAL)
		return 0;

	mutex_lock(&chip->lock);
	ret = qpnp_tm_update_critical_trip_temp(chip, temp);
	mutex_unlock(&chip->lock);

	return ret;
}

static const struct thermal_zone_device_ops qpnp_tm_sensor_ops = {
	.get_temp = qpnp_tm_get_temp,
	.set_trip_temp = qpnp_tm_set_trip_temp,
};

/* Threaded IRQ handler: kick the thermal core to re-read the zone. */
static irqreturn_t qpnp_tm_isr(int irq, void *data)
{
	struct qpnp_tm_chip *chip = data;

	thermal_zone_device_update(chip->tz_dev, THERMAL_EVENT_UNSPECIFIED);

	return IRQ_HANDLED;
}

/* Scan DT-provided trips for the critical one; THERMAL_TEMP_INVALID if none. */
static int qpnp_tm_get_critical_trip_temp(struct qpnp_tm_chip *chip)
{
	struct thermal_trip trip;
	int i, ret;

	for (i = 0; i < thermal_zone_get_num_trips(chip->tz_dev); i++) {
		ret = thermal_zone_get_trip(chip->tz_dev, i, &trip);
		if (ret)
			continue;

		if (trip.type == THERMAL_TRIP_CRITICAL)
			return trip.temperature;
	}

	return THERMAL_TEMP_INVALID;
}

/*
 * This function
 initializes the internal temp value based on only the
 * current thermal stage and threshold. Setup threshold control and
 * disable shutdown override.
 */
static int qpnp_tm_init(struct qpnp_tm_chip *chip)
{
	unsigned int stage;
	int ret;
	u8 reg = 0;
	int crit_temp;

	mutex_lock(&chip->lock);

	ret = qpnp_tm_read(chip, QPNP_TM_REG_SHUTDOWN_CTRL1, &reg);
	if (ret < 0)
		goto out;

	chip->thresh = reg & SHUTDOWN_CTRL1_THRESHOLD_MASK;
	chip->temp = DEFAULT_TEMP;

	ret = qpnp_tm_get_temp_stage(chip);
	if (ret < 0)
		goto out;
	chip->stage = ret;

	stage = chip->subtype == QPNP_TM_SUBTYPE_GEN1
		? chip->stage : alarm_state_map[chip->stage];

	if (stage)
		chip->temp = qpnp_tm_decode_temp(chip, stage);

	/*
	 * Drop the lock while querying the thermal core for the critical
	 * trip (it takes its own locks), then re-take it to program HW.
	 */
	mutex_unlock(&chip->lock);

	crit_temp = qpnp_tm_get_critical_trip_temp(chip);

	mutex_lock(&chip->lock);

	ret = qpnp_tm_update_critical_trip_temp(chip, crit_temp);
	if (ret < 0)
		goto out;

	/* Enable the thermal alarm PMIC module in always-on mode. */
	reg = ALARM_CTRL_FORCE_ENABLE;
	ret = qpnp_tm_write(chip, QPNP_TM_REG_ALARM_CTRL, reg);

	chip->initialized = true;

out:
	mutex_unlock(&chip->lock);
	return ret;
}

/* Probe: validate the peripheral, register the zone, then init hardware. */
static int qpnp_tm_probe(struct platform_device *pdev)
{
	struct qpnp_tm_chip *chip;
	struct device_node *node;
	u8 type, subtype, dig_major;
	u32 res;
	int ret, irq;

	node = pdev->dev.of_node;

	chip = devm_kzalloc(&pdev->dev, sizeof(*chip), GFP_KERNEL);
	if (!chip)
		return -ENOMEM;

	dev_set_drvdata(&pdev->dev, chip);
	chip->dev = &pdev->dev;

	mutex_init(&chip->lock);

	/* Regmap is owned by the parent SPMI/PMIC device */
	chip->map = dev_get_regmap(pdev->dev.parent, NULL);
	if (!chip->map)
		return -ENXIO;

	ret = of_property_read_u32(node, "reg", &res);
	if (ret < 0)
		return ret;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	/* ADC based measurements are optional */
	chip->adc = devm_iio_channel_get(&pdev->dev, "thermal");
	if (IS_ERR(chip->adc)) {
		ret = PTR_ERR(chip->adc);
		chip->adc = NULL;
		/* only defer; any other ADC error falls back to stage mode */
		if (ret == -EPROBE_DEFER)
			return ret;
	}

	chip->base = res;

	ret = qpnp_tm_read(chip, QPNP_TM_REG_TYPE, &type);
	if (ret < 0)
		return dev_err_probe(&pdev->dev, ret,
				     "could not read type\n");

	ret = qpnp_tm_read(chip, QPNP_TM_REG_SUBTYPE, &subtype);
	if (ret < 0)
		return dev_err_probe(&pdev->dev, ret,
				     "could not read subtype\n");

	ret = qpnp_tm_read(chip, QPNP_TM_REG_DIG_MAJOR, &dig_major);
	if (ret < 0)
		return dev_err_probe(&pdev->dev, ret,
				     "could not read dig_major\n");

	if (type != QPNP_TM_TYPE || (subtype != QPNP_TM_SUBTYPE_GEN1
				     && subtype != QPNP_TM_SUBTYPE_GEN2)) {
		dev_err(&pdev->dev, "invalid type 0x%02x or subtype 0x%02x\n",
			type, subtype);
		return -ENODEV;
	}

	chip->subtype = subtype;
	if (subtype == QPNP_TM_SUBTYPE_GEN2 && dig_major >= 1)
		chip->temp_map = &temp_map_gen2_v1;
	else
		chip->temp_map = &temp_map_gen1;

	/*
	 * Register the sensor before initializing the hardware to be able to
	 * read the trip points. get_temp() returns the default temperature
	 * before the hardware initialization is completed.
	 */
	chip->tz_dev = devm_thermal_of_zone_register(
		&pdev->dev, 0, chip, &qpnp_tm_sensor_ops);
	if (IS_ERR(chip->tz_dev))
		return dev_err_probe(&pdev->dev, PTR_ERR(chip->tz_dev),
				     "failed to register sensor\n");

	ret = qpnp_tm_init(chip);
	if (ret < 0)
		return dev_err_probe(&pdev->dev, ret, "init failed\n");

	devm_thermal_add_hwmon_sysfs(&pdev->dev, chip->tz_dev);

	ret = devm_request_threaded_irq(&pdev->dev, irq, NULL, qpnp_tm_isr,
					IRQF_ONESHOT, node->name, chip);
	if (ret < 0)
		return ret;

	/* Pick up any alarm state that was already latched before probe */
	thermal_zone_device_update(chip->tz_dev, THERMAL_EVENT_UNSPECIFIED);

	return 0;
}

static const struct of_device_id qpnp_tm_match_table[] = {
	{ .compatible = "qcom,spmi-temp-alarm" },
	{ }
};
MODULE_DEVICE_TABLE(of, qpnp_tm_match_table);

static struct platform_driver qpnp_tm_driver = {
	.driver = {
		.name = "spmi-temp-alarm",
		.of_match_table = qpnp_tm_match_table,
	},
	.probe  = qpnp_tm_probe,
};
module_platform_driver(qpnp_tm_driver);

MODULE_ALIAS("platform:spmi-temp-alarm");
MODULE_DESCRIPTION("QPNP PMIC Temperature Alarm driver");
MODULE_LICENSE("GPL v2");
linux-master
drivers/thermal/qcom/qcom-spmi-temp-alarm.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2017 Rafał Miłecki <[email protected]>
 */

#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/thermal.h>

/* PVTMON block register offsets and fields */
#define PVTMON_CONTROL0				0x00
#define PVTMON_CONTROL0_SEL_MASK		0x0000000e
#define PVTMON_CONTROL0_SEL_TEMP_MONITOR	0x00000000
#define PVTMON_CONTROL0_SEL_TEST_MODE		0x0000000e
#define PVTMON_STATUS				0x08

/*
 * thermal_zone_device_ops.get_temp: ensure the monitor is in temperature
 * mode, then convert the raw status using DT-provided slope/offset.
 */
static int ns_thermal_get_temp(struct thermal_zone_device *tz, int *temp)
{
	void __iomem *pvtmon = thermal_zone_device_priv(tz);
	int offset = thermal_zone_get_offset(tz);
	int slope = thermal_zone_get_slope(tz);
	u32 val;

	val = readl(pvtmon + PVTMON_CONTROL0);
	if ((val & PVTMON_CONTROL0_SEL_MASK) != PVTMON_CONTROL0_SEL_TEMP_MONITOR) {
		/* Clear current mode selection */
		val &= ~PVTMON_CONTROL0_SEL_MASK;
		/* Set temp monitor mode (it's the default actually) */
		val |= PVTMON_CONTROL0_SEL_TEMP_MONITOR;

		writel(val, pvtmon + PVTMON_CONTROL0);
	}

	val = readl(pvtmon + PVTMON_STATUS);
	*temp = slope * val + offset;

	return 0;
}

static const struct thermal_zone_device_ops ns_thermal_ops = {
	.get_temp = ns_thermal_get_temp,
};

/*
 * Probe: map the PVTMON registers and register the thermal zone.
 * NOTE(review): the mapping is created with of_iomap() (not devm), so
 * every failure path and remove() must iounmap() it explicitly.
 */
static int ns_thermal_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct thermal_zone_device *tz;
	void __iomem *pvtmon;

	pvtmon = of_iomap(dev_of_node(dev), 0);
	if (WARN_ON(!pvtmon))
		return -ENOENT;

	tz = devm_thermal_of_zone_register(dev, 0, pvtmon, &ns_thermal_ops);
	if (IS_ERR(tz)) {
		iounmap(pvtmon);
		return PTR_ERR(tz);
	}

	platform_set_drvdata(pdev, pvtmon);

	return 0;
}

static int ns_thermal_remove(struct platform_device *pdev)
{
	void __iomem *pvtmon = platform_get_drvdata(pdev);

	iounmap(pvtmon);

	return 0;
}

static const struct of_device_id ns_thermal_of_match[] = {
	{ .compatible = "brcm,ns-thermal", },
	{},
};
MODULE_DEVICE_TABLE(of, ns_thermal_of_match);

static struct platform_driver ns_thermal_driver = {
	.probe		= ns_thermal_probe,
	.remove		= ns_thermal_remove,
	.driver = {
		.name = "ns-thermal",
		.of_match_table = ns_thermal_of_match,
	},
};
module_platform_driver(ns_thermal_driver);

MODULE_AUTHOR("Rafał Miłecki <[email protected]>");
MODULE_DESCRIPTION("Northstar thermal driver");
MODULE_LICENSE("GPL v2");
linux-master
drivers/thermal/broadcom/ns-thermal.c
// SPDX-License-Identifier: GPL-2.0+
/*
 * Broadcom AVS RO thermal sensor driver
 *
 * based on brcmstb_thermal
 *
 * Copyright (C) 2020 Stefan Wahren
 */

#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/thermal.h>

#include "../thermal_hwmon.h"

#define AVS_RO_TEMP_STATUS		0x200
#define AVS_RO_TEMP_STATUS_VALID_MSK	(BIT(16) | BIT(10))
#define AVS_RO_TEMP_STATUS_DATA_MSK	GENMASK(9, 0)

/* Per-device state: syscon regmap plus the registered zone. */
struct bcm2711_thermal_priv {
	struct regmap *regmap;
	struct thermal_zone_device *thermal;
};

/*
 * thermal_zone_device_ops.get_temp: read the AVS RO status register and
 * convert the 10-bit code with the DT-provided slope/offset.
 */
static int bcm2711_get_temp(struct thermal_zone_device *tz, int *temp)
{
	struct bcm2711_thermal_priv *priv = thermal_zone_device_priv(tz);
	int slope = thermal_zone_get_slope(tz);
	int offset = thermal_zone_get_offset(tz);
	u32 val;
	int ret;

	ret = regmap_read(priv->regmap, AVS_RO_TEMP_STATUS, &val);
	if (ret)
		return ret;

	/* reading is only usable when a valid bit is set */
	if (!(val & AVS_RO_TEMP_STATUS_VALID_MSK))
		return -EIO;

	val &= AVS_RO_TEMP_STATUS_DATA_MSK;

	/* Convert a HW code to a temperature reading (millidegree celsius) */
	*temp = slope * val + offset;

	return 0;
}

static const struct thermal_zone_device_ops bcm2711_thermal_of_ops = {
	.get_temp = bcm2711_get_temp,
};

static const struct of_device_id bcm2711_thermal_id_table[] = {
	{ .compatible = "brcm,bcm2711-thermal" },
	{},
};
MODULE_DEVICE_TABLE(of, bcm2711_thermal_id_table);

/* Probe: obtain the parent syscon regmap, register zone and hwmon bridge. */
static int bcm2711_thermal_probe(struct platform_device *pdev)
{
	struct thermal_zone_device *thermal;
	struct bcm2711_thermal_priv *priv;
	struct device *dev = &pdev->dev;
	struct device_node *parent;
	struct regmap *regmap;
	int ret;

	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	/* get regmap from syscon node */
	parent = of_get_parent(dev->of_node); /* parent should be syscon node */
	regmap = syscon_node_to_regmap(parent);
	of_node_put(parent);
	if (IS_ERR(regmap)) {
		ret = PTR_ERR(regmap);
		dev_err(dev, "failed to get regmap: %d\n", ret);
		return ret;
	}
	priv->regmap = regmap;

	thermal = devm_thermal_of_zone_register(dev, 0, priv,
						&bcm2711_thermal_of_ops);
	if (IS_ERR(thermal)) {
		ret = PTR_ERR(thermal);
		dev_err(dev, "could not register sensor: %d\n", ret);
		return ret;
	}

	priv->thermal = thermal;

	return thermal_add_hwmon_sysfs(thermal);
}

static struct platform_driver bcm2711_thermal_driver = {
	.probe = bcm2711_thermal_probe,
	.driver = {
		.name = "bcm2711_thermal",
		.of_match_table = bcm2711_thermal_id_table,
	},
};
module_platform_driver(bcm2711_thermal_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Stefan Wahren");
MODULE_DESCRIPTION("Broadcom AVS RO thermal sensor driver");
linux-master
drivers/thermal/broadcom/bcm2711_thermal.c
// SPDX-License-Identifier: GPL-2.0+
/*
 * Driver for Broadcom BCM2835 SoC temperature sensor
 *
 * Copyright (C) 2016 Martin Sperl
 */

#include <linux/clk.h>
#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/thermal.h>

#include "../thermal_hwmon.h"

#define BCM2835_TS_TSENSCTL			0x00
#define BCM2835_TS_TSENSSTAT			0x04

#define BCM2835_TS_TSENSCTL_PRWDW		BIT(0)
#define BCM2835_TS_TSENSCTL_RSTB		BIT(1)

/*
 * bandgap reference voltage in 6 mV increments
 * 000b = 1178 mV, 001b = 1184 mV, ... 111b = 1220 mV
 */
#define BCM2835_TS_TSENSCTL_CTRL_BITS		3
#define BCM2835_TS_TSENSCTL_CTRL_SHIFT		2
#define BCM2835_TS_TSENSCTL_CTRL_MASK		\
	GENMASK(BCM2835_TS_TSENSCTL_CTRL_BITS +	\
		BCM2835_TS_TSENSCTL_CTRL_SHIFT - 1,	\
		BCM2835_TS_TSENSCTL_CTRL_SHIFT)
#define BCM2835_TS_TSENSCTL_CTRL_DEFAULT	1
#define BCM2835_TS_TSENSCTL_EN_INT		BIT(5)
#define BCM2835_TS_TSENSCTL_DIRECT		BIT(6)
#define BCM2835_TS_TSENSCTL_CLR_INT		BIT(7)
#define BCM2835_TS_TSENSCTL_THOLD_SHIFT		8
#define BCM2835_TS_TSENSCTL_THOLD_BITS		10
#define BCM2835_TS_TSENSCTL_THOLD_MASK		\
	GENMASK(BCM2835_TS_TSENSCTL_THOLD_BITS +	\
		BCM2835_TS_TSENSCTL_THOLD_SHIFT - 1,	\
		BCM2835_TS_TSENSCTL_THOLD_SHIFT)
/*
 * time how long the block to be asserted in reset
 * which based on a clock counter (TSENS clock assumed)
 */
#define BCM2835_TS_TSENSCTL_RSTDELAY_SHIFT	18
#define BCM2835_TS_TSENSCTL_RSTDELAY_BITS	8
#define BCM2835_TS_TSENSCTL_REGULEN		BIT(26)

#define BCM2835_TS_TSENSSTAT_DATA_BITS		10
#define BCM2835_TS_TSENSSTAT_DATA_SHIFT		0
#define BCM2835_TS_TSENSSTAT_DATA_MASK		\
	GENMASK(BCM2835_TS_TSENSSTAT_DATA_BITS +	\
		BCM2835_TS_TSENSSTAT_DATA_SHIFT - 1,	\
		BCM2835_TS_TSENSSTAT_DATA_SHIFT)
#define BCM2835_TS_TSENSSTAT_VALID		BIT(10)
#define BCM2835_TS_TSENSSTAT_INTERRUPT		BIT(11)

/* Per-device state: zone, MMIO base, sensor clock, debugfs root. */
struct bcm2835_thermal_data {
	struct thermal_zone_device *tz;
	void __iomem *regs;
	struct clk *clk;
	struct dentry *debugfsdir;
};

/* Linear conversion from a raw ADC code to millidegree Celsius. */
static int bcm2835_thermal_adc2temp(u32 adc, int offset, int slope)
{
	return offset + slope * adc;
}

/* Inverse conversion, clamped to the valid 10-bit ADC code range. */
static int bcm2835_thermal_temp2adc(int temp, int offset, int slope)
{
	temp -= offset;
	temp /= slope;

	if (temp < 0)
		temp = 0;
	if (temp >= BIT(BCM2835_TS_TSENSSTAT_DATA_BITS))
		temp = BIT(BCM2835_TS_TSENSSTAT_DATA_BITS) - 1;

	return temp;
}

/* thermal_zone_device_ops.get_temp: read TSENSSTAT and convert the code. */
static int bcm2835_thermal_get_temp(struct thermal_zone_device *tz,
				    int *temp)
{
	struct bcm2835_thermal_data *data = thermal_zone_device_priv(tz);
	u32 val = readl(data->regs + BCM2835_TS_TSENSSTAT);

	if (!(val & BCM2835_TS_TSENSSTAT_VALID))
		return -EIO;

	val &= BCM2835_TS_TSENSSTAT_DATA_MASK;

	*temp = bcm2835_thermal_adc2temp(
		val,
		thermal_zone_get_offset(data->tz),
		thermal_zone_get_slope(data->tz));

	return 0;
}

static const struct debugfs_reg32 bcm2835_thermal_regs[] = {
	{
		.name = "ctl",
		.offset = 0
	},
	{
		.name = "stat",
		.offset = 4
	}
};

/* Expose the two HW registers read-only under debugfs. */
static void bcm2835_thermal_debugfs(struct platform_device *pdev)
{
	struct bcm2835_thermal_data *data = platform_get_drvdata(pdev);
	struct debugfs_regset32 *regset;

	data->debugfsdir = debugfs_create_dir("bcm2835_thermal", NULL);

	regset = devm_kzalloc(&pdev->dev, sizeof(*regset), GFP_KERNEL);
	if (!regset)
		return;

	regset->regs = bcm2835_thermal_regs;
	regset->nregs = ARRAY_SIZE(bcm2835_thermal_regs);
	regset->base = data->regs;

	debugfs_create_regset32("regset", 0444, data->debugfsdir, regset);
}

static const struct thermal_zone_device_ops bcm2835_thermal_ops = {
	.get_temp = bcm2835_thermal_get_temp,
};

/*
 * Note: as per Raspberry Foundation FAQ
 * (https://www.raspberrypi.org/help/faqs/#performanceOperatingTemperature)
 * the recommended temperature range for the SoC -40C to +85C
 * so the trip limit is set to 80C.
 * this applies to all the BCM283X SoC
 */

static const struct of_device_id bcm2835_thermal_of_match_table[] = {
	{
		.compatible = "brcm,bcm2835-thermal",
	},
	{
		.compatible = "brcm,bcm2836-thermal",
	},
	{
		.compatible = "brcm,bcm2837-thermal",
	},
	{},
};
MODULE_DEVICE_TABLE(of, bcm2835_thermal_of_match_table);

/*
 * Probe: map registers, enable the TSENS clock, register the zone and,
 * if the firmware has not already brought the sensor out of reset,
 * program it with the critical-trip threshold in a two-step write.
 */
static int bcm2835_thermal_probe(struct platform_device *pdev)
{
	const struct of_device_id *match;
	struct thermal_zone_device *tz;
	struct bcm2835_thermal_data *data;
	int err = 0;
	u32 val;
	unsigned long rate;

	data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	match = of_match_device(bcm2835_thermal_of_match_table,
				&pdev->dev);
	if (!match)
		return -EINVAL;

	data->regs = devm_platform_get_and_ioremap_resource(pdev, 0, NULL);
	if (IS_ERR(data->regs)) {
		err = PTR_ERR(data->regs);
		return err;
	}

	data->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(data->clk)) {
		err = PTR_ERR(data->clk);
		if (err != -EPROBE_DEFER)
			dev_err(&pdev->dev, "Could not get clk: %d\n", err);
		return err;
	}

	err = clk_prepare_enable(data->clk);
	if (err)
		return err;

	rate = clk_get_rate(data->clk);
	if ((rate < 1920000) || (rate > 5000000))
		dev_warn(&pdev->dev,
			 "Clock %pCn running at %lu Hz is outside of the recommended range: 1.92 to 5MHz\n",
			 data->clk, rate);

	/* register of thermal sensor and get info from DT */
	tz = devm_thermal_of_zone_register(&pdev->dev, 0, data,
					   &bcm2835_thermal_ops);
	if (IS_ERR(tz)) {
		err = PTR_ERR(tz);
		dev_err(&pdev->dev,
			"Failed to register the thermal device: %d\n",
			err);
		goto err_clk;
	}

	/*
	 * right now the FW does set up the HW-block, so we are not
	 * touching the configuration registers.
	 * But if the HW is not enabled, then set it up
	 * using "sane" values used by the firmware right now.
	 */
	val = readl(data->regs + BCM2835_TS_TSENSCTL);
	if (!(val & BCM2835_TS_TSENSCTL_RSTB)) {
		struct thermal_trip trip;
		int offset, slope;

		slope = thermal_zone_get_slope(tz);
		offset = thermal_zone_get_offset(tz);
		/*
		 * For now we deal only with critical, otherwise
		 * would need to iterate
		 */
		err = thermal_zone_get_trip(tz, 0, &trip);
		if (err < 0) {
			dev_err(&pdev->dev,
				"Not able to read trip_temp: %d\n",
				err);
			goto err_tz;
		}

		/* set bandgap reference voltage and enable voltage regulator */
		val = (BCM2835_TS_TSENSCTL_CTRL_DEFAULT <<
		       BCM2835_TS_TSENSCTL_CTRL_SHIFT) |
		      BCM2835_TS_TSENSCTL_REGULEN;

		/* use the recommended reset duration */
		val |= (0xFE << BCM2835_TS_TSENSCTL_RSTDELAY_SHIFT);

		/* trip_adc value from info */
		val |= bcm2835_thermal_temp2adc(trip.temperature,
						offset,
						slope)
			<< BCM2835_TS_TSENSCTL_THOLD_SHIFT;

		/* write the value back to the register as 2 steps */
		writel(val, data->regs + BCM2835_TS_TSENSCTL);
		val |= BCM2835_TS_TSENSCTL_RSTB;
		writel(val, data->regs + BCM2835_TS_TSENSCTL);
	}

	data->tz = tz;

	platform_set_drvdata(pdev, data);

	/*
	 * Thermal_zone doesn't enable hwmon as default,
	 * enable it here
	 */
	err = thermal_add_hwmon_sysfs(tz);
	if (err)
		goto err_tz;

	bcm2835_thermal_debugfs(pdev);

	return 0;
err_tz:
	devm_thermal_of_zone_unregister(&pdev->dev, tz);
err_clk:
	clk_disable_unprepare(data->clk);

	return err;
}

static int bcm2835_thermal_remove(struct platform_device *pdev)
{
	struct bcm2835_thermal_data *data = platform_get_drvdata(pdev);

	debugfs_remove_recursive(data->debugfsdir);
	clk_disable_unprepare(data->clk);

	return 0;
}

static struct platform_driver bcm2835_thermal_driver = {
	.probe = bcm2835_thermal_probe,
	.remove = bcm2835_thermal_remove,
	.driver = {
		.name = "bcm2835_thermal",
		.of_match_table = bcm2835_thermal_of_match_table,
	},
};
module_platform_driver(bcm2835_thermal_driver);

MODULE_AUTHOR("Martin Sperl")
MODULE_DESCRIPTION("Thermal driver for bcm2835 chip");
MODULE_LICENSE("GPL");
linux-master
drivers/thermal/broadcom/bcm2835_thermal.c