python_code
stringlengths 0
1.8M
| repo_name
stringclasses 7
values | file_path
stringlengths 5
99
|
---|---|---|
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2015, Sony Mobile Communications AB.
* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
*/
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/regulator/driver.h>
#include <linux/regulator/of_regulator.h>
#include <linux/soc/qcom/smd-rpm.h>
/* Driver state for a single RPM-controlled regulator. */
struct qcom_rpm_reg {
struct device *dev;
struct qcom_smd_rpm *rpm; /* handle passed to qcom_rpm_smd_write() */
u32 type; /* RPM resource type (e.g. QCOM_SMD_RPM_SMPA) */
u32 id; /* RPM resource id within the type */
struct regulator_desc desc;
int is_enabled; /* cached enable state, reported by .is_enabled */
int uV; /* cached voltage, reported by .get_voltage */
u32 load; /* cached load in uA (divided down to mA for the RPM) */
/* "dirty" flags: set when the cached value still has to be sent */
unsigned int enabled_updated:1;
unsigned int uv_updated:1;
unsigned int load_updated:1;
};
/* One key/value element of a regulator request sent to the RPM. */
struct rpm_regulator_req {
__le32 key; /* one of the RPM_KEY_* tags below */
__le32 nbytes; /* size of @value in bytes */
__le32 value;
};
/* Request keys: ASCII tags ("swen", "uv", "ma") encoded little-endian */
#define RPM_KEY_SWEN 0x6e657773 /* "swen" */
#define RPM_KEY_UV 0x00007675 /* "uv" */
#define RPM_KEY_MA 0x0000616d /* "ma" */
/*
 * Flush all pending state (enable, voltage, load) for the active set
 * to the RPM in a single write.  Voltage and load requests are only
 * included while the regulator is enabled; the dirty flags are cleared
 * only when the write succeeds.
 */
static int rpm_reg_write_active(struct qcom_rpm_reg *vreg)
{
	struct rpm_regulator_req req[3];
	int nreq = 0;
	int ret;

	if (vreg->enabled_updated) {
		req[nreq].key = cpu_to_le32(RPM_KEY_SWEN);
		req[nreq].nbytes = cpu_to_le32(sizeof(u32));
		req[nreq].value = cpu_to_le32(vreg->is_enabled);
		nreq++;
	}

	if (vreg->uv_updated && vreg->is_enabled) {
		req[nreq].key = cpu_to_le32(RPM_KEY_UV);
		req[nreq].nbytes = cpu_to_le32(sizeof(u32));
		req[nreq].value = cpu_to_le32(vreg->uV);
		nreq++;
	}

	if (vreg->load_updated && vreg->is_enabled) {
		req[nreq].key = cpu_to_le32(RPM_KEY_MA);
		req[nreq].nbytes = cpu_to_le32(sizeof(u32));
		/* the framework hands us uA, the RPM expects mA */
		req[nreq].value = cpu_to_le32(vreg->load / 1000);
		nreq++;
	}

	/* nothing dirty, no need to bother the RPM */
	if (!nreq)
		return 0;

	ret = qcom_rpm_smd_write(vreg->rpm, QCOM_SMD_RPM_ACTIVE_STATE,
				 vreg->type, vreg->id,
				 req, nreq * sizeof(req[0]));
	if (!ret) {
		vreg->enabled_updated = 0;
		vreg->uv_updated = 0;
		vreg->load_updated = 0;
	}

	return ret;
}
/* Enable the regulator; roll the cached state back if the RPM write fails. */
static int rpm_reg_enable(struct regulator_dev *rdev)
{
	struct qcom_rpm_reg *vreg = rdev_get_drvdata(rdev);
	int err;

	vreg->is_enabled = 1;
	vreg->enabled_updated = 1;

	err = rpm_reg_write_active(vreg);
	if (err)
		vreg->is_enabled = 0;

	return err;
}
/* Report the locally cached enable state; the RPM is not queried. */
static int rpm_reg_is_enabled(struct regulator_dev *rdev)
{
	struct qcom_rpm_reg *vreg = rdev_get_drvdata(rdev);

	return vreg->is_enabled;
}
/* Disable the regulator; restore the cached state if the RPM write fails. */
static int rpm_reg_disable(struct regulator_dev *rdev)
{
	struct qcom_rpm_reg *vreg = rdev_get_drvdata(rdev);
	int err;

	vreg->is_enabled = 0;
	vreg->enabled_updated = 1;

	err = rpm_reg_write_active(vreg);
	if (err)
		vreg->is_enabled = 1;

	return err;
}
/* Return the locally cached voltage; the RPM is not queried. */
static int rpm_reg_get_voltage(struct regulator_dev *rdev)
{
	struct qcom_rpm_reg *vreg = rdev_get_drvdata(rdev);

	return vreg->uV;
}
/*
 * Request min_uV for the regulator (max_uV and selector are unused
 * here) and push it to the RPM, restoring the previous cached voltage
 * if the write fails.
 */
static int rpm_reg_set_voltage(struct regulator_dev *rdev,
			       int min_uV,
			       int max_uV,
			       unsigned *selector)
{
	struct qcom_rpm_reg *vreg = rdev_get_drvdata(rdev);
	int saved_uV = vreg->uV;
	int err;

	vreg->uV = min_uV;
	vreg->uv_updated = 1;

	err = rpm_reg_write_active(vreg);
	if (err)
		vreg->uV = saved_uV;

	return err;
}
/*
 * Cache the new load (in uA) and push it to the RPM, restoring the
 * previous value if the write fails.
 */
static int rpm_reg_set_load(struct regulator_dev *rdev, int load_uA)
{
	struct qcom_rpm_reg *vreg = rdev_get_drvdata(rdev);
	u32 saved_load = vreg->load;
	int err;

	vreg->load = load_uA;
	vreg->load_updated = 1;

	err = rpm_reg_write_active(vreg);
	if (err)
		vreg->load = saved_load;

	return err;
}
/* SMPS/LDO regulators with linear voltage ranges and load reporting. */
static const struct regulator_ops rpm_smps_ldo_ops = {
.enable = rpm_reg_enable,
.disable = rpm_reg_disable,
.is_enabled = rpm_reg_is_enabled,
.list_voltage = regulator_list_voltage_linear_range,
.get_voltage = rpm_reg_get_voltage,
.set_voltage = rpm_reg_set_voltage,
.set_load = rpm_reg_set_load,
};
/* Fixed-voltage regulators: no list_voltage (desc provides fixed_uV). */
static const struct regulator_ops rpm_smps_ldo_ops_fixed = {
.enable = rpm_reg_enable,
.disable = rpm_reg_disable,
.is_enabled = rpm_reg_is_enabled,
.get_voltage = rpm_reg_get_voltage,
.set_voltage = rpm_reg_set_voltage,
.set_load = rpm_reg_set_load,
};
/* Voltage switches: on/off only, no voltage control. */
static const struct regulator_ops rpm_switch_ops = {
.enable = rpm_reg_enable,
.disable = rpm_reg_disable,
.is_enabled = rpm_reg_is_enabled,
};
/* Buck-or-boost (BOB) regulators: voltage control but no set_load. */
static const struct regulator_ops rpm_bob_ops = {
.enable = rpm_reg_enable,
.disable = rpm_reg_disable,
.is_enabled = rpm_reg_is_enabled,
.get_voltage = rpm_reg_get_voltage,
.set_voltage = rpm_reg_set_voltage,
};
/* MP5496 regulators: linear ranges, no set_load. */
static const struct regulator_ops rpm_mp5496_ops = {
.enable = rpm_reg_enable,
.disable = rpm_reg_disable,
.is_enabled = rpm_reg_is_enabled,
.list_voltage = regulator_list_voltage_linear_range,
.get_voltage = rpm_reg_get_voltage,
.set_voltage = rpm_reg_set_voltage,
};
/*
 * Per-PMIC voltage descriptors.  Each REGULATOR_LINEAR_RANGE() entry is
 * (min_uV, min_selector, max_selector, step_uV); n_voltages is the total
 * selector count across all ranges.  Switch descriptors carry no ranges,
 * only on/off ops.
 */
static const struct regulator_desc pma8084_hfsmps = {
.linear_ranges = (struct linear_range[]) {
REGULATOR_LINEAR_RANGE(375000, 0, 95, 12500),
REGULATOR_LINEAR_RANGE(1550000, 96, 158, 25000),
},
.n_linear_ranges = 2,
.n_voltages = 159,
.ops = &rpm_smps_ldo_ops,
};
static const struct regulator_desc pma8084_ftsmps = {
.linear_ranges = (struct linear_range[]) {
REGULATOR_LINEAR_RANGE(350000, 0, 184, 5000),
REGULATOR_LINEAR_RANGE(1280000, 185, 261, 10000),
},
.n_linear_ranges = 2,
.n_voltages = 262,
.ops = &rpm_smps_ldo_ops,
};
static const struct regulator_desc pma8084_pldo = {
.linear_ranges = (struct linear_range[]) {
REGULATOR_LINEAR_RANGE( 750000, 0, 63, 12500),
REGULATOR_LINEAR_RANGE(1550000, 64, 126, 25000),
REGULATOR_LINEAR_RANGE(3100000, 127, 163, 50000),
},
.n_linear_ranges = 3,
.n_voltages = 164,
.ops = &rpm_smps_ldo_ops,
};
static const struct regulator_desc pma8084_nldo = {
.linear_ranges = (struct linear_range[]) {
REGULATOR_LINEAR_RANGE(750000, 0, 63, 12500),
},
.n_linear_ranges = 1,
.n_voltages = 64,
.ops = &rpm_smps_ldo_ops,
};
static const struct regulator_desc pma8084_switch = {
.ops = &rpm_switch_ops,
};
/* PM8226: hfsmps upper range starts at 1.575 V (vs 1.55 V on PMA8084). */
static const struct regulator_desc pm8226_hfsmps = {
.linear_ranges = (struct linear_range[]) {
REGULATOR_LINEAR_RANGE(375000, 0, 95, 12500),
REGULATOR_LINEAR_RANGE(1575000, 96, 158, 25000),
},
.n_linear_ranges = 2,
.n_voltages = 159,
.ops = &rpm_smps_ldo_ops,
};
static const struct regulator_desc pm8226_ftsmps = {
.linear_ranges = (struct linear_range[]) {
REGULATOR_LINEAR_RANGE(350000, 0, 184, 5000),
REGULATOR_LINEAR_RANGE(1280000, 185, 261, 10000),
},
.n_linear_ranges = 2,
.n_voltages = 262,
.ops = &rpm_smps_ldo_ops,
};
static const struct regulator_desc pm8226_pldo = {
.linear_ranges = (struct linear_range[]) {
REGULATOR_LINEAR_RANGE(750000, 0, 63, 12500),
REGULATOR_LINEAR_RANGE(1550000, 64, 126, 25000),
REGULATOR_LINEAR_RANGE(3100000, 127, 163, 50000),
},
.n_linear_ranges = 3,
.n_voltages = 164,
.ops = &rpm_smps_ldo_ops,
};
static const struct regulator_desc pm8226_nldo = {
.linear_ranges = (struct linear_range[]) {
REGULATOR_LINEAR_RANGE(750000, 0, 63, 12500),
},
.n_linear_ranges = 1,
.n_voltages = 64,
.ops = &rpm_smps_ldo_ops,
};
static const struct regulator_desc pm8226_switch = {
.ops = &rpm_switch_ops,
};
/* PM8x41: descriptor shared by PM8841 and PM8941 HFSMPS. */
static const struct regulator_desc pm8x41_hfsmps = {
.linear_ranges = (struct linear_range[]) {
REGULATOR_LINEAR_RANGE( 375000, 0, 95, 12500),
REGULATOR_LINEAR_RANGE(1575000, 96, 158, 25000),
},
.n_linear_ranges = 2,
.n_voltages = 159,
.ops = &rpm_smps_ldo_ops,
};
static const struct regulator_desc pm8841_ftsmps = {
.linear_ranges = (struct linear_range[]) {
REGULATOR_LINEAR_RANGE(350000, 0, 184, 5000),
REGULATOR_LINEAR_RANGE(1280000, 185, 261, 10000),
},
.n_linear_ranges = 2,
.n_voltages = 262,
.ops = &rpm_smps_ldo_ops,
};
static const struct regulator_desc pm8941_boost = {
.linear_ranges = (struct linear_range[]) {
REGULATOR_LINEAR_RANGE(4000000, 0, 30, 50000),
},
.n_linear_ranges = 1,
.n_voltages = 31,
.ops = &rpm_smps_ldo_ops,
};
static const struct regulator_desc pm8941_pldo = {
.linear_ranges = (struct linear_range[]) {
REGULATOR_LINEAR_RANGE( 750000, 0, 63, 12500),
REGULATOR_LINEAR_RANGE(1550000, 64, 126, 25000),
REGULATOR_LINEAR_RANGE(3100000, 127, 163, 50000),
},
.n_linear_ranges = 3,
.n_voltages = 164,
.ops = &rpm_smps_ldo_ops,
};
static const struct regulator_desc pm8941_nldo = {
.linear_ranges = (struct linear_range[]) {
REGULATOR_LINEAR_RANGE(750000, 0, 63, 12500),
},
.n_linear_ranges = 1,
.n_voltages = 64,
.ops = &rpm_smps_ldo_ops,
};
/* Low-noise LDO with a single fixed output of 1.74 V. */
static const struct regulator_desc pm8941_lnldo = {
.fixed_uV = 1740000,
.n_voltages = 1,
.ops = &rpm_smps_ldo_ops_fixed,
};
static const struct regulator_desc pm8941_switch = {
.ops = &rpm_switch_ops,
};
/* PM8916 family descriptors. */
static const struct regulator_desc pm8916_pldo = {
.linear_ranges = (struct linear_range[]) {
REGULATOR_LINEAR_RANGE(1750000, 0, 127, 12500),
},
.n_linear_ranges = 1,
.n_voltages = 128,
.ops = &rpm_smps_ldo_ops,
};
static const struct regulator_desc pm8916_nldo = {
.linear_ranges = (struct linear_range[]) {
REGULATOR_LINEAR_RANGE(375000, 0, 93, 12500),
},
.n_linear_ranges = 1,
.n_voltages = 94,
.ops = &rpm_smps_ldo_ops,
};
/* LVO bucks: two ranges with different step sizes (12.5 mV / 25 mV). */
static const struct regulator_desc pm8916_buck_lvo_smps = {
.linear_ranges = (struct linear_range[]) {
REGULATOR_LINEAR_RANGE(375000, 0, 95, 12500),
REGULATOR_LINEAR_RANGE(750000, 96, 127, 25000),
},
.n_linear_ranges = 2,
.n_voltages = 128,
.ops = &rpm_smps_ldo_ops,
};
static const struct regulator_desc pm8916_buck_hvo_smps = {
.linear_ranges = (struct linear_range[]) {
REGULATOR_LINEAR_RANGE(1550000, 0, 31, 25000),
},
.n_linear_ranges = 1,
.n_voltages = 32,
.ops = &rpm_smps_ldo_ops,
};
/* PM8950 family descriptors. */
static const struct regulator_desc pm8950_hfsmps = {
.linear_ranges = (struct linear_range[]) {
REGULATOR_LINEAR_RANGE(375000, 0, 95, 12500),
REGULATOR_LINEAR_RANGE(1550000, 96, 127, 25000),
},
.n_linear_ranges = 2,
.n_voltages = 128,
.ops = &rpm_smps_ldo_ops,
};
static const struct regulator_desc pm8950_ftsmps2p5 = {
.linear_ranges = (struct linear_range[]) {
REGULATOR_LINEAR_RANGE(80000, 0, 255, 5000),
REGULATOR_LINEAR_RANGE(160000, 256, 460, 10000),
},
.n_linear_ranges = 2,
.n_voltages = 461,
.ops = &rpm_smps_ldo_ops,
};
static const struct regulator_desc pm8950_ult_nldo = {
.linear_ranges = (struct linear_range[]) {
REGULATOR_LINEAR_RANGE(375000, 0, 202, 12500),
},
.n_linear_ranges = 1,
.n_voltages = 203,
.ops = &rpm_smps_ldo_ops,
};
static const struct regulator_desc pm8950_ult_pldo = {
.linear_ranges = (struct linear_range[]) {
REGULATOR_LINEAR_RANGE(1750000, 0, 127, 12500),
},
.n_linear_ranges = 1,
.n_voltages = 128,
.ops = &rpm_smps_ldo_ops,
};
static const struct regulator_desc pm8950_pldo_lv = {
.linear_ranges = (struct linear_range[]) {
REGULATOR_LINEAR_RANGE(1500000, 0, 16, 25000),
},
.n_linear_ranges = 1,
.n_voltages = 17,
.ops = &rpm_smps_ldo_ops,
};
static const struct regulator_desc pm8950_pldo = {
.linear_ranges = (struct linear_range[]) {
REGULATOR_LINEAR_RANGE(975000, 0, 164, 12500),
},
.n_linear_ranges = 1,
.n_voltages = 165,
.ops = &rpm_smps_ldo_ops,
};
/* PM8953 descriptors. */
static const struct regulator_desc pm8953_lnldo = {
.linear_ranges = (struct linear_range[]) {
REGULATOR_LINEAR_RANGE(690000, 0, 7, 60000),
REGULATOR_LINEAR_RANGE(1380000, 8, 15, 120000),
},
.n_linear_ranges = 2,
.n_voltages = 16,
.ops = &rpm_smps_ldo_ops,
};
static const struct regulator_desc pm8953_ult_nldo = {
.linear_ranges = (struct linear_range[]) {
REGULATOR_LINEAR_RANGE(375000, 0, 93, 12500),
},
.n_linear_ranges = 1,
.n_voltages = 94,
.ops = &rpm_smps_ldo_ops,
};
/* PM8994 / PMI8994 family descriptors. */
static const struct regulator_desc pm8994_hfsmps = {
.linear_ranges = (struct linear_range[]) {
REGULATOR_LINEAR_RANGE( 375000, 0, 95, 12500),
REGULATOR_LINEAR_RANGE(1550000, 96, 158, 25000),
},
.n_linear_ranges = 2,
.n_voltages = 159,
.ops = &rpm_smps_ldo_ops,
};
static const struct regulator_desc pm8994_ftsmps = {
.linear_ranges = (struct linear_range[]) {
REGULATOR_LINEAR_RANGE(350000, 0, 199, 5000),
REGULATOR_LINEAR_RANGE(700000, 200, 349, 10000),
},
.n_linear_ranges = 2,
.n_voltages = 350,
.ops = &rpm_smps_ldo_ops,
};
static const struct regulator_desc pm8994_nldo = {
.linear_ranges = (struct linear_range[]) {
REGULATOR_LINEAR_RANGE(750000, 0, 63, 12500),
},
.n_linear_ranges = 1,
.n_voltages = 64,
.ops = &rpm_smps_ldo_ops,
};
static const struct regulator_desc pm8994_pldo = {
.linear_ranges = (struct linear_range[]) {
REGULATOR_LINEAR_RANGE( 750000, 0, 63, 12500),
REGULATOR_LINEAR_RANGE(1550000, 64, 126, 25000),
REGULATOR_LINEAR_RANGE(3100000, 127, 163, 50000),
},
.n_linear_ranges = 3,
.n_voltages = 164,
.ops = &rpm_smps_ldo_ops,
};
static const struct regulator_desc pm8994_switch = {
.ops = &rpm_switch_ops,
};
/* Low-noise LDO with a single fixed output of 1.74 V. */
static const struct regulator_desc pm8994_lnldo = {
.fixed_uV = 1740000,
.n_voltages = 1,
.ops = &rpm_smps_ldo_ops_fixed,
};
static const struct regulator_desc pmi8994_ftsmps = {
.linear_ranges = (struct linear_range[]) {
REGULATOR_LINEAR_RANGE(350000, 0, 199, 5000),
REGULATOR_LINEAR_RANGE(700000, 200, 349, 10000),
},
.n_linear_ranges = 2,
.n_voltages = 350,
.ops = &rpm_smps_ldo_ops,
};
static const struct regulator_desc pmi8994_hfsmps = {
.linear_ranges = (struct linear_range[]) {
REGULATOR_LINEAR_RANGE(350000, 0, 80, 12500),
REGULATOR_LINEAR_RANGE(700000, 81, 141, 25000),
},
.n_linear_ranges = 2,
.n_voltages = 142,
.ops = &rpm_smps_ldo_ops,
};
/* Boost-bypass regulator, uses the BOB ops (no set_load). */
static const struct regulator_desc pmi8994_bby = {
.linear_ranges = (struct linear_range[]) {
REGULATOR_LINEAR_RANGE(3000000, 0, 44, 50000),
},
.n_linear_ranges = 1,
.n_voltages = 45,
.ops = &rpm_bob_ops,
};
/* PM8998 / PMI8998 family descriptors. */
static const struct regulator_desc pm8998_ftsmps = {
.linear_ranges = (struct linear_range[]) {
REGULATOR_LINEAR_RANGE(320000, 0, 258, 4000),
},
.n_linear_ranges = 1,
.n_voltages = 259,
.ops = &rpm_smps_ldo_ops,
};
static const struct regulator_desc pm8998_hfsmps = {
.linear_ranges = (struct linear_range[]) {
REGULATOR_LINEAR_RANGE(320000, 0, 215, 8000),
},
.n_linear_ranges = 1,
.n_voltages = 216,
.ops = &rpm_smps_ldo_ops,
};
static const struct regulator_desc pm8998_nldo = {
.linear_ranges = (struct linear_range[]) {
REGULATOR_LINEAR_RANGE(312000, 0, 127, 8000),
},
.n_linear_ranges = 1,
.n_voltages = 128,
.ops = &rpm_smps_ldo_ops,
};
static const struct regulator_desc pm8998_pldo = {
.linear_ranges = (struct linear_range[]) {
REGULATOR_LINEAR_RANGE(1664000, 0, 255, 8000),
},
.n_linear_ranges = 1,
.n_voltages = 256,
.ops = &rpm_smps_ldo_ops,
};
static const struct regulator_desc pm8998_pldo_lv = {
.linear_ranges = (struct linear_range[]) {
REGULATOR_LINEAR_RANGE(1256000, 0, 127, 8000),
},
.n_linear_ranges = 1,
.n_voltages = 128,
.ops = &rpm_smps_ldo_ops,
};
static const struct regulator_desc pm8998_switch = {
.ops = &rpm_switch_ops,
};
static const struct regulator_desc pmi8998_bob = {
.linear_ranges = (struct linear_range[]) {
REGULATOR_LINEAR_RANGE(1824000, 0, 83, 32000),
},
.n_linear_ranges = 1,
.n_voltages = 84,
.ops = &rpm_bob_ops,
};
/* PM660 / PM660L family descriptors. */
static const struct regulator_desc pm660_ftsmps = {
.linear_ranges = (struct linear_range[]) {
REGULATOR_LINEAR_RANGE(355000, 0, 199, 5000),
},
.n_linear_ranges = 1,
.n_voltages = 200,
.ops = &rpm_smps_ldo_ops,
};
static const struct regulator_desc pm660_hfsmps = {
.linear_ranges = (struct linear_range[]) {
REGULATOR_LINEAR_RANGE(320000, 0, 216, 8000),
},
.n_linear_ranges = 1,
.n_voltages = 217,
.ops = &rpm_smps_ldo_ops,
};
static const struct regulator_desc pm660_ht_nldo = {
.linear_ranges = (struct linear_range[]) {
REGULATOR_LINEAR_RANGE(312000, 0, 124, 8000),
},
.n_linear_ranges = 1,
.n_voltages = 125,
.ops = &rpm_smps_ldo_ops,
};
static const struct regulator_desc pm660_ht_lvpldo = {
.linear_ranges = (struct linear_range[]) {
REGULATOR_LINEAR_RANGE(1504000, 0, 62, 8000),
},
.n_linear_ranges = 1,
.n_voltages = 63,
.ops = &rpm_smps_ldo_ops,
};
static const struct regulator_desc pm660_nldo660 = {
.linear_ranges = (struct linear_range[]) {
REGULATOR_LINEAR_RANGE(320000, 0, 123, 8000),
},
.n_linear_ranges = 1,
.n_voltages = 124,
.ops = &rpm_smps_ldo_ops,
};
static const struct regulator_desc pm660_pldo660 = {
.linear_ranges = (struct linear_range[]) {
REGULATOR_LINEAR_RANGE(1504000, 0, 255, 8000),
},
.n_linear_ranges = 1,
.n_voltages = 256,
.ops = &rpm_smps_ldo_ops,
};
static const struct regulator_desc pm660l_bob = {
.linear_ranges = (struct linear_range[]) {
REGULATOR_LINEAR_RANGE(1800000, 0, 84, 32000),
},
.n_linear_ranges = 1,
.n_voltages = 85,
.ops = &rpm_bob_ops,
};
static const struct regulator_desc pm6125_ftsmps = {
.linear_ranges = (struct linear_range[]) {
REGULATOR_LINEAR_RANGE(300000, 0, 268, 4000),
},
.n_linear_ranges = 1,
.n_voltages = 269,
.ops = &rpm_smps_ldo_ops,
};
/* Generic PMIC5 buck descriptors (shared across chips). */
static const struct regulator_desc pmic5_ftsmps520 = {
.linear_ranges = (struct linear_range[]) {
REGULATOR_LINEAR_RANGE(300000, 0, 263, 4000),
},
.n_linear_ranges = 1,
.n_voltages = 264,
.ops = &rpm_smps_ldo_ops,
};
static const struct regulator_desc pmic5_hfsmps515 = {
.linear_ranges = (struct linear_range[]) {
REGULATOR_LINEAR_RANGE(320000, 0, 235, 16000),
},
.n_linear_ranges = 1,
.n_voltages = 236,
.ops = &rpm_smps_ldo_ops,
};
/* PMS405 descriptors.  Numeric suffixes presumably denote the rated
 * load in mA (e.g. pldo50 vs pldo150 share identical voltage ranges) —
 * TODO confirm against the PMIC datasheet. */
static const struct regulator_desc pms405_hfsmps3 = {
.linear_ranges = (struct linear_range[]) {
REGULATOR_LINEAR_RANGE(320000, 0, 215, 8000),
},
.n_linear_ranges = 1,
.n_voltages = 216,
.ops = &rpm_smps_ldo_ops,
};
static const struct regulator_desc pms405_nldo300 = {
.linear_ranges = (struct linear_range[]) {
REGULATOR_LINEAR_RANGE(312000, 0, 127, 8000),
},
.n_linear_ranges = 1,
.n_voltages = 128,
.ops = &rpm_smps_ldo_ops,
};
static const struct regulator_desc pms405_nldo1200 = {
.linear_ranges = (struct linear_range[]) {
REGULATOR_LINEAR_RANGE(312000, 0, 127, 8000),
},
.n_linear_ranges = 1,
.n_voltages = 128,
.ops = &rpm_smps_ldo_ops,
};
static const struct regulator_desc pms405_pldo50 = {
.linear_ranges = (struct linear_range[]) {
REGULATOR_LINEAR_RANGE(1664000, 0, 128, 16000),
},
.n_linear_ranges = 1,
.n_voltages = 129,
.ops = &rpm_smps_ldo_ops,
};
static const struct regulator_desc pms405_pldo150 = {
.linear_ranges = (struct linear_range[]) {
REGULATOR_LINEAR_RANGE(1664000, 0, 128, 16000),
},
.n_linear_ranges = 1,
.n_voltages = 129,
.ops = &rpm_smps_ldo_ops,
};
static const struct regulator_desc pms405_pldo600 = {
.linear_ranges = (struct linear_range[]) {
REGULATOR_LINEAR_RANGE(1256000, 0, 98, 8000),
},
.n_linear_ranges = 1,
.n_voltages = 99,
.ops = &rpm_smps_ldo_ops,
};
/* MP5496 external PMIC descriptors (use the no-set_load ops). */
static const struct regulator_desc mp5496_smps = {
.linear_ranges = (struct linear_range[]) {
REGULATOR_LINEAR_RANGE(600000, 0, 127, 12500),
},
.n_linear_ranges = 1,
.n_voltages = 128,
.ops = &rpm_mp5496_ops,
};
static const struct regulator_desc mp5496_ldoa2 = {
.linear_ranges = (struct linear_range[]) {
REGULATOR_LINEAR_RANGE(800000, 0, 127, 25000),
},
.n_linear_ranges = 1,
.n_voltages = 128,
.ops = &rpm_mp5496_ops,
};
/* PM2250 descriptors. */
static const struct regulator_desc pm2250_lvftsmps = {
.linear_ranges = (struct linear_range[]) {
REGULATOR_LINEAR_RANGE(320000, 0, 269, 4000),
},
.n_linear_ranges = 1,
.n_voltages = 270,
.ops = &rpm_smps_ldo_ops,
};
static const struct regulator_desc pm2250_ftsmps = {
.linear_ranges = (struct linear_range[]) {
REGULATOR_LINEAR_RANGE(640000, 0, 269, 8000),
},
.n_linear_ranges = 1,
.n_voltages = 270,
.ops = &rpm_smps_ldo_ops,
};
/* One entry in a per-PMIC regulator table. */
struct rpm_regulator_data {
const char *name; /* regulator name, matched against DT */
u32 type; /* RPM resource type (QCOM_SMD_RPM_*) */
u32 id; /* RPM resource id within the type */
const struct regulator_desc *desc; /* voltage range descriptor */
const char *supply; /* name of the parent supply, or NULL */
};
/* Per-PMIC tables: { name, RPM resource type, id, descriptor, supply }. */
static const struct rpm_regulator_data rpm_mp5496_regulators[] = {
{ "s1", QCOM_SMD_RPM_SMPA, 1, &mp5496_smps, "s1" },
{ "s2", QCOM_SMD_RPM_SMPA, 2, &mp5496_smps, "s2" },
{ "l2", QCOM_SMD_RPM_LDOA, 2, &mp5496_ldoa2, "l2" },
{}
};
/* PM2250 reuses the PM660 LDO descriptors. */
static const struct rpm_regulator_data rpm_pm2250_regulators[] = {
{ "s1", QCOM_SMD_RPM_SMPA, 1, &pm2250_lvftsmps, "vdd_s1" },
{ "s2", QCOM_SMD_RPM_SMPA, 2, &pm2250_lvftsmps, "vdd_s2" },
{ "s3", QCOM_SMD_RPM_SMPA, 3, &pm2250_lvftsmps, "vdd_s3" },
{ "s4", QCOM_SMD_RPM_SMPA, 4, &pm2250_ftsmps, "vdd_s4" },
{ "l1", QCOM_SMD_RPM_LDOA, 1, &pm660_nldo660, "vdd_l1_l2_l3_l5_l6_l7_l8_l9_l10_l11_l12" },
{ "l2", QCOM_SMD_RPM_LDOA, 2, &pm660_nldo660, "vdd_l1_l2_l3_l5_l6_l7_l8_l9_l10_l11_l12" },
{ "l3", QCOM_SMD_RPM_LDOA, 3, &pm660_nldo660, "vdd_l1_l2_l3_l5_l6_l7_l8_l9_l10_l11_l12" },
{ "l4", QCOM_SMD_RPM_LDOA, 4, &pm660_pldo660, "vdd_l4_l17_l18_l19_l20_l21_l22" },
{ "l5", QCOM_SMD_RPM_LDOA, 5, &pm660_nldo660, "vdd_l1_l2_l3_l5_l6_l7_l8_l9_l10_l11_l12" },
{ "l6", QCOM_SMD_RPM_LDOA, 6, &pm660_nldo660, "vdd_l1_l2_l3_l5_l6_l7_l8_l9_l10_l11_l12" },
{ "l7", QCOM_SMD_RPM_LDOA, 7, &pm660_nldo660, "vdd_l1_l2_l3_l5_l6_l7_l8_l9_l10_l11_l12" },
{ "l8", QCOM_SMD_RPM_LDOA, 8, &pm660_nldo660, "vdd_l1_l2_l3_l5_l6_l7_l8_l9_l10_l11_l12" },
{ "l9", QCOM_SMD_RPM_LDOA, 9, &pm660_nldo660, "vdd_l1_l2_l3_l5_l6_l7_l8_l9_l10_l11_l12" },
{ "l10", QCOM_SMD_RPM_LDOA, 10, &pm660_nldo660, "vdd_l1_l2_l3_l5_l6_l7_l8_l9_l10_l11_l12" },
{ "l11", QCOM_SMD_RPM_LDOA, 11, &pm660_nldo660, "vdd_l1_l2_l3_l5_l6_l7_l8_l9_l10_l11_l12" },
{ "l12", QCOM_SMD_RPM_LDOA, 12, &pm660_nldo660, "vdd_l1_l2_l3_l5_l6_l7_l8_l9_l10_l11_l12" },
{ "l13", QCOM_SMD_RPM_LDOA, 13, &pm660_ht_lvpldo, "vdd_l13_l14_l15_l16" },
{ "l14", QCOM_SMD_RPM_LDOA, 14, &pm660_ht_lvpldo, "vdd_l13_l14_l15_l16" },
{ "l15", QCOM_SMD_RPM_LDOA, 15, &pm660_ht_lvpldo, "vdd_l13_l14_l15_l16" },
{ "l16", QCOM_SMD_RPM_LDOA, 16, &pm660_ht_lvpldo, "vdd_l13_l14_l15_l16" },
{ "l17", QCOM_SMD_RPM_LDOA, 17, &pm660_pldo660, "vdd_l4_l17_l18_l19_l20_l21_l22" },
{ "l18", QCOM_SMD_RPM_LDOA, 18, &pm660_pldo660, "vdd_l4_l17_l18_l19_l20_l21_l22" },
{ "l19", QCOM_SMD_RPM_LDOA, 19, &pm660_pldo660, "vdd_l4_l17_l18_l19_l20_l21_l22" },
{ "l20", QCOM_SMD_RPM_LDOA, 20, &pm660_pldo660, "vdd_l4_l17_l18_l19_l20_l21_l22" },
{ "l21", QCOM_SMD_RPM_LDOA, 21, &pm660_pldo660, "vdd_l4_l17_l18_l19_l20_l21_l22" },
{ "l22", QCOM_SMD_RPM_LDOA, 22, &pm660_pldo660, "vdd_l4_l17_l18_l19_l20_l21_l22" },
{}
};
/* PM6125 mixes PM6125, PM8998 and PM660 descriptors. */
static const struct rpm_regulator_data rpm_pm6125_regulators[] = {
{ "s1", QCOM_SMD_RPM_SMPA, 1, &pm6125_ftsmps, "vdd_s1" },
{ "s2", QCOM_SMD_RPM_SMPA, 2, &pm6125_ftsmps, "vdd_s2" },
{ "s3", QCOM_SMD_RPM_SMPA, 3, &pm6125_ftsmps, "vdd_s3" },
{ "s4", QCOM_SMD_RPM_SMPA, 4, &pm6125_ftsmps, "vdd_s4" },
{ "s5", QCOM_SMD_RPM_SMPA, 5, &pm8998_hfsmps, "vdd_s5" },
{ "s6", QCOM_SMD_RPM_SMPA, 6, &pm8998_hfsmps, "vdd_s6" },
{ "s7", QCOM_SMD_RPM_SMPA, 7, &pm8998_hfsmps, "vdd_s7" },
{ "s8", QCOM_SMD_RPM_SMPA, 8, &pm6125_ftsmps, "vdd_s8" },
{ "l1", QCOM_SMD_RPM_LDOA, 1, &pm660_nldo660, "vdd_l1_l7_l17_l18" },
{ "l2", QCOM_SMD_RPM_LDOA, 2, &pm660_nldo660, "vdd_l2_l3_l4" },
{ "l3", QCOM_SMD_RPM_LDOA, 3, &pm660_nldo660, "vdd_l2_l3_l4" },
{ "l4", QCOM_SMD_RPM_LDOA, 4, &pm660_nldo660, "vdd_l2_l3_l4" },
{ "l5", QCOM_SMD_RPM_LDOA, 5, &pm660_pldo660, "vdd_l5_l15_l19_l20_l21_l22" },
{ "l6", QCOM_SMD_RPM_LDOA, 6, &pm660_nldo660, "vdd_l6_l8" },
{ "l7", QCOM_SMD_RPM_LDOA, 7, &pm660_nldo660, "vdd_l1_l7_l17_l18" },
{ "l8", QCOM_SMD_RPM_LDOA, 8, &pm660_nldo660, "vdd_l6_l8" },
{ "l9", QCOM_SMD_RPM_LDOA, 9, &pm660_ht_lvpldo, "vdd_l9_l11" },
{ "l10", QCOM_SMD_RPM_LDOA, 10, &pm660_ht_lvpldo, "vdd_l10_l13_l14" },
{ "l11", QCOM_SMD_RPM_LDOA, 11, &pm660_ht_lvpldo, "vdd_l9_l11" },
{ "l12", QCOM_SMD_RPM_LDOA, 12, &pm660_ht_lvpldo, "vdd_l12_l16" },
{ "l13", QCOM_SMD_RPM_LDOA, 13, &pm660_ht_lvpldo, "vdd_l10_l13_l14" },
{ "l14", QCOM_SMD_RPM_LDOA, 14, &pm660_ht_lvpldo, "vdd_l10_l13_l14" },
{ "l15", QCOM_SMD_RPM_LDOA, 15, &pm660_pldo660, "vdd_l5_l15_l19_l20_l21_l22" },
{ "l16", QCOM_SMD_RPM_LDOA, 16, &pm660_ht_lvpldo, "vdd_l12_l16" },
{ "l17", QCOM_SMD_RPM_LDOA, 17, &pm660_nldo660, "vdd_l1_l7_l17_l18" },
{ "l18", QCOM_SMD_RPM_LDOA, 18, &pm660_nldo660, "vdd_l1_l7_l17_l18" },
{ "l19", QCOM_SMD_RPM_LDOA, 19, &pm660_pldo660, "vdd_l5_l15_l19_l20_l21_l22" },
{ "l20", QCOM_SMD_RPM_LDOA, 20, &pm660_pldo660, "vdd_l5_l15_l19_l20_l21_l22" },
{ "l21", QCOM_SMD_RPM_LDOA, 21, &pm660_pldo660, "vdd_l5_l15_l19_l20_l21_l22" },
{ "l22", QCOM_SMD_RPM_LDOA, 22, &pm660_pldo660, "vdd_l5_l15_l19_l20_l21_l22" },
{ "l23", QCOM_SMD_RPM_LDOA, 23, &pm660_pldo660, "vdd_l23_l24" },
{ "l24", QCOM_SMD_RPM_LDOA, 24, &pm660_pldo660, "vdd_l23_l24" },
{ }
};
static const struct rpm_regulator_data rpm_pm660_regulators[] = {
{ "s1", QCOM_SMD_RPM_SMPA, 1, &pm660_ftsmps, "vdd_s1" },
{ "s2", QCOM_SMD_RPM_SMPA, 2, &pm660_ftsmps, "vdd_s2" },
{ "s3", QCOM_SMD_RPM_SMPA, 3, &pm660_ftsmps, "vdd_s3" },
{ "s4", QCOM_SMD_RPM_SMPA, 4, &pm660_hfsmps, "vdd_s4" },
{ "s5", QCOM_SMD_RPM_SMPA, 5, &pm660_hfsmps, "vdd_s5" },
{ "s6", QCOM_SMD_RPM_SMPA, 6, &pm660_hfsmps, "vdd_s6" },
{ "l1", QCOM_SMD_RPM_LDOA, 1, &pm660_nldo660, "vdd_l1_l6_l7" },
{ "l2", QCOM_SMD_RPM_LDOA, 2, &pm660_ht_nldo, "vdd_l2_l3" },
{ "l3", QCOM_SMD_RPM_LDOA, 3, &pm660_nldo660, "vdd_l2_l3" },
/* l4 is unaccessible on PM660 */
{ "l5", QCOM_SMD_RPM_LDOA, 5, &pm660_ht_nldo, "vdd_l5" },
{ "l6", QCOM_SMD_RPM_LDOA, 6, &pm660_ht_nldo, "vdd_l1_l6_l7" },
{ "l7", QCOM_SMD_RPM_LDOA, 7, &pm660_ht_nldo, "vdd_l1_l6_l7" },
{ "l8", QCOM_SMD_RPM_LDOA, 8, &pm660_ht_lvpldo, "vdd_l8_l9_l10_l11_l12_l13_l14" },
{ "l9", QCOM_SMD_RPM_LDOA, 9, &pm660_ht_lvpldo, "vdd_l8_l9_l10_l11_l12_l13_l14" },
{ "l10", QCOM_SMD_RPM_LDOA, 10, &pm660_ht_lvpldo, "vdd_l8_l9_l10_l11_l12_l13_l14" },
{ "l11", QCOM_SMD_RPM_LDOA, 11, &pm660_ht_lvpldo, "vdd_l8_l9_l10_l11_l12_l13_l14" },
{ "l12", QCOM_SMD_RPM_LDOA, 12, &pm660_ht_lvpldo, "vdd_l8_l9_l10_l11_l12_l13_l14" },
{ "l13", QCOM_SMD_RPM_LDOA, 13, &pm660_ht_lvpldo, "vdd_l8_l9_l10_l11_l12_l13_l14" },
{ "l14", QCOM_SMD_RPM_LDOA, 14, &pm660_ht_lvpldo, "vdd_l8_l9_l10_l11_l12_l13_l14" },
{ "l15", QCOM_SMD_RPM_LDOA, 15, &pm660_pldo660, "vdd_l15_l16_l17_l18_l19" },
{ "l16", QCOM_SMD_RPM_LDOA, 16, &pm660_pldo660, "vdd_l15_l16_l17_l18_l19" },
{ "l17", QCOM_SMD_RPM_LDOA, 17, &pm660_pldo660, "vdd_l15_l16_l17_l18_l19" },
{ "l18", QCOM_SMD_RPM_LDOA, 18, &pm660_pldo660, "vdd_l15_l16_l17_l18_l19" },
{ "l19", QCOM_SMD_RPM_LDOA, 19, &pm660_pldo660, "vdd_l15_l16_l17_l18_l19" },
{ }
};
/* PM660L: s3/s5/l9/l10 go through RW* (remote-wrapper) resources with id 0. */
static const struct rpm_regulator_data rpm_pm660l_regulators[] = {
{ "s1", QCOM_SMD_RPM_SMPB, 1, &pm660_ftsmps, "vdd_s1" },
{ "s2", QCOM_SMD_RPM_SMPB, 2, &pm660_ftsmps, "vdd_s2" },
{ "s3", QCOM_SMD_RPM_RWCX, 0, &pm660_ftsmps, "vdd_s3_s4" },
{ "s5", QCOM_SMD_RPM_RWMX, 0, &pm660_ftsmps, "vdd_s5" },
{ "l1", QCOM_SMD_RPM_LDOB, 1, &pm660_nldo660, "vdd_l1_l9_l10" },
{ "l2", QCOM_SMD_RPM_LDOB, 2, &pm660_pldo660, "vdd_l2" },
{ "l3", QCOM_SMD_RPM_LDOB, 3, &pm660_pldo660, "vdd_l3_l5_l7_l8" },
{ "l4", QCOM_SMD_RPM_LDOB, 4, &pm660_pldo660, "vdd_l4_l6" },
{ "l5", QCOM_SMD_RPM_LDOB, 5, &pm660_pldo660, "vdd_l3_l5_l7_l8" },
{ "l6", QCOM_SMD_RPM_LDOB, 6, &pm660_pldo660, "vdd_l4_l6" },
{ "l7", QCOM_SMD_RPM_LDOB, 7, &pm660_pldo660, "vdd_l3_l5_l7_l8" },
{ "l8", QCOM_SMD_RPM_LDOB, 8, &pm660_pldo660, "vdd_l3_l5_l7_l8" },
{ "l9", QCOM_SMD_RPM_RWLC, 0, &pm660_ht_nldo, "vdd_l1_l9_l10" },
{ "l10", QCOM_SMD_RPM_RWLM, 0, &pm660_ht_nldo, "vdd_l1_l9_l10" },
{ "bob", QCOM_SMD_RPM_BOBB, 1, &pm660l_bob, "vdd_bob", },
{ }
};
static const struct rpm_regulator_data rpm_pm8226_regulators[] = {
{ "s1", QCOM_SMD_RPM_SMPA, 1, &pm8226_hfsmps, "vdd_s1" },
{ "s2", QCOM_SMD_RPM_SMPA, 2, &pm8226_ftsmps, "vdd_s2" },
{ "s3", QCOM_SMD_RPM_SMPA, 3, &pm8226_hfsmps, "vdd_s3" },
{ "s4", QCOM_SMD_RPM_SMPA, 4, &pm8226_hfsmps, "vdd_s4" },
{ "s5", QCOM_SMD_RPM_SMPA, 5, &pm8226_hfsmps, "vdd_s5" },
{ "l1", QCOM_SMD_RPM_LDOA, 1, &pm8226_nldo, "vdd_l1_l2_l4_l5" },
{ "l2", QCOM_SMD_RPM_LDOA, 2, &pm8226_nldo, "vdd_l1_l2_l4_l5" },
{ "l3", QCOM_SMD_RPM_LDOA, 3, &pm8226_nldo, "vdd_l3_l24_l26" },
{ "l4", QCOM_SMD_RPM_LDOA, 4, &pm8226_nldo, "vdd_l1_l2_l4_l5" },
{ "l5", QCOM_SMD_RPM_LDOA, 5, &pm8226_nldo, "vdd_l1_l2_l4_l5" },
{ "l6", QCOM_SMD_RPM_LDOA, 6, &pm8226_pldo, "vdd_l6_l7_l8_l9_l27" },
{ "l7", QCOM_SMD_RPM_LDOA, 7, &pm8226_pldo, "vdd_l6_l7_l8_l9_l27" },
{ "l8", QCOM_SMD_RPM_LDOA, 8, &pm8226_pldo, "vdd_l6_l7_l8_l9_l27" },
{ "l9", QCOM_SMD_RPM_LDOA, 9, &pm8226_pldo, "vdd_l6_l7_l8_l9_l27" },
{ "l10", QCOM_SMD_RPM_LDOA, 10, &pm8226_pldo, "vdd_l10_l11_l13" },
{ "l11", QCOM_SMD_RPM_LDOA, 11, &pm8226_pldo, "vdd_l10_l11_l13" },
{ "l12", QCOM_SMD_RPM_LDOA, 12, &pm8226_pldo, "vdd_l12_l14" },
{ "l13", QCOM_SMD_RPM_LDOA, 13, &pm8226_pldo, "vdd_l10_l11_l13" },
{ "l14", QCOM_SMD_RPM_LDOA, 14, &pm8226_pldo, "vdd_l12_l14" },
{ "l15", QCOM_SMD_RPM_LDOA, 15, &pm8226_pldo, "vdd_l15_l16_l17_l18" },
{ "l16", QCOM_SMD_RPM_LDOA, 16, &pm8226_pldo, "vdd_l15_l16_l17_l18" },
{ "l17", QCOM_SMD_RPM_LDOA, 17, &pm8226_pldo, "vdd_l15_l16_l17_l18" },
{ "l18", QCOM_SMD_RPM_LDOA, 18, &pm8226_pldo, "vdd_l15_l16_l17_l18" },
{ "l19", QCOM_SMD_RPM_LDOA, 19, &pm8226_pldo, "vdd_l19_l20_l21_l22_l23_l28" },
{ "l20", QCOM_SMD_RPM_LDOA, 20, &pm8226_pldo, "vdd_l19_l20_l21_l22_l23_l28" },
{ "l21", QCOM_SMD_RPM_LDOA, 21, &pm8226_pldo, "vdd_l19_l20_l21_l22_l23_l28" },
{ "l22", QCOM_SMD_RPM_LDOA, 22, &pm8226_pldo, "vdd_l19_l20_l21_l22_l23_l28" },
{ "l23", QCOM_SMD_RPM_LDOA, 23, &pm8226_pldo, "vdd_l19_l20_l21_l22_l23_l28" },
{ "l24", QCOM_SMD_RPM_LDOA, 24, &pm8226_nldo, "vdd_l3_l24_l26" },
{ "l25", QCOM_SMD_RPM_LDOA, 25, &pm8226_pldo, "vdd_l25" },
{ "l26", QCOM_SMD_RPM_LDOA, 26, &pm8226_nldo, "vdd_l3_l24_l26" },
{ "l27", QCOM_SMD_RPM_LDOA, 27, &pm8226_pldo, "vdd_l6_l7_l8_l9_l27" },
{ "l28", QCOM_SMD_RPM_LDOA, 28, &pm8226_pldo, "vdd_l19_l20_l21_l22_l23_l28" },
{ "lvs1", QCOM_SMD_RPM_VSA, 1, &pm8226_switch, "vdd_lvs1" },
{}
};
/* PM8841 provides only SMPS bucks (no LDOs or switches). */
static const struct rpm_regulator_data rpm_pm8841_regulators[] = {
{ "s1", QCOM_SMD_RPM_SMPB, 1, &pm8x41_hfsmps, "vdd_s1" },
{ "s2", QCOM_SMD_RPM_SMPB, 2, &pm8841_ftsmps, "vdd_s2" },
{ "s3", QCOM_SMD_RPM_SMPB, 3, &pm8x41_hfsmps, "vdd_s3" },
{ "s4", QCOM_SMD_RPM_SMPB, 4, &pm8841_ftsmps, "vdd_s4" },
{ "s5", QCOM_SMD_RPM_SMPB, 5, &pm8841_ftsmps, "vdd_s5" },
{ "s6", QCOM_SMD_RPM_SMPB, 6, &pm8841_ftsmps, "vdd_s6" },
{ "s7", QCOM_SMD_RPM_SMPB, 7, &pm8841_ftsmps, "vdd_s7" },
{ "s8", QCOM_SMD_RPM_SMPB, 8, &pm8841_ftsmps, "vdd_s8" },
{}
};
/*
 * NOTE(review): l5-l7 and l11 reference pm8226_pldo rather than a
 * pm8909/pm8916 descriptor; this looks intentional (shared silicon
 * ranges) but should be confirmed against the PM8909 datasheet.
 * l16 is absent from the table.
 */
static const struct rpm_regulator_data rpm_pm8909_regulators[] = {
{ "s1", QCOM_SMD_RPM_SMPA, 1, &pm8916_buck_lvo_smps, "vdd_s1" },
{ "s2", QCOM_SMD_RPM_SMPA, 2, &pm8916_buck_hvo_smps, "vdd_s2" },
{ "l1", QCOM_SMD_RPM_LDOA, 1, &pm8916_nldo, "vdd_l1" },
{ "l2", QCOM_SMD_RPM_LDOA, 2, &pm8916_nldo, "vdd_l2_l5" },
{ "l3", QCOM_SMD_RPM_LDOA, 3, &pm8916_nldo, "vdd_l3_l6_l10" },
{ "l4", QCOM_SMD_RPM_LDOA, 4, &pm8916_pldo, "vdd_l4_l7" },
{ "l5", QCOM_SMD_RPM_LDOA, 5, &pm8226_pldo, "vdd_l2_l5" },
{ "l6", QCOM_SMD_RPM_LDOA, 6, &pm8226_pldo, "vdd_l3_l6_l10" },
{ "l7", QCOM_SMD_RPM_LDOA, 7, &pm8226_pldo, "vdd_l4_l7" },
{ "l8", QCOM_SMD_RPM_LDOA, 8, &pm8916_pldo, "vdd_l8_l11_l15_l18" },
{ "l9", QCOM_SMD_RPM_LDOA, 9, &pm8916_pldo, "vdd_l9_l12_l14_l17" },
{ "l10", QCOM_SMD_RPM_LDOA, 10, &pm8916_nldo, "vdd_l3_l6_l10" },
{ "l11", QCOM_SMD_RPM_LDOA, 11, &pm8226_pldo, "vdd_l8_l11_l15_l18" },
{ "l12", QCOM_SMD_RPM_LDOA, 12, &pm8916_pldo, "vdd_l9_l12_l14_l17" },
{ "l13", QCOM_SMD_RPM_LDOA, 13, &pm8916_pldo, "vdd_l13" },
{ "l14", QCOM_SMD_RPM_LDOA, 14, &pm8916_pldo, "vdd_l9_l12_l14_l17" },
{ "l15", QCOM_SMD_RPM_LDOA, 15, &pm8916_pldo, "vdd_l8_l11_l15_l18" },
{ "l17", QCOM_SMD_RPM_LDOA, 17, &pm8916_pldo, "vdd_l9_l12_l14_l17" },
{ "l18", QCOM_SMD_RPM_LDOA, 18, &pm8916_pldo, "vdd_l8_l11_l15_l18" },
{}
};
static const struct rpm_regulator_data rpm_pm8916_regulators[] = {
{ "s1", QCOM_SMD_RPM_SMPA, 1, &pm8916_buck_lvo_smps, "vdd_s1" },
{ "s2", QCOM_SMD_RPM_SMPA, 2, &pm8916_buck_lvo_smps, "vdd_s2" },
{ "s3", QCOM_SMD_RPM_SMPA, 3, &pm8916_buck_lvo_smps, "vdd_s3" },
{ "s4", QCOM_SMD_RPM_SMPA, 4, &pm8916_buck_hvo_smps, "vdd_s4" },
{ "l1", QCOM_SMD_RPM_LDOA, 1, &pm8916_nldo, "vdd_l1_l2_l3" },
{ "l2", QCOM_SMD_RPM_LDOA, 2, &pm8916_nldo, "vdd_l1_l2_l3" },
{ "l3", QCOM_SMD_RPM_LDOA, 3, &pm8916_nldo, "vdd_l1_l2_l3" },
{ "l4", QCOM_SMD_RPM_LDOA, 4, &pm8916_pldo, "vdd_l4_l5_l6" },
{ "l5", QCOM_SMD_RPM_LDOA, 5, &pm8916_pldo, "vdd_l4_l5_l6" },
{ "l6", QCOM_SMD_RPM_LDOA, 6, &pm8916_pldo, "vdd_l4_l5_l6" },
{ "l7", QCOM_SMD_RPM_LDOA, 7, &pm8916_pldo, "vdd_l7" },
{ "l8", QCOM_SMD_RPM_LDOA, 8, &pm8916_pldo, "vdd_l8_l9_l10_l11_l12_l13_l14_l15_l16_l17_l18" },
{ "l9", QCOM_SMD_RPM_LDOA, 9, &pm8916_pldo, "vdd_l8_l9_l10_l11_l12_l13_l14_l15_l16_l17_l18" },
{ "l10", QCOM_SMD_RPM_LDOA, 10, &pm8916_pldo, "vdd_l8_l9_l10_l11_l12_l13_l14_l15_l16_l17_l18"},
{ "l11", QCOM_SMD_RPM_LDOA, 11, &pm8916_pldo, "vdd_l8_l9_l10_l11_l12_l13_l14_l15_l16_l17_l18"},
{ "l12", QCOM_SMD_RPM_LDOA, 12, &pm8916_pldo, "vdd_l8_l9_l10_l11_l12_l13_l14_l15_l16_l17_l18"},
{ "l13", QCOM_SMD_RPM_LDOA, 13, &pm8916_pldo, "vdd_l8_l9_l10_l11_l12_l13_l14_l15_l16_l17_l18"},
{ "l14", QCOM_SMD_RPM_LDOA, 14, &pm8916_pldo, "vdd_l8_l9_l10_l11_l12_l13_l14_l15_l16_l17_l18"},
{ "l15", QCOM_SMD_RPM_LDOA, 15, &pm8916_pldo, "vdd_l8_l9_l10_l11_l12_l13_l14_l15_l16_l17_l18"},
{ "l16", QCOM_SMD_RPM_LDOA, 16, &pm8916_pldo, "vdd_l8_l9_l10_l11_l12_l13_l14_l15_l16_l17_l18"},
{ "l17", QCOM_SMD_RPM_LDOA, 17, &pm8916_pldo, "vdd_l8_l9_l10_l11_l12_l13_l14_l15_l16_l17_l18"},
{ "l18", QCOM_SMD_RPM_LDOA, 18, &pm8916_pldo, "vdd_l8_l9_l10_l11_l12_l13_l14_l15_l16_l17_l18"},
{}
};
/*
 * PM8941: SMPS, boost, LDO and voltage-switch resources. The "s4" boost
 * and the switches without a supply column have no parent supply mapping.
 */
static const struct rpm_regulator_data rpm_pm8941_regulators[] = {
	{ "s1", QCOM_SMD_RPM_SMPA, 1, &pm8x41_hfsmps, "vdd_s1" },
	{ "s2", QCOM_SMD_RPM_SMPA, 2, &pm8x41_hfsmps, "vdd_s2" },
	{ "s3", QCOM_SMD_RPM_SMPA, 3, &pm8x41_hfsmps, "vdd_s3" },
	{ "s4", QCOM_SMD_RPM_BOOST, 1, &pm8941_boost },
	{ "l1", QCOM_SMD_RPM_LDOA, 1, &pm8941_nldo, "vdd_l1_l3" },
	{ "l2", QCOM_SMD_RPM_LDOA, 2, &pm8941_nldo, "vdd_l2_lvs1_2_3" },
	{ "l3", QCOM_SMD_RPM_LDOA, 3, &pm8941_nldo, "vdd_l1_l3" },
	{ "l4", QCOM_SMD_RPM_LDOA, 4, &pm8941_nldo, "vdd_l4_l11" },
	{ "l5", QCOM_SMD_RPM_LDOA, 5, &pm8941_lnldo, "vdd_l5_l7" },
	{ "l6", QCOM_SMD_RPM_LDOA, 6, &pm8941_pldo, "vdd_l6_l12_l14_l15" },
	{ "l7", QCOM_SMD_RPM_LDOA, 7, &pm8941_lnldo, "vdd_l5_l7" },
	{ "l8", QCOM_SMD_RPM_LDOA, 8, &pm8941_pldo, "vdd_l8_l16_l18_l19" },
	{ "l9", QCOM_SMD_RPM_LDOA, 9, &pm8941_pldo, "vdd_l9_l10_l17_l22" },
	{ "l10", QCOM_SMD_RPM_LDOA, 10, &pm8941_pldo, "vdd_l9_l10_l17_l22" },
	{ "l11", QCOM_SMD_RPM_LDOA, 11, &pm8941_nldo, "vdd_l4_l11" },
	{ "l12", QCOM_SMD_RPM_LDOA, 12, &pm8941_pldo, "vdd_l6_l12_l14_l15" },
	{ "l13", QCOM_SMD_RPM_LDOA, 13, &pm8941_pldo, "vdd_l13_l20_l23_l24" },
	{ "l14", QCOM_SMD_RPM_LDOA, 14, &pm8941_pldo, "vdd_l6_l12_l14_l15" },
	{ "l15", QCOM_SMD_RPM_LDOA, 15, &pm8941_pldo, "vdd_l6_l12_l14_l15" },
	{ "l16", QCOM_SMD_RPM_LDOA, 16, &pm8941_pldo, "vdd_l8_l16_l18_l19" },
	{ "l17", QCOM_SMD_RPM_LDOA, 17, &pm8941_pldo, "vdd_l9_l10_l17_l22" },
	{ "l18", QCOM_SMD_RPM_LDOA, 18, &pm8941_pldo, "vdd_l8_l16_l18_l19" },
	{ "l19", QCOM_SMD_RPM_LDOA, 19, &pm8941_pldo, "vdd_l8_l16_l18_l19" },
	{ "l20", QCOM_SMD_RPM_LDOA, 20, &pm8941_pldo, "vdd_l13_l20_l23_l24" },
	{ "l21", QCOM_SMD_RPM_LDOA, 21, &pm8941_pldo, "vdd_l21" },
	{ "l22", QCOM_SMD_RPM_LDOA, 22, &pm8941_pldo, "vdd_l9_l10_l17_l22" },
	{ "l23", QCOM_SMD_RPM_LDOA, 23, &pm8941_pldo, "vdd_l13_l20_l23_l24" },
	{ "l24", QCOM_SMD_RPM_LDOA, 24, &pm8941_pldo, "vdd_l13_l20_l23_l24" },
	{ "lvs1", QCOM_SMD_RPM_VSA, 1, &pm8941_switch, "vdd_l2_lvs1_2_3" },
	{ "lvs2", QCOM_SMD_RPM_VSA, 2, &pm8941_switch, "vdd_l2_lvs1_2_3" },
	{ "lvs3", QCOM_SMD_RPM_VSA, 3, &pm8941_switch, "vdd_l2_lvs1_2_3" },
	{ "5vs1", QCOM_SMD_RPM_VSA, 4, &pm8941_switch, "vin_5vs" },
	{ "5vs2", QCOM_SMD_RPM_VSA, 5, &pm8941_switch, "vin_5vs" },
	{}
};
/*
 * PM8950: RPM-managed resources. Gaps in the numbering are intentional
 * (S5 is SPMI-controlled; some LDO ids do not exist on this PMIC).
 */
static const struct rpm_regulator_data rpm_pm8950_regulators[] = {
	{ "s1", QCOM_SMD_RPM_SMPA, 1, &pm8950_hfsmps, "vdd_s1" },
	{ "s2", QCOM_SMD_RPM_SMPA, 2, &pm8950_hfsmps, "vdd_s2" },
	{ "s3", QCOM_SMD_RPM_SMPA, 3, &pm8950_hfsmps, "vdd_s3" },
	{ "s4", QCOM_SMD_RPM_SMPA, 4, &pm8950_hfsmps, "vdd_s4" },
	/* S5 is managed via SPMI. */
	{ "s6", QCOM_SMD_RPM_SMPA, 6, &pm8950_hfsmps, "vdd_s6" },
	{ "l1", QCOM_SMD_RPM_LDOA, 1, &pm8950_ult_nldo, "vdd_l1_l19" },
	{ "l2", QCOM_SMD_RPM_LDOA, 2, &pm8950_ult_nldo, "vdd_l2_l23" },
	{ "l3", QCOM_SMD_RPM_LDOA, 3, &pm8950_ult_nldo, "vdd_l3" },
	/* L4 seems not to exist. */
	{ "l5", QCOM_SMD_RPM_LDOA, 5, &pm8950_pldo_lv, "vdd_l5_l6_l7_l16" },
	{ "l6", QCOM_SMD_RPM_LDOA, 6, &pm8950_pldo_lv, "vdd_l5_l6_l7_l16" },
	{ "l7", QCOM_SMD_RPM_LDOA, 7, &pm8950_pldo_lv, "vdd_l5_l6_l7_l16" },
	{ "l8", QCOM_SMD_RPM_LDOA, 8, &pm8950_ult_pldo, "vdd_l8_l11_l12_l17_l22" },
	{ "l9", QCOM_SMD_RPM_LDOA, 9, &pm8950_ult_pldo, "vdd_l9_l10_l13_l14_l15_l18" },
	{ "l10", QCOM_SMD_RPM_LDOA, 10, &pm8950_ult_nldo, "vdd_l9_l10_l13_l14_l15_l18"},
	{ "l11", QCOM_SMD_RPM_LDOA, 11, &pm8950_ult_pldo, "vdd_l8_l11_l12_l17_l22" },
	{ "l12", QCOM_SMD_RPM_LDOA, 12, &pm8950_ult_pldo, "vdd_l8_l11_l12_l17_l22" },
	{ "l13", QCOM_SMD_RPM_LDOA, 13, &pm8950_ult_pldo, "vdd_l9_l10_l13_l14_l15_l18" },
	{ "l14", QCOM_SMD_RPM_LDOA, 14, &pm8950_ult_pldo, "vdd_l9_l10_l13_l14_l15_l18" },
	{ "l15", QCOM_SMD_RPM_LDOA, 15, &pm8950_ult_pldo, "vdd_l9_l10_l13_l14_l15_l18" },
	{ "l16", QCOM_SMD_RPM_LDOA, 16, &pm8950_ult_pldo, "vdd_l5_l6_l7_l16" },
	{ "l17", QCOM_SMD_RPM_LDOA, 17, &pm8950_ult_pldo, "vdd_l8_l11_l12_l17_l22" },
	/* L18 seems not to exist. */
	{ "l19", QCOM_SMD_RPM_LDOA, 19, &pm8950_pldo, "vdd_l1_l19" },
	/* L20 & L21 seem not to exist. */
	{ "l22", QCOM_SMD_RPM_LDOA, 22, &pm8950_pldo, "vdd_l8_l11_l12_l17_l22" },
	{ "l23", QCOM_SMD_RPM_LDOA, 23, &pm8950_pldo, "vdd_l2_l23" },
	{}
};
/* PM8953: RPM-managed SMPS and LDO resources (shares several descriptors
 * with PM8998 and PM8950). */
static const struct rpm_regulator_data rpm_pm8953_regulators[] = {
	{ "s1", QCOM_SMD_RPM_SMPA, 1, &pm8998_hfsmps, "vdd_s1" },
	{ "s2", QCOM_SMD_RPM_SMPA, 2, &pm8998_hfsmps, "vdd_s2" },
	{ "s3", QCOM_SMD_RPM_SMPA, 3, &pm8998_hfsmps, "vdd_s3" },
	{ "s4", QCOM_SMD_RPM_SMPA, 4, &pm8998_hfsmps, "vdd_s4" },
	{ "s5", QCOM_SMD_RPM_SMPA, 5, &pm8950_ftsmps2p5, "vdd_s5" },
	{ "s6", QCOM_SMD_RPM_SMPA, 6, &pm8950_ftsmps2p5, "vdd_s6" },
	{ "s7", QCOM_SMD_RPM_SMPA, 7, &pm8998_hfsmps, "vdd_s7" },
	{ "l1", QCOM_SMD_RPM_LDOA, 1, &pm8953_ult_nldo, "vdd_l1" },
	{ "l2", QCOM_SMD_RPM_LDOA, 2, &pm8953_ult_nldo, "vdd_l2_l3" },
	{ "l3", QCOM_SMD_RPM_LDOA, 3, &pm8953_ult_nldo, "vdd_l2_l3" },
	{ "l4", QCOM_SMD_RPM_LDOA, 4, &pm8950_ult_pldo, "vdd_l4_l5_l6_l7_l16_l19" },
	{ "l5", QCOM_SMD_RPM_LDOA, 5, &pm8950_ult_pldo, "vdd_l4_l5_l6_l7_l16_l19" },
	{ "l6", QCOM_SMD_RPM_LDOA, 6, &pm8950_ult_pldo, "vdd_l4_l5_l6_l7_l16_l19" },
	{ "l7", QCOM_SMD_RPM_LDOA, 7, &pm8950_ult_pldo, "vdd_l4_l5_l6_l7_l16_l19" },
	{ "l8", QCOM_SMD_RPM_LDOA, 8, &pm8950_ult_pldo, "vdd_l8_l11_l12_l13_l14_l15" },
	{ "l9", QCOM_SMD_RPM_LDOA, 9, &pm8950_ult_pldo, "vdd_l9_l10_l17_l18_l22" },
	{ "l10", QCOM_SMD_RPM_LDOA, 10, &pm8950_ult_pldo, "vdd_l9_l10_l17_l18_l22" },
	{ "l11", QCOM_SMD_RPM_LDOA, 11, &pm8950_ult_pldo, "vdd_l8_l11_l12_l13_l14_l15" },
	{ "l12", QCOM_SMD_RPM_LDOA, 12, &pm8950_ult_pldo, "vdd_l8_l11_l12_l13_l14_l15" },
	{ "l13", QCOM_SMD_RPM_LDOA, 13, &pm8950_ult_pldo, "vdd_l8_l11_l12_l13_l14_l15" },
	{ "l14", QCOM_SMD_RPM_LDOA, 14, &pm8950_ult_pldo, "vdd_l8_l11_l12_l13_l14_l15" },
	{ "l15", QCOM_SMD_RPM_LDOA, 15, &pm8950_ult_pldo, "vdd_l8_l11_l12_l13_l14_l15" },
	{ "l16", QCOM_SMD_RPM_LDOA, 16, &pm8950_ult_pldo, "vdd_l4_l5_l6_l7_l16_l19" },
	{ "l17", QCOM_SMD_RPM_LDOA, 17, &pm8950_ult_pldo, "vdd_l9_l10_l17_l18_l22" },
	{ "l18", QCOM_SMD_RPM_LDOA, 18, &pm8950_ult_pldo, "vdd_l9_l10_l17_l18_l22" },
	{ "l19", QCOM_SMD_RPM_LDOA, 19, &pm8953_ult_nldo, "vdd_l4_l5_l6_l7_l16_l19" },
	{ "l20", QCOM_SMD_RPM_LDOA, 20, &pm8953_lnldo, "vdd_l20" },
	{ "l21", QCOM_SMD_RPM_LDOA, 21, &pm8953_lnldo, "vdd_l21" },
	{ "l22", QCOM_SMD_RPM_LDOA, 22, &pm8950_ult_pldo, "vdd_l9_l10_l17_l18_l22" },
	{ "l23", QCOM_SMD_RPM_LDOA, 23, &pm8953_ult_nldo, "vdd_l23" },
	{}
};
/* PM8994: RPM-managed SMPS, LDO and low-voltage switch resources. */
static const struct rpm_regulator_data rpm_pm8994_regulators[] = {
	{ "s1", QCOM_SMD_RPM_SMPA, 1, &pm8994_ftsmps, "vdd_s1" },
	{ "s2", QCOM_SMD_RPM_SMPA, 2, &pm8994_ftsmps, "vdd_s2" },
	{ "s3", QCOM_SMD_RPM_SMPA, 3, &pm8994_hfsmps, "vdd_s3" },
	{ "s4", QCOM_SMD_RPM_SMPA, 4, &pm8994_hfsmps, "vdd_s4" },
	{ "s5", QCOM_SMD_RPM_SMPA, 5, &pm8994_hfsmps, "vdd_s5" },
	{ "s6", QCOM_SMD_RPM_SMPA, 6, &pm8994_ftsmps, "vdd_s6" },
	{ "s7", QCOM_SMD_RPM_SMPA, 7, &pm8994_hfsmps, "vdd_s7" },
	{ "s8", QCOM_SMD_RPM_SMPA, 8, &pm8994_ftsmps, "vdd_s8" },
	{ "s9", QCOM_SMD_RPM_SMPA, 9, &pm8994_ftsmps, "vdd_s9" },
	{ "s10", QCOM_SMD_RPM_SMPA, 10, &pm8994_ftsmps, "vdd_s10" },
	{ "s11", QCOM_SMD_RPM_SMPA, 11, &pm8994_ftsmps, "vdd_s11" },
	{ "s12", QCOM_SMD_RPM_SMPA, 12, &pm8994_ftsmps, "vdd_s12" },
	{ "l1", QCOM_SMD_RPM_LDOA, 1, &pm8994_nldo, "vdd_l1" },
	{ "l2", QCOM_SMD_RPM_LDOA, 2, &pm8994_nldo, "vdd_l2_l26_l28" },
	{ "l3", QCOM_SMD_RPM_LDOA, 3, &pm8994_nldo, "vdd_l3_l11" },
	{ "l4", QCOM_SMD_RPM_LDOA, 4, &pm8994_nldo, "vdd_l4_l27_l31" },
	{ "l5", QCOM_SMD_RPM_LDOA, 5, &pm8994_lnldo, "vdd_l5_l7" },
	{ "l6", QCOM_SMD_RPM_LDOA, 6, &pm8994_pldo, "vdd_l6_l12_l32" },
	{ "l7", QCOM_SMD_RPM_LDOA, 7, &pm8994_lnldo, "vdd_l5_l7" },
	{ "l8", QCOM_SMD_RPM_LDOA, 8, &pm8994_pldo, "vdd_l8_l16_l30" },
	{ "l9", QCOM_SMD_RPM_LDOA, 9, &pm8994_pldo, "vdd_l9_l10_l18_l22" },
	{ "l10", QCOM_SMD_RPM_LDOA, 10, &pm8994_pldo, "vdd_l9_l10_l18_l22" },
	{ "l11", QCOM_SMD_RPM_LDOA, 11, &pm8994_nldo, "vdd_l3_l11" },
	{ "l12", QCOM_SMD_RPM_LDOA, 12, &pm8994_pldo, "vdd_l6_l12_l32" },
	{ "l13", QCOM_SMD_RPM_LDOA, 13, &pm8994_pldo, "vdd_l13_l19_l23_l24" },
	{ "l14", QCOM_SMD_RPM_LDOA, 14, &pm8994_pldo, "vdd_l14_l15" },
	{ "l15", QCOM_SMD_RPM_LDOA, 15, &pm8994_pldo, "vdd_l14_l15" },
	{ "l16", QCOM_SMD_RPM_LDOA, 16, &pm8994_pldo, "vdd_l8_l16_l30" },
	{ "l17", QCOM_SMD_RPM_LDOA, 17, &pm8994_pldo, "vdd_l17_l29" },
	{ "l18", QCOM_SMD_RPM_LDOA, 18, &pm8994_pldo, "vdd_l9_l10_l18_l22" },
	{ "l19", QCOM_SMD_RPM_LDOA, 19, &pm8994_pldo, "vdd_l13_l19_l23_l24" },
	{ "l20", QCOM_SMD_RPM_LDOA, 20, &pm8994_pldo, "vdd_l20_l21" },
	{ "l21", QCOM_SMD_RPM_LDOA, 21, &pm8994_pldo, "vdd_l20_l21" },
	{ "l22", QCOM_SMD_RPM_LDOA, 22, &pm8994_pldo, "vdd_l9_l10_l18_l22" },
	{ "l23", QCOM_SMD_RPM_LDOA, 23, &pm8994_pldo, "vdd_l13_l19_l23_l24" },
	{ "l24", QCOM_SMD_RPM_LDOA, 24, &pm8994_pldo, "vdd_l13_l19_l23_l24" },
	{ "l25", QCOM_SMD_RPM_LDOA, 25, &pm8994_pldo, "vdd_l25" },
	{ "l26", QCOM_SMD_RPM_LDOA, 26, &pm8994_nldo, "vdd_l2_l26_l28" },
	{ "l27", QCOM_SMD_RPM_LDOA, 27, &pm8994_nldo, "vdd_l4_l27_l31" },
	{ "l28", QCOM_SMD_RPM_LDOA, 28, &pm8994_nldo, "vdd_l2_l26_l28" },
	{ "l29", QCOM_SMD_RPM_LDOA, 29, &pm8994_pldo, "vdd_l17_l29" },
	{ "l30", QCOM_SMD_RPM_LDOA, 30, &pm8994_pldo, "vdd_l8_l16_l30" },
	{ "l31", QCOM_SMD_RPM_LDOA, 31, &pm8994_nldo, "vdd_l4_l27_l31" },
	{ "l32", QCOM_SMD_RPM_LDOA, 32, &pm8994_pldo, "vdd_l6_l12_l32" },
	{ "lvs1", QCOM_SMD_RPM_VSA, 1, &pm8994_switch, "vdd_lvs1_2" },
	{ "lvs2", QCOM_SMD_RPM_VSA, 2, &pm8994_switch, "vdd_lvs1_2" },
	{}
};
/* PM8998: RPM-managed SMPS, LDO and low-voltage switch resources. */
static const struct rpm_regulator_data rpm_pm8998_regulators[] = {
	{ "s1", QCOM_SMD_RPM_SMPA, 1, &pm8998_ftsmps, "vdd_s1" },
	{ "s2", QCOM_SMD_RPM_SMPA, 2, &pm8998_ftsmps, "vdd_s2" },
	{ "s3", QCOM_SMD_RPM_SMPA, 3, &pm8998_hfsmps, "vdd_s3" },
	{ "s4", QCOM_SMD_RPM_SMPA, 4, &pm8998_hfsmps, "vdd_s4" },
	{ "s5", QCOM_SMD_RPM_SMPA, 5, &pm8998_hfsmps, "vdd_s5" },
	{ "s6", QCOM_SMD_RPM_SMPA, 6, &pm8998_ftsmps, "vdd_s6" },
	{ "s7", QCOM_SMD_RPM_SMPA, 7, &pm8998_ftsmps, "vdd_s7" },
	{ "s8", QCOM_SMD_RPM_SMPA, 8, &pm8998_ftsmps, "vdd_s8" },
	{ "s9", QCOM_SMD_RPM_SMPA, 9, &pm8998_ftsmps, "vdd_s9" },
	{ "s10", QCOM_SMD_RPM_SMPA, 10, &pm8998_ftsmps, "vdd_s10" },
	{ "s11", QCOM_SMD_RPM_SMPA, 11, &pm8998_ftsmps, "vdd_s11" },
	{ "s12", QCOM_SMD_RPM_SMPA, 12, &pm8998_ftsmps, "vdd_s12" },
	{ "s13", QCOM_SMD_RPM_SMPA, 13, &pm8998_ftsmps, "vdd_s13" },
	{ "l1", QCOM_SMD_RPM_LDOA, 1, &pm8998_nldo, "vdd_l1_l27" },
	{ "l2", QCOM_SMD_RPM_LDOA, 2, &pm8998_nldo, "vdd_l2_l8_l17" },
	{ "l3", QCOM_SMD_RPM_LDOA, 3, &pm8998_nldo, "vdd_l3_l11" },
	{ "l4", QCOM_SMD_RPM_LDOA, 4, &pm8998_nldo, "vdd_l4_l5" },
	{ "l5", QCOM_SMD_RPM_LDOA, 5, &pm8998_nldo, "vdd_l4_l5" },
	{ "l6", QCOM_SMD_RPM_LDOA, 6, &pm8998_pldo, "vdd_l6" },
	{ "l7", QCOM_SMD_RPM_LDOA, 7, &pm8998_pldo_lv, "vdd_l7_l12_l14_l15" },
	{ "l8", QCOM_SMD_RPM_LDOA, 8, &pm8998_nldo, "vdd_l2_l8_l17" },
	{ "l9", QCOM_SMD_RPM_LDOA, 9, &pm8998_pldo, "vdd_l9" },
	{ "l10", QCOM_SMD_RPM_LDOA, 10, &pm8998_pldo, "vdd_l10_l23_l25" },
	{ "l11", QCOM_SMD_RPM_LDOA, 11, &pm8998_nldo, "vdd_l3_l11" },
	{ "l12", QCOM_SMD_RPM_LDOA, 12, &pm8998_pldo_lv, "vdd_l7_l12_l14_l15" },
	{ "l13", QCOM_SMD_RPM_LDOA, 13, &pm8998_pldo, "vdd_l13_l19_l21" },
	{ "l14", QCOM_SMD_RPM_LDOA, 14, &pm8998_pldo_lv, "vdd_l7_l12_l14_l15" },
	{ "l15", QCOM_SMD_RPM_LDOA, 15, &pm8998_pldo_lv, "vdd_l7_l12_l14_l15" },
	{ "l16", QCOM_SMD_RPM_LDOA, 16, &pm8998_pldo, "vdd_l16_l28" },
	{ "l17", QCOM_SMD_RPM_LDOA, 17, &pm8998_nldo, "vdd_l2_l8_l17" },
	{ "l18", QCOM_SMD_RPM_LDOA, 18, &pm8998_pldo, "vdd_l18_l22" },
	{ "l19", QCOM_SMD_RPM_LDOA, 19, &pm8998_pldo, "vdd_l13_l19_l21" },
	{ "l20", QCOM_SMD_RPM_LDOA, 20, &pm8998_pldo, "vdd_l20_l24" },
	{ "l21", QCOM_SMD_RPM_LDOA, 21, &pm8998_pldo, "vdd_l13_l19_l21" },
	{ "l22", QCOM_SMD_RPM_LDOA, 22, &pm8998_pldo, "vdd_l18_l22" },
	{ "l23", QCOM_SMD_RPM_LDOA, 23, &pm8998_pldo, "vdd_l10_l23_l25" },
	{ "l24", QCOM_SMD_RPM_LDOA, 24, &pm8998_pldo, "vdd_l20_l24" },
	{ "l25", QCOM_SMD_RPM_LDOA, 25, &pm8998_pldo, "vdd_l10_l23_l25" },
	{ "l26", QCOM_SMD_RPM_LDOA, 26, &pm8998_nldo, "vdd_l26" },
	{ "l27", QCOM_SMD_RPM_LDOA, 27, &pm8998_nldo, "vdd_l1_l27" },
	{ "l28", QCOM_SMD_RPM_LDOA, 28, &pm8998_pldo, "vdd_l16_l28" },
	{ "lvs1", QCOM_SMD_RPM_VSA, 1, &pm8998_switch, "vdd_lvs1_lvs2" },
	{ "lvs2", QCOM_SMD_RPM_VSA, 2, &pm8998_switch, "vdd_lvs1_lvs2" },
	{}
};
/*
 * PMA8084: RPM-managed resources; the switch entries carry no parent
 * supply mapping.
 */
static const struct rpm_regulator_data rpm_pma8084_regulators[] = {
	{ "s1", QCOM_SMD_RPM_SMPA, 1, &pma8084_ftsmps, "vdd_s1" },
	{ "s2", QCOM_SMD_RPM_SMPA, 2, &pma8084_ftsmps, "vdd_s2" },
	{ "s3", QCOM_SMD_RPM_SMPA, 3, &pma8084_hfsmps, "vdd_s3" },
	{ "s4", QCOM_SMD_RPM_SMPA, 4, &pma8084_hfsmps, "vdd_s4" },
	{ "s5", QCOM_SMD_RPM_SMPA, 5, &pma8084_hfsmps, "vdd_s5" },
	{ "s6", QCOM_SMD_RPM_SMPA, 6, &pma8084_ftsmps, "vdd_s6" },
	{ "s7", QCOM_SMD_RPM_SMPA, 7, &pma8084_ftsmps, "vdd_s7" },
	{ "s8", QCOM_SMD_RPM_SMPA, 8, &pma8084_ftsmps, "vdd_s8" },
	{ "s9", QCOM_SMD_RPM_SMPA, 9, &pma8084_ftsmps, "vdd_s9" },
	{ "s10", QCOM_SMD_RPM_SMPA, 10, &pma8084_ftsmps, "vdd_s10" },
	{ "s11", QCOM_SMD_RPM_SMPA, 11, &pma8084_ftsmps, "vdd_s11" },
	{ "s12", QCOM_SMD_RPM_SMPA, 12, &pma8084_ftsmps, "vdd_s12" },
	{ "l1", QCOM_SMD_RPM_LDOA, 1, &pma8084_nldo, "vdd_l1_l11" },
	{ "l2", QCOM_SMD_RPM_LDOA, 2, &pma8084_nldo, "vdd_l2_l3_l4_l27" },
	{ "l3", QCOM_SMD_RPM_LDOA, 3, &pma8084_nldo, "vdd_l2_l3_l4_l27" },
	{ "l4", QCOM_SMD_RPM_LDOA, 4, &pma8084_nldo, "vdd_l2_l3_l4_l27" },
	{ "l5", QCOM_SMD_RPM_LDOA, 5, &pma8084_pldo, "vdd_l5_l7" },
	{ "l6", QCOM_SMD_RPM_LDOA, 6, &pma8084_pldo, "vdd_l6_l12_l14_l15_l26" },
	{ "l7", QCOM_SMD_RPM_LDOA, 7, &pma8084_pldo, "vdd_l5_l7" },
	{ "l8", QCOM_SMD_RPM_LDOA, 8, &pma8084_pldo, "vdd_l8" },
	{ "l9", QCOM_SMD_RPM_LDOA, 9, &pma8084_pldo, "vdd_l9_l10_l13_l20_l23_l24" },
	{ "l10", QCOM_SMD_RPM_LDOA, 10, &pma8084_pldo, "vdd_l9_l10_l13_l20_l23_l24" },
	{ "l11", QCOM_SMD_RPM_LDOA, 11, &pma8084_nldo, "vdd_l1_l11" },
	{ "l12", QCOM_SMD_RPM_LDOA, 12, &pma8084_pldo, "vdd_l6_l12_l14_l15_l26" },
	{ "l13", QCOM_SMD_RPM_LDOA, 13, &pma8084_pldo, "vdd_l9_l10_l13_l20_l23_l24" },
	{ "l14", QCOM_SMD_RPM_LDOA, 14, &pma8084_pldo, "vdd_l6_l12_l14_l15_l26" },
	{ "l15", QCOM_SMD_RPM_LDOA, 15, &pma8084_pldo, "vdd_l6_l12_l14_l15_l26" },
	{ "l16", QCOM_SMD_RPM_LDOA, 16, &pma8084_pldo, "vdd_l16_l25" },
	{ "l17", QCOM_SMD_RPM_LDOA, 17, &pma8084_pldo, "vdd_l17" },
	{ "l18", QCOM_SMD_RPM_LDOA, 18, &pma8084_pldo, "vdd_l18" },
	{ "l19", QCOM_SMD_RPM_LDOA, 19, &pma8084_pldo, "vdd_l19" },
	{ "l20", QCOM_SMD_RPM_LDOA, 20, &pma8084_pldo, "vdd_l9_l10_l13_l20_l23_l24" },
	{ "l21", QCOM_SMD_RPM_LDOA, 21, &pma8084_pldo, "vdd_l21" },
	{ "l22", QCOM_SMD_RPM_LDOA, 22, &pma8084_pldo, "vdd_l22" },
	{ "l23", QCOM_SMD_RPM_LDOA, 23, &pma8084_pldo, "vdd_l9_l10_l13_l20_l23_l24" },
	{ "l24", QCOM_SMD_RPM_LDOA, 24, &pma8084_pldo, "vdd_l9_l10_l13_l20_l23_l24" },
	{ "l25", QCOM_SMD_RPM_LDOA, 25, &pma8084_pldo, "vdd_l16_l25" },
	{ "l26", QCOM_SMD_RPM_LDOA, 26, &pma8084_pldo, "vdd_l6_l12_l14_l15_l26" },
	{ "l27", QCOM_SMD_RPM_LDOA, 27, &pma8084_nldo, "vdd_l2_l3_l4_l27" },
	{ "lvs1", QCOM_SMD_RPM_VSA, 1, &pma8084_switch },
	{ "lvs2", QCOM_SMD_RPM_VSA, 2, &pma8084_switch },
	{ "lvs3", QCOM_SMD_RPM_VSA, 3, &pma8084_switch },
	{ "lvs4", QCOM_SMD_RPM_VSA, 4, &pma8084_switch },
	{ "5vs1", QCOM_SMD_RPM_VSA, 5, &pma8084_switch },
	{}
};
/* PMI8994: SMPS resources plus the boost-bypass regulator. */
static const struct rpm_regulator_data rpm_pmi8994_regulators[] = {
	{ "s1", QCOM_SMD_RPM_SMPB, 1, &pmi8994_ftsmps, "vdd_s1" },
	{ "s2", QCOM_SMD_RPM_SMPB, 2, &pmi8994_hfsmps, "vdd_s2" },
	{ "s3", QCOM_SMD_RPM_SMPB, 3, &pmi8994_hfsmps, "vdd_s3" },
	{ "boost-bypass", QCOM_SMD_RPM_BBYB, 1, &pmi8994_bby, "vdd_bst_byp" },
	{}
};
/* PMI8998: only the BOB (buck-or-boost) resource is RPM-managed here. */
static const struct rpm_regulator_data rpm_pmi8998_regulators[] = {
	{ "bob", QCOM_SMD_RPM_BOBB, 1, &pmi8998_bob, "vdd_bob" },
	{}
};
/* PMR735A: SMPS/LDO resources (reuses PM660 LDO descriptors). */
static const struct rpm_regulator_data rpm_pmr735a_regulators[] = {
	{ "s1", QCOM_SMD_RPM_SMPE, 1, &pmic5_ftsmps520, "vdd_s1"},
	{ "s2", QCOM_SMD_RPM_SMPE, 2, &pmic5_ftsmps520, "vdd_s2"},
	{ "s3", QCOM_SMD_RPM_SMPE, 3, &pmic5_hfsmps515, "vdd_s3"},
	{ "l1", QCOM_SMD_RPM_LDOE, 1, &pm660_nldo660, "vdd_l1_l2"},
	{ "l2", QCOM_SMD_RPM_LDOE, 2, &pm660_nldo660, "vdd_l1_l2"},
	{ "l3", QCOM_SMD_RPM_LDOE, 3, &pm660_nldo660, "vdd_l3"},
	{ "l4", QCOM_SMD_RPM_LDOE, 4, &pm660_ht_lvpldo, "vdd_l4"},
	{ "l5", QCOM_SMD_RPM_LDOE, 5, &pm660_nldo660, "vdd_l5_l6"},
	{ "l6", QCOM_SMD_RPM_LDOE, 6, &pm660_nldo660, "vdd_l5_l6"},
	{ "l7", QCOM_SMD_RPM_LDOE, 7, &pm660_pldo660, "vdd_l7_bob"},
	{}
};
/* PMS405: RPM-managed SMPS and LDO resources. */
static const struct rpm_regulator_data rpm_pms405_regulators[] = {
	{ "s1", QCOM_SMD_RPM_SMPA, 1, &pms405_hfsmps3, "vdd_s1" },
	{ "s2", QCOM_SMD_RPM_SMPA, 2, &pms405_hfsmps3, "vdd_s2" },
	{ "s3", QCOM_SMD_RPM_SMPA, 3, &pms405_hfsmps3, "vdd_s3" },
	{ "s4", QCOM_SMD_RPM_SMPA, 4, &pms405_hfsmps3, "vdd_s4" },
	{ "s5", QCOM_SMD_RPM_SMPA, 5, &pms405_hfsmps3, "vdd_s5" },
	{ "l1", QCOM_SMD_RPM_LDOA, 1, &pms405_nldo1200, "vdd_l1_l2" },
	{ "l2", QCOM_SMD_RPM_LDOA, 2, &pms405_nldo1200, "vdd_l1_l2" },
	{ "l3", QCOM_SMD_RPM_LDOA, 3, &pms405_nldo1200, "vdd_l3_l8" },
	{ "l4", QCOM_SMD_RPM_LDOA, 4, &pms405_nldo300, "vdd_l4" },
	{ "l5", QCOM_SMD_RPM_LDOA, 5, &pms405_pldo600, "vdd_l5_l6" },
	{ "l6", QCOM_SMD_RPM_LDOA, 6, &pms405_pldo600, "vdd_l5_l6" },
	{ "l7", QCOM_SMD_RPM_LDOA, 7, &pms405_pldo150, "vdd_l7" },
	{ "l8", QCOM_SMD_RPM_LDOA, 8, &pms405_nldo1200, "vdd_l3_l8" },
	{ "l9", QCOM_SMD_RPM_LDOA, 9, &pms405_nldo1200, "vdd_l9" },
	{ "l10", QCOM_SMD_RPM_LDOA, 10, &pms405_pldo50, "vdd_l10_l11_l12_l13" },
	{ "l11", QCOM_SMD_RPM_LDOA, 11, &pms405_pldo150, "vdd_l10_l11_l12_l13" },
	{ "l12", QCOM_SMD_RPM_LDOA, 12, &pms405_pldo150, "vdd_l10_l11_l12_l13" },
	{ "l13", QCOM_SMD_RPM_LDOA, 13, &pms405_pldo150, "vdd_l10_l11_l12_l13" },
	{}
};
/* Maps each supported PMIC compatible string to its regulator table. */
static const struct of_device_id rpm_of_match[] = {
	{ .compatible = "qcom,rpm-mp5496-regulators", .data = &rpm_mp5496_regulators },
	{ .compatible = "qcom,rpm-pm2250-regulators", .data = &rpm_pm2250_regulators },
	{ .compatible = "qcom,rpm-pm6125-regulators", .data = &rpm_pm6125_regulators },
	{ .compatible = "qcom,rpm-pm660-regulators", .data = &rpm_pm660_regulators },
	{ .compatible = "qcom,rpm-pm660l-regulators", .data = &rpm_pm660l_regulators },
	{ .compatible = "qcom,rpm-pm8226-regulators", .data = &rpm_pm8226_regulators },
	{ .compatible = "qcom,rpm-pm8841-regulators", .data = &rpm_pm8841_regulators },
	{ .compatible = "qcom,rpm-pm8909-regulators", .data = &rpm_pm8909_regulators },
	{ .compatible = "qcom,rpm-pm8916-regulators", .data = &rpm_pm8916_regulators },
	{ .compatible = "qcom,rpm-pm8941-regulators", .data = &rpm_pm8941_regulators },
	{ .compatible = "qcom,rpm-pm8950-regulators", .data = &rpm_pm8950_regulators },
	{ .compatible = "qcom,rpm-pm8953-regulators", .data = &rpm_pm8953_regulators },
	{ .compatible = "qcom,rpm-pm8994-regulators", .data = &rpm_pm8994_regulators },
	{ .compatible = "qcom,rpm-pm8998-regulators", .data = &rpm_pm8998_regulators },
	{ .compatible = "qcom,rpm-pma8084-regulators", .data = &rpm_pma8084_regulators },
	{ .compatible = "qcom,rpm-pmi8994-regulators", .data = &rpm_pmi8994_regulators },
	{ .compatible = "qcom,rpm-pmi8998-regulators", .data = &rpm_pmi8998_regulators },
	{ .compatible = "qcom,rpm-pmr735a-regulators", .data = &rpm_pmr735a_regulators },
	{ .compatible = "qcom,rpm-pms405-regulators", .data = &rpm_pms405_regulators },
	{}
};
MODULE_DEVICE_TABLE(of, rpm_of_match);
/**
 * rpm_regulator_init_vreg() - initialize all attributes of a qcom_smd-regulator
 * @vreg: Pointer to the individual qcom_smd-regulator resource
 * @dev: Pointer to the top level qcom_smd-regulator PMIC device
 * @node: Pointer to the individual qcom_smd-regulator resource
 *        device node
 * @rpm: Pointer to the rpm bus node
 * @pmic_rpm_data: Pointer to a null-terminated array of qcom_smd-regulator
 *                 resources defined for the top level PMIC device
 *
 * Return: 0 on success, errno on failure
 */
static int rpm_regulator_init_vreg(struct qcom_rpm_reg *vreg, struct device *dev,
				   struct device_node *node, struct qcom_smd_rpm *rpm,
				   const struct rpm_regulator_data *pmic_rpm_data)
{
	const struct rpm_regulator_data *entry = pmic_rpm_data;
	struct regulator_config config = {};
	struct regulator_dev *rdev;

	/* Find the table entry whose name matches this child node. */
	while (entry->name && !of_node_name_eq(node, entry->name))
		entry++;

	if (!entry->name) {
		dev_err(dev, "Unknown regulator %pOFn\n", node);
		return -EINVAL;
	}

	vreg->dev = dev;
	vreg->rpm = rpm;
	vreg->type = entry->type;
	vreg->id = entry->id;

	/* Start from the shared descriptor, then fill in per-instance fields. */
	memcpy(&vreg->desc, entry->desc, sizeof(vreg->desc));
	vreg->desc.name = entry->name;
	vreg->desc.supply_name = entry->supply;
	vreg->desc.owner = THIS_MODULE;
	vreg->desc.type = REGULATOR_VOLTAGE;
	vreg->desc.of_match = entry->name;

	config.dev = dev;
	config.of_node = node;
	config.driver_data = vreg;

	rdev = devm_regulator_register(dev, &vreg->desc, &config);
	if (IS_ERR(rdev)) {
		dev_err(dev, "%pOFn: devm_regulator_register() failed, ret=%d\n",
			node, (int)PTR_ERR(rdev));
		return PTR_ERR(rdev);
	}

	return 0;
}
/*
 * rpm_reg_probe() - register one qcom_rpm_reg per available child node.
 *
 * Fix/consistency: the original mixed `dev` and `&pdev->dev` after already
 * taking a local `dev`; use the local uniformly.
 *
 * Return: 0 on success, negative errno on failure.
 */
static int rpm_reg_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	const struct rpm_regulator_data *vreg_data;
	struct device_node *node;
	struct qcom_rpm_reg *vreg;
	struct qcom_smd_rpm *rpm;
	int ret;

	/* The RPM handle is published as drvdata by the parent device. */
	rpm = dev_get_drvdata(dev->parent);
	if (!rpm) {
		dev_err(dev, "Unable to retrieve handle to rpm\n");
		return -ENODEV;
	}

	vreg_data = of_device_get_match_data(dev);
	if (!vreg_data)
		return -ENODEV;

	for_each_available_child_of_node(dev->of_node, node) {
		vreg = devm_kzalloc(dev, sizeof(*vreg), GFP_KERNEL);
		if (!vreg) {
			/* Drop the reference held by the iterator on early exit. */
			of_node_put(node);
			return -ENOMEM;
		}

		ret = rpm_regulator_init_vreg(vreg, dev, node, rpm, vreg_data);
		if (ret < 0) {
			of_node_put(node);
			return ret;
		}
	}

	return 0;
}
/* Platform driver; async probe is allowed (PROBE_PREFER_ASYNCHRONOUS). */
static struct platform_driver rpm_reg_driver = {
	.probe = rpm_reg_probe,
	.driver = {
		.name = "qcom_rpm_smd_regulator",
		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
		.of_match_table = rpm_of_match,
	},
};
/* Registered at subsys_initcall level, i.e. earlier than module_init. */
static int __init rpm_reg_init(void)
{
	return platform_driver_register(&rpm_reg_driver);
}
subsys_initcall(rpm_reg_init);
/* Module teardown: unregister the platform driver. */
static void __exit rpm_reg_exit(void)
{
	platform_driver_unregister(&rpm_reg_driver);
}
/* Added the conventional trailing semicolon after module_exit(). */
module_exit(rpm_reg_exit);
MODULE_DESCRIPTION("Qualcomm RPM regulator driver");
MODULE_LICENSE("GPL v2");
/* linux-master | drivers/regulator/qcom_smd-regulator.c */
// SPDX-License-Identifier: GPL-2.0
//
// Copyright (c) 2022 Collabora Ltd.
// Author: AngeloGioacchino Del Regno <[email protected]>
//
// Based on mt6323-regulator.c,
// Copyright (c) 2016 MediaTek Inc.
//
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/mfd/mt6397/core.h>
#include <linux/mfd/mt6331/registers.h>
#include <linux/regulator/driver.h>
#include <linux/regulator/machine.h>
#include <linux/regulator/mt6331-regulator.h>
#include <linux/regulator/of_regulator.h>
/* Values written to the modeset bit field by the set_mode/get_mode ops. */
#define MT6331_LDO_MODE_NORMAL	0
#define MT6331_LDO_MODE_LP	1

/*
 * MT6331 regulators information
 *
 * @desc: standard fields of regulator description.
 * @qi: Mask for query enable signal status of regulators
 * @vselon_reg: Register sections for hardware control mode of bucks
 * @vselctrl_reg: Register for controlling the buck control mode.
 * @vselctrl_mask: Mask for query buck's voltage control mode.
 * @modeset_reg: Register for controlling the regulator low-power mode
 * @modeset_mask: Mask of the low-power mode bit field in @modeset_reg
 * @status_reg: Register for regulator enable status where qi unavailable
 * @status_mask: Mask for querying regulator enable status
 */
struct mt6331_regulator_info {
	struct regulator_desc desc;
	u32 qi;
	u32 vselon_reg;
	u32 vselctrl_reg;
	u32 vselctrl_mask;
	u32 modeset_reg;
	u32 modeset_mask;
	u32 status_reg;
	u32 status_mask;
};
/*
 * Buck converter entry: linear voltage range plus the registers used for
 * hardware (VSEL_ON) voltage selection; enable status is queried via bit 13.
 */
#define MT6331_BUCK(match, vreg, min, max, step, volt_ranges, enreg,	\
		    vosel, vosel_mask, voselon, vosel_ctrl)		\
[MT6331_ID_##vreg] = {							\
	.desc = {							\
		.name = #vreg,						\
		.of_match = of_match_ptr(match),			\
		.ops = &mt6331_volt_range_ops,				\
		.type = REGULATOR_VOLTAGE,				\
		.id = MT6331_ID_##vreg,					\
		.owner = THIS_MODULE,					\
		.n_voltages = (max - min)/step + 1,			\
		.linear_ranges = volt_ranges,				\
		.n_linear_ranges = ARRAY_SIZE(volt_ranges),		\
		.vsel_reg = vosel,					\
		.vsel_mask = vosel_mask,				\
		.enable_reg = enreg,					\
		.enable_mask = BIT(0),					\
	},								\
	.qi = BIT(13),							\
	.vselon_reg = voselon,						\
	.vselctrl_reg = vosel_ctrl,					\
	.vselctrl_mask = BIT(1),					\
	.status_mask = 0,						\
}
/* Always-on LDO entry: voltage-table ops only, no enable control. */
#define MT6331_LDO_AO(match, vreg, ldo_volt_table, vosel, vosel_mask)	\
[MT6331_ID_##vreg] = {							\
	.desc = {							\
		.name = #vreg,						\
		.of_match = of_match_ptr(match),			\
		.ops = &mt6331_volt_table_ao_ops,			\
		.type = REGULATOR_VOLTAGE,				\
		.id = MT6331_ID_##vreg,					\
		.owner = THIS_MODULE,					\
		.n_voltages = ARRAY_SIZE(ldo_volt_table),		\
		.volt_table = ldo_volt_table,				\
		.vsel_reg = vosel,					\
		.vsel_mask = vosel_mask,				\
	},								\
}
/*
 * LDO entry whose enable status lives in a separate status register
 * (no qi bit in the enable register); supports low-power mode control.
 */
#define MT6331_LDO_S(match, vreg, ldo_volt_table, enreg, enbit, vosel,	\
		     vosel_mask, _modeset_reg, _modeset_mask,		\
		     _status_reg, _status_mask)				\
[MT6331_ID_##vreg] = {							\
	.desc = {							\
		.name = #vreg,						\
		.of_match = of_match_ptr(match),			\
		.ops = &mt6331_volt_table_no_qi_ops,			\
		.type = REGULATOR_VOLTAGE,				\
		.id = MT6331_ID_##vreg,					\
		.owner = THIS_MODULE,					\
		.n_voltages = ARRAY_SIZE(ldo_volt_table),		\
		.volt_table = ldo_volt_table,				\
		.vsel_reg = vosel,					\
		.vsel_mask = vosel_mask,				\
		.enable_reg = enreg,					\
		.enable_mask = BIT(enbit),				\
	},								\
	.modeset_reg = _modeset_reg,					\
	.modeset_mask = _modeset_mask,					\
	.status_reg = _status_reg,					\
	.status_mask = _status_mask,					\
}
/*
 * Table-based LDO entry; picks the mode-capable ops only when a modeset
 * register is provided (the ternary is evaluated at compile time).
 */
#define MT6331_LDO(match, vreg, ldo_volt_table, enreg, enbit, vosel,	\
		   vosel_mask, _modeset_reg, _modeset_mask)		\
[MT6331_ID_##vreg] = {							\
	.desc = {							\
		.name = #vreg,						\
		.of_match = of_match_ptr(match),			\
		.ops = (_modeset_reg ?					\
			&mt6331_volt_table_ops :			\
			&mt6331_volt_table_no_ms_ops),			\
		.type = REGULATOR_VOLTAGE,				\
		.id = MT6331_ID_##vreg,					\
		.owner = THIS_MODULE,					\
		.n_voltages = ARRAY_SIZE(ldo_volt_table),		\
		.volt_table = ldo_volt_table,				\
		.vsel_reg = vosel,					\
		.vsel_mask = vosel_mask,				\
		.enable_reg = enreg,					\
		.enable_mask = BIT(enbit),				\
	},								\
	.qi = BIT(15),							\
	.modeset_reg = _modeset_reg,					\
	.modeset_mask = _modeset_mask,					\
}
/* Fixed-voltage regulator entry (n_voltages = 1, voltage given by volt). */
#define MT6331_REG_FIXED(match, vreg, enreg, enbit, qibit, volt,	\
			 _modeset_reg, _modeset_mask)			\
[MT6331_ID_##vreg] = {							\
	.desc = {							\
		.name = #vreg,						\
		.of_match = of_match_ptr(match),			\
		.ops = (_modeset_reg ?					\
			&mt6331_volt_fixed_ops :			\
			&mt6331_volt_fixed_no_ms_ops),			\
		.type = REGULATOR_VOLTAGE,				\
		.id = MT6331_ID_##vreg,					\
		.owner = THIS_MODULE,					\
		.n_voltages = 1,					\
		.enable_reg = enreg,					\
		.enable_mask = BIT(enbit),				\
		.min_uV = volt,						\
	},								\
	.qi = BIT(qibit),						\
	.modeset_reg = _modeset_reg,					\
	.modeset_mask = _modeset_mask,					\
}
/* Bucks: 700000 uV base, selectors 0..0x7f, 6250 uV per step (max 1493750 uV). */
static const struct linear_range buck_volt_range[] = {
	REGULATOR_LINEAR_RANGE(700000, 0, 0x7f, 6250),
};
/*
 * LDO voltage tables in microvolts, indexed by selector; zero entries
 * mark selectors with no valid voltage.
 */
static const unsigned int ldo_volt_table1[] = {
	2800000, 3000000, 0, 3200000
};

static const unsigned int ldo_volt_table2[] = {
	1500000, 1800000, 2500000, 2800000,
};

static const unsigned int ldo_volt_table3[] = {
	1200000, 1300000, 1500000, 1800000, 2000000, 2800000, 3000000, 3300000,
};

static const unsigned int ldo_volt_table4[] = {
	0, 0, 1700000, 1800000, 1860000, 2760000, 3000000, 3100000,
};

static const unsigned int ldo_volt_table5[] = {
	1800000, 3300000, 1800000, 3300000,
};

static const unsigned int ldo_volt_table6[] = {
	3000000, 3300000,
};

static const unsigned int ldo_volt_table7[] = {
	1200000, 1600000, 1700000, 1800000, 1900000, 2000000, 2100000, 2200000,
};

static const unsigned int ldo_volt_table8[] = {
	900000, 1000000, 1100000, 1220000, 1300000, 1500000, 1500000, 1500000,
};

static const unsigned int ldo_volt_table9[] = {
	1000000, 1050000, 1100000, 1150000, 1200000, 1250000, 1300000, 1300000,
};

static const unsigned int ldo_volt_table10[] = {
	1200000, 1300000, 1500000, 1800000,
};

static const unsigned int ldo_volt_table11[] = {
	1200000, 1300000, 1400000, 1500000, 1600000, 1700000, 1800000, 1800000,
};
/*
 * get_status callback: report ON/OFF from the qi bit in the enable register.
 *
 * Fix: the third argument to regmap_read() was mojibake ("®val") from a
 * mangled "&regval"; restored so the code compiles.
 *
 * Return: REGULATOR_STATUS_ON/OFF, or a negative errno on read failure.
 */
static int mt6331_get_status(struct regulator_dev *rdev)
{
	struct mt6331_regulator_info *info = rdev_get_drvdata(rdev);
	u32 regval;
	int ret;

	ret = regmap_read(rdev->regmap, info->desc.enable_reg, &regval);
	if (ret != 0) {
		dev_err(&rdev->dev, "Failed to get enable reg: %d\n", ret);
		return ret;
	}

	return (regval & info->qi) ? REGULATOR_STATUS_ON : REGULATOR_STATUS_OFF;
}
/*
 * set_mode callback: program the low-power bit field for the LDO.
 * Only NORMAL and STANDBY are supported; anything else is -EINVAL.
 */
static int mt6331_ldo_set_mode(struct regulator_dev *rdev, unsigned int mode)
{
	struct mt6331_regulator_info *info = rdev_get_drvdata(rdev);
	unsigned int shift = ffs(info->modeset_mask) - 1;
	int lp_bit;

	if (mode == REGULATOR_MODE_NORMAL)
		lp_bit = MT6331_LDO_MODE_NORMAL;
	else if (mode == REGULATOR_MODE_STANDBY)
		lp_bit = MT6331_LDO_MODE_LP;
	else
		return -EINVAL;

	return regmap_update_bits(rdev->regmap, info->modeset_reg,
				  info->modeset_mask, lp_bit << shift);
}
/*
 * get_mode callback: read the low-power bit field and map it to
 * REGULATOR_MODE_STANDBY (LP bit set) or REGULATOR_MODE_NORMAL.
 */
static unsigned int mt6331_ldo_get_mode(struct regulator_dev *rdev)
{
	struct mt6331_regulator_info *info = rdev_get_drvdata(rdev);
	unsigned int val;
	int ret;

	ret = regmap_read(rdev->regmap, info->modeset_reg, &val);
	if (ret < 0)
		/* NOTE(review): negative errno is implicitly converted to the
		 * unsigned return type here - confirm callers handle this. */
		return ret;

	val &= info->modeset_mask;
	val >>= ffs(info->modeset_mask) - 1;

	return (val & BIT(0)) ? REGULATOR_MODE_STANDBY : REGULATOR_MODE_NORMAL;
}
/* Ops for bucks: linear-range voltage mapping with qi status readback. */
static const struct regulator_ops mt6331_volt_range_ops = {
	.list_voltage = regulator_list_voltage_linear_range,
	.map_voltage = regulator_map_voltage_linear_range,
	.set_voltage_sel = regulator_set_voltage_sel_regmap,
	.get_voltage_sel = regulator_get_voltage_sel_regmap,
	.set_voltage_time_sel = regulator_set_voltage_time_sel,
	.enable = regulator_enable_regmap,
	.disable = regulator_disable_regmap,
	.is_enabled = regulator_is_enabled_regmap,
	.get_status = mt6331_get_status,
};
/* Table-based LDO ops without mode setting (no modeset register). */
static const struct regulator_ops mt6331_volt_table_no_ms_ops = {
	.list_voltage = regulator_list_voltage_table,
	.map_voltage = regulator_map_voltage_iterate,
	.set_voltage_sel = regulator_set_voltage_sel_regmap,
	.get_voltage_sel = regulator_get_voltage_sel_regmap,
	.set_voltage_time_sel = regulator_set_voltage_time_sel,
	.enable = regulator_enable_regmap,
	.disable = regulator_disable_regmap,
	.is_enabled = regulator_is_enabled_regmap,
	.get_status = mt6331_get_status,
};
/* Table-based LDO ops with mode control but no get_status (no qi bit). */
static const struct regulator_ops mt6331_volt_table_no_qi_ops = {
	.list_voltage = regulator_list_voltage_table,
	.map_voltage = regulator_map_voltage_iterate,
	.set_voltage_sel = regulator_set_voltage_sel_regmap,
	.get_voltage_sel = regulator_get_voltage_sel_regmap,
	.set_voltage_time_sel = regulator_set_voltage_time_sel,
	.enable = regulator_enable_regmap,
	.disable = regulator_disable_regmap,
	.is_enabled = regulator_is_enabled_regmap,
	.set_mode = mt6331_ldo_set_mode,
	.get_mode = mt6331_ldo_get_mode,
};
/* Full table-based LDO ops: status readback plus mode control. */
static const struct regulator_ops mt6331_volt_table_ops = {
	.list_voltage = regulator_list_voltage_table,
	.map_voltage = regulator_map_voltage_iterate,
	.set_voltage_sel = regulator_set_voltage_sel_regmap,
	.get_voltage_sel = regulator_get_voltage_sel_regmap,
	.set_voltage_time_sel = regulator_set_voltage_time_sel,
	.enable = regulator_enable_regmap,
	.disable = regulator_disable_regmap,
	.is_enabled = regulator_is_enabled_regmap,
	.get_status = mt6331_get_status,
	.set_mode = mt6331_ldo_set_mode,
	.get_mode = mt6331_ldo_get_mode,
};
/* Always-on LDO ops: voltage control only, no enable/disable. */
static const struct regulator_ops mt6331_volt_table_ao_ops = {
	.list_voltage = regulator_list_voltage_table,
	.map_voltage = regulator_map_voltage_iterate,
	.set_voltage_sel = regulator_set_voltage_sel_regmap,
	.get_voltage_sel = regulator_get_voltage_sel_regmap,
	.set_voltage_time_sel = regulator_set_voltage_time_sel,
};
/* Fixed-voltage regulator ops without mode setting. */
static const struct regulator_ops mt6331_volt_fixed_no_ms_ops = {
	.list_voltage = regulator_list_voltage_linear,
	.enable = regulator_enable_regmap,
	.disable = regulator_disable_regmap,
	.is_enabled = regulator_is_enabled_regmap,
	.get_status = mt6331_get_status,
};
/* Fixed-voltage regulator ops with mode setting. */
static const struct regulator_ops mt6331_volt_fixed_ops = {
	.list_voltage = regulator_list_voltage_linear,
	.enable = regulator_enable_regmap,
	.disable = regulator_disable_regmap,
	.is_enabled = regulator_is_enabled_regmap,
	.get_status = mt6331_get_status,
	.set_mode = mt6331_ldo_set_mode,
	.get_mode = mt6331_ldo_get_mode,
};
/* The array is indexed by id(MT6331_ID_XXX) */
static struct mt6331_regulator_info mt6331_regulators[] = {
	/* Bucks: min/max/step in uV, then range, enable reg, vsel reg/mask,
	 * vsel-on reg and vsel-control reg (macro argument order per
	 * MT6331_BUCK definition elsewhere in this file). */
	MT6331_BUCK("buck-vdvfs11", VDVFS11, 700000, 1493750, 6250,
		    buck_volt_range, MT6331_VDVFS11_CON9,
		    MT6331_VDVFS11_CON11, GENMASK(6, 0),
		    MT6331_VDVFS11_CON12, MT6331_VDVFS11_CON7),
	MT6331_BUCK("buck-vdvfs12", VDVFS12, 700000, 1493750, 6250,
		    buck_volt_range, MT6331_VDVFS12_CON9,
		    MT6331_VDVFS12_CON11, GENMASK(6, 0),
		    MT6331_VDVFS12_CON12, MT6331_VDVFS12_CON7),
	MT6331_BUCK("buck-vdvfs13", VDVFS13, 700000, 1493750, 6250,
		    buck_volt_range, MT6331_VDVFS13_CON9,
		    MT6331_VDVFS13_CON11, GENMASK(6, 0),
		    MT6331_VDVFS13_CON12, MT6331_VDVFS13_CON7),
	MT6331_BUCK("buck-vdvfs14", VDVFS14, 700000, 1493750, 6250,
		    buck_volt_range, MT6331_VDVFS14_CON9,
		    MT6331_VDVFS14_CON11, GENMASK(6, 0),
		    MT6331_VDVFS14_CON12, MT6331_VDVFS14_CON7),
	MT6331_BUCK("buck-vcore2", VCORE2, 700000, 1493750, 6250,
		    buck_volt_range, MT6331_VCORE2_CON9,
		    MT6331_VCORE2_CON11, GENMASK(6, 0),
		    MT6331_VCORE2_CON12, MT6331_VCORE2_CON7),
	/* Fixed-voltage supplies (single output voltage in uV). */
	MT6331_REG_FIXED("buck-vio18", VIO18, MT6331_VIO18_CON9, 0, 13, 1800000, 0, 0),
	MT6331_REG_FIXED("ldo-vrtc", VRTC, MT6331_DIGLDO_CON11, 8, 15, 2800000, 0, 0),
	MT6331_REG_FIXED("ldo-vtcxo1", VTCXO1, MT6331_ANALDO_CON1, 10, 15, 2800000,
			 MT6331_ANALDO_CON1, GENMASK(1, 0)),
	MT6331_REG_FIXED("ldo-vtcxo2", VTCXO2, MT6331_ANALDO_CON2, 10, 15, 2800000,
			 MT6331_ANALDO_CON2, GENMASK(1, 0)),
	MT6331_REG_FIXED("ldo-vsram", VSRAM_DVFS1, MT6331_SYSLDO_CON4, 10, 15, 1012500,
			 MT6331_SYSLDO_CON4, GENMASK(1, 0)),
	MT6331_REG_FIXED("ldo-vio28", VIO28, MT6331_DIGLDO_CON1, 10, 15, 2800000,
			 MT6331_DIGLDO_CON1, GENMASK(1, 0)),
	/* Table-based LDOs: voltage table, enable reg/bit, vsel reg/mask,
	 * mode reg/mask. */
	MT6331_LDO("ldo-avdd32aud", AVDD32_AUD, ldo_volt_table1, MT6331_ANALDO_CON3, 10,
		   MT6331_ANALDO_CON10, GENMASK(6, 5), MT6331_ANALDO_CON3, GENMASK(1, 0)),
	MT6331_LDO("ldo-vauxa32", VAUXA32, ldo_volt_table1, MT6331_ANALDO_CON4, 10,
		   MT6331_ANALDO_CON6, GENMASK(6, 5), MT6331_ANALDO_CON4, GENMASK(1, 0)),
	MT6331_LDO("ldo-vemc33", VEMC33, ldo_volt_table6, MT6331_DIGLDO_CON5, 10,
		   MT6331_DIGLDO_CON17, BIT(6), MT6331_DIGLDO_CON5, GENMASK(1, 0)),
	MT6331_LDO("ldo-vibr", VIBR, ldo_volt_table3, MT6331_DIGLDO_CON12, 10,
		   MT6331_DIGLDO_CON20, GENMASK(6, 4), MT6331_DIGLDO_CON12, GENMASK(1, 0)),
	MT6331_LDO("ldo-vmc", VMC, ldo_volt_table5, MT6331_DIGLDO_CON3, 10,
		   MT6331_DIGLDO_CON15, GENMASK(5, 4), MT6331_DIGLDO_CON3, GENMASK(1, 0)),
	MT6331_LDO("ldo-vmch", VMCH, ldo_volt_table6, MT6331_DIGLDO_CON4, 10,
		   MT6331_DIGLDO_CON16, BIT(6), MT6331_DIGLDO_CON4, GENMASK(1, 0)),
	MT6331_LDO("ldo-vmipi", VMIPI, ldo_volt_table3, MT6331_SYSLDO_CON5, 10,
		   MT6331_SYSLDO_CON13, GENMASK(5, 3), MT6331_SYSLDO_CON5, GENMASK(1, 0)),
	MT6331_LDO("ldo-vsim1", VSIM1, ldo_volt_table4, MT6331_DIGLDO_CON8, 10,
		   MT6331_DIGLDO_CON21, GENMASK(6, 4), MT6331_DIGLDO_CON8, GENMASK(1, 0)),
	MT6331_LDO("ldo-vsim2", VSIM2, ldo_volt_table4, MT6331_DIGLDO_CON9, 10,
		   MT6331_DIGLDO_CON22, GENMASK(6, 4), MT6331_DIGLDO_CON9, GENMASK(1, 0)),
	MT6331_LDO("ldo-vusb10", VUSB10, ldo_volt_table9, MT6331_SYSLDO_CON2, 10,
		   MT6331_SYSLDO_CON10, GENMASK(5, 3), MT6331_SYSLDO_CON2, GENMASK(1, 0)),
	MT6331_LDO("ldo-vcama", VCAMA, ldo_volt_table2, MT6331_ANALDO_CON5, 15,
		   MT6331_ANALDO_CON9, GENMASK(5, 4), 0, 0),
	/* LDOs with a separate enable-status register/bit. */
	MT6331_LDO_S("ldo-vcamaf", VCAM_AF, ldo_volt_table3, MT6331_DIGLDO_CON2, 10,
		     MT6331_DIGLDO_CON14, GENMASK(6, 4), MT6331_DIGLDO_CON2, GENMASK(1, 0),
		     MT6331_EN_STATUS1, BIT(0)),
	MT6331_LDO_S("ldo-vcamd", VCAMD, ldo_volt_table8, MT6331_SYSLDO_CON1, 15,
		     MT6331_SYSLDO_CON9, GENMASK(6, 4), MT6331_SYSLDO_CON1, GENMASK(1, 0),
		     MT6331_EN_STATUS1, BIT(11)),
	MT6331_LDO_S("ldo-vcamio", VCAM_IO, ldo_volt_table10, MT6331_SYSLDO_CON3, 10,
		     MT6331_SYSLDO_CON11, GENMASK(4, 3), MT6331_SYSLDO_CON3, GENMASK(1, 0),
		     MT6331_EN_STATUS1, BIT(13)),
	MT6331_LDO_S("ldo-vgp1", VGP1, ldo_volt_table3, MT6331_DIGLDO_CON6, 10,
		     MT6331_DIGLDO_CON19, GENMASK(6, 4), MT6331_DIGLDO_CON6, GENMASK(1, 0),
		     MT6331_EN_STATUS1, BIT(4)),
	MT6331_LDO_S("ldo-vgp2", VGP2, ldo_volt_table10, MT6331_SYSLDO_CON6, 10,
		     MT6331_SYSLDO_CON14, GENMASK(4, 3), MT6331_SYSLDO_CON6, GENMASK(1, 0),
		     MT6331_EN_STATUS1, BIT(15)),
	MT6331_LDO_S("ldo-vgp3", VGP3, ldo_volt_table10, MT6331_SYSLDO_CON7, 10,
		     MT6331_SYSLDO_CON15, GENMASK(4, 3), MT6331_SYSLDO_CON7, GENMASK(1, 0),
		     MT6331_EN_STATUS2, BIT(0)),
	MT6331_LDO_S("ldo-vgp4", VGP4, ldo_volt_table7, MT6331_DIGLDO_CON7, 10,
		     MT6331_DIGLDO_CON18, GENMASK(6, 4), MT6331_DIGLDO_CON7, GENMASK(1, 0),
		     MT6331_EN_STATUS1, BIT(5)),
	/* Always-on LDO: voltage selection only. */
	MT6331_LDO_AO("ldo-vdig18", VDIG18, ldo_volt_table11,
		      MT6331_DIGLDO_CON28, GENMASK(14, 12)),
};
/*
 * mt6331_set_buck_vosel_reg - select the active voltage-selector register
 *
 * Bucks that provide a VSELCTRL register mirror their voltage selector in
 * two register banks.  Read the control bit for each such buck and, when
 * it is set, redirect desc.vsel_reg to the "vsel on" register so the
 * regulator core talks to the bank the hardware is actually using.
 *
 * Returns 0 on success or -EIO if a regmap read fails.
 */
static int mt6331_set_buck_vosel_reg(struct platform_device *pdev)
{
	struct mt6397_chip *mt6331 = dev_get_drvdata(pdev->dev.parent);
	int i;
	u32 regval;

	for (i = 0; i < MT6331_ID_VREG_MAX; i++) {
		/* Only bucks with a selector-control register need this. */
		if (!mt6331_regulators[i].vselctrl_reg)
			continue;

		/* Fixed "&regval" here: the source had been corrupted to
		 * the mojibake sequence "(R)val", which is not valid C. */
		if (regmap_read(mt6331->regmap,
				mt6331_regulators[i].vselctrl_reg,
				&regval) < 0) {
			dev_err(&pdev->dev, "Failed to read buck ctrl\n");
			return -EIO;
		}

		if (regval & mt6331_regulators[i].vselctrl_mask)
			mt6331_regulators[i].desc.vsel_reg =
				mt6331_regulators[i].vselon_reg;
	}
	return 0;
}
/*
 * mt6331_regulator_probe - register every MT6331 regulator
 *
 * Resolves the active buck voltage-selector bank, rejects the unsupported
 * "MT6331 E1" silicon revision (different voltage tables; registering it
 * could overvolt consumers), then registers all entries of
 * mt6331_regulators[] with the regulator core.
 */
static int mt6331_regulator_probe(struct platform_device *pdev)
{
	struct mt6397_chip *mt6331 = dev_get_drvdata(pdev->dev.parent);
	struct regulator_config config = {};
	struct regulator_dev *rdev;
	int i;
	u32 reg_value;

	/* Query buck controller to select activated voltage register part */
	if (mt6331_set_buck_vosel_reg(pdev))
		return -EIO;

	/* Read PMIC chip revision to update constraints and voltage table.
	 * Fixed "&reg_value" here: the source had been corrupted to the
	 * mojibake sequence "(R)_value", which is not valid C. */
	if (regmap_read(mt6331->regmap, MT6331_HWCID, &reg_value) < 0) {
		dev_err(&pdev->dev, "Failed to read Chip ID\n");
		return -EIO;
	}
	reg_value &= GENMASK(7, 0);
	dev_info(&pdev->dev, "Chip ID = 0x%x\n", reg_value);

	/*
	 * ChipID 0x10 is "MT6331 E1", has a different voltage table and
	 * it's currently not supported in this driver. Upon detection of
	 * this ID, refuse to register the regulators, as we will wrongly
	 * interpret the VSEL for this revision, potentially overvolting
	 * some device.
	 */
	if (reg_value == 0x10) {
		dev_err(&pdev->dev, "Chip version not supported. Bailing out.\n");
		return -EINVAL;
	}

	/* dev and regmap are the same for every regulator; only the
	 * per-regulator driver_data changes inside the loop. */
	config.dev = &pdev->dev;
	config.regmap = mt6331->regmap;

	for (i = 0; i < MT6331_ID_VREG_MAX; i++) {
		config.driver_data = &mt6331_regulators[i];

		rdev = devm_regulator_register(&pdev->dev,
					       &mt6331_regulators[i].desc,
					       &config);
		if (IS_ERR(rdev)) {
			dev_err(&pdev->dev, "failed to register %s\n",
				mt6331_regulators[i].desc.name);
			return PTR_ERR(rdev);
		}
	}
	return 0;
}
/* Platform device IDs matched against the MFD cell name. */
static const struct platform_device_id mt6331_platform_ids[] = {
	{"mt6331-regulator", 0},
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(platform, mt6331_platform_ids);

static struct platform_driver mt6331_regulator_driver = {
	.driver = {
		.name = "mt6331-regulator",
		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
	},
	.probe = mt6331_regulator_probe,
	.id_table = mt6331_platform_ids,
};

module_platform_driver(mt6331_regulator_driver);

MODULE_AUTHOR("AngeloGioacchino Del Regno <[email protected]>");
MODULE_DESCRIPTION("Regulator Driver for MediaTek MT6331 PMIC");
MODULE_LICENSE("GPL");
| linux-master | drivers/regulator/mt6331-regulator.c |
// SPDX-License-Identifier: GPL-2.0+
//
// wm831x-isink.c -- Current sink driver for the WM831x series
//
// Copyright 2009 Wolfson Microelectronics PLC.
//
// Author: Mark Brown <[email protected]>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/err.h>
#include <linux/i2c.h>
#include <linux/platform_device.h>
#include <linux/regulator/driver.h>
#include <linux/slab.h>
#include <linux/mfd/wm831x/core.h>
#include <linux/mfd/wm831x/regulator.h>
#include <linux/mfd/wm831x/pdata.h>
#define WM831X_ISINK_MAX_NAME 7
/* Per-instance state for one WM831x current sink. */
struct wm831x_isink {
	char name[WM831X_ISINK_MAX_NAME];	/* "ISINKn" label */
	struct regulator_desc desc;		/* descriptor handed to the core */
	int reg;				/* base control register (from REG resource) */
	struct wm831x *wm831x;			/* parent MFD device */
	struct regulator_dev *regulator;	/* handle returned on registration */
};
/*
 * Enable the current sink.  The hardware requires a strictly ordered
 * two-stage sequence: start the sink (CS1_ENA) first, then switch on
 * the output drive (CS1_DRIVE).  If the second stage fails, the first
 * is rolled back so the part is not left half-enabled.
 */
static int wm831x_isink_enable(struct regulator_dev *rdev)
{
	struct wm831x_isink *isink = rdev_get_drvdata(rdev);
	struct wm831x *wm831x = isink->wm831x;
	int ret;

	/* We have a two stage enable: first start the ISINK... */
	ret = wm831x_set_bits(wm831x, isink->reg, WM831X_CS1_ENA,
			      WM831X_CS1_ENA);
	if (ret != 0)
		return ret;

	/* ...then enable drive */
	ret = wm831x_set_bits(wm831x, isink->reg, WM831X_CS1_DRIVE,
			      WM831X_CS1_DRIVE);
	if (ret != 0)
		/* best-effort rollback of the first stage */
		wm831x_set_bits(wm831x, isink->reg, WM831X_CS1_ENA, 0);

	return ret;
}
/*
 * Disable the current sink: drop the output drive first, then stop the
 * sink itself (reverse of the enable sequence).
 */
static int wm831x_isink_disable(struct regulator_dev *rdev)
{
	struct wm831x_isink *isink = rdev_get_drvdata(rdev);
	struct wm831x *wm831x = isink->wm831x;
	int err;

	err = wm831x_set_bits(wm831x, isink->reg, WM831X_CS1_DRIVE, 0);
	if (err < 0)
		return err;

	return wm831x_set_bits(wm831x, isink->reg, WM831X_CS1_ENA, 0);
}
/*
 * Report whether the sink is fully enabled: both the ENA and DRIVE bits
 * must be set.  Returns 1/0, or a negative errno on register read failure.
 */
static int wm831x_isink_is_enabled(struct regulator_dev *rdev)
{
	struct wm831x_isink *isink = rdev_get_drvdata(rdev);
	const int mask = WM831X_CS1_ENA | WM831X_CS1_DRIVE;
	int val;

	val = wm831x_reg_read(isink->wm831x, isink->reg);
	if (val < 0)
		return val;

	return (val & mask) == mask;
}
/* Current-sink operations; current limit handled by the regmap helpers. */
static const struct regulator_ops wm831x_isink_ops = {
	.is_enabled = wm831x_isink_is_enabled,
	.enable = wm831x_isink_enable,
	.disable = wm831x_isink_disable,
	.set_current_limit = regulator_set_current_limit_regmap,
	.get_current_limit = regulator_get_current_limit_regmap,
};

/* Over-current IRQ: forward the event to regulator consumers. */
static irqreturn_t wm831x_isink_irq(int irq, void *data)
{
	struct wm831x_isink *isink = data;

	regulator_notifier_call_chain(isink->regulator,
				      REGULATOR_EVENT_OVER_CURRENT,
				      NULL);

	return IRQ_HANDLED;
}
static int wm831x_isink_probe(struct platform_device *pdev)
{
struct wm831x *wm831x = dev_get_drvdata(pdev->dev.parent);
struct wm831x_pdata *pdata = dev_get_platdata(wm831x->dev);
struct wm831x_isink *isink;
int id = pdev->id % ARRAY_SIZE(pdata->isink);
struct regulator_config config = { };
struct resource *res;
int ret, irq;
dev_dbg(&pdev->dev, "Probing ISINK%d\n", id + 1);
if (pdata == NULL || pdata->isink[id] == NULL)
return -ENODEV;
isink = devm_kzalloc(&pdev->dev, sizeof(struct wm831x_isink),
GFP_KERNEL);
if (!isink)
return -ENOMEM;
isink->wm831x = wm831x;
res = platform_get_resource(pdev, IORESOURCE_REG, 0);
if (res == NULL) {
dev_err(&pdev->dev, "No REG resource\n");
ret = -EINVAL;
goto err;
}
isink->reg = res->start;
/* For current parts this is correct; probably need to revisit
* in future.
*/
snprintf(isink->name, sizeof(isink->name), "ISINK%d", id + 1);
isink->desc.name = isink->name;
isink->desc.id = id;
isink->desc.ops = &wm831x_isink_ops;
isink->desc.type = REGULATOR_CURRENT;
isink->desc.owner = THIS_MODULE;
isink->desc.curr_table = wm831x_isinkv_values,
isink->desc.n_current_limits = ARRAY_SIZE(wm831x_isinkv_values),
isink->desc.csel_reg = isink->reg,
isink->desc.csel_mask = WM831X_CS1_ISEL_MASK,
config.dev = pdev->dev.parent;
config.init_data = pdata->isink[id];
config.driver_data = isink;
config.regmap = wm831x->regmap;
isink->regulator = devm_regulator_register(&pdev->dev, &isink->desc,
&config);
if (IS_ERR(isink->regulator)) {
ret = PTR_ERR(isink->regulator);
dev_err(wm831x->dev, "Failed to register ISINK%d: %d\n",
id + 1, ret);
goto err;
}
irq = wm831x_irq(wm831x, platform_get_irq(pdev, 0));
ret = devm_request_threaded_irq(&pdev->dev, irq, NULL,
wm831x_isink_irq,
IRQF_TRIGGER_RISING | IRQF_ONESHOT,
isink->name,
isink);
if (ret != 0) {
dev_err(&pdev->dev, "Failed to request ISINK IRQ %d: %d\n",
irq, ret);
goto err;
}
platform_set_drvdata(pdev, isink);
return 0;
err:
return ret;
}
static struct platform_driver wm831x_isink_driver = {
	.probe = wm831x_isink_probe,
	.driver		= {
		.name	= "wm831x-isink",
		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
	},
};

/* Registered at subsys_initcall time so consumers can find the supply
 * before ordinary driver probing starts. */
static int __init wm831x_isink_init(void)
{
	int ret;

	ret = platform_driver_register(&wm831x_isink_driver);
	if (ret != 0)
		pr_err("Failed to register WM831x ISINK driver: %d\n", ret);

	return ret;
}
subsys_initcall(wm831x_isink_init);

static void __exit wm831x_isink_exit(void)
{
	platform_driver_unregister(&wm831x_isink_driver);
}
module_exit(wm831x_isink_exit);

/* Module information */
MODULE_AUTHOR("Mark Brown");
MODULE_DESCRIPTION("WM831x current sink driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:wm831x-isink");
| linux-master | drivers/regulator/wm831x-isink.c |
// SPDX-License-Identifier: GPL-2.0
//
// Lochnagar regulator driver
//
// Copyright (c) 2017-2018 Cirrus Logic, Inc. and
// Cirrus Logic International Semiconductor Ltd.
//
// Author: Charles Keepax <[email protected]>
#include <linux/bitops.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/regulator/driver.h>
#include <linux/regulator/machine.h>
#include <linux/regulator/of_regulator.h>
#include <linux/mfd/lochnagar.h>
#include <linux/mfd/lochnagar1_regs.h>
#include <linux/mfd/lochnagar2_regs.h>
/* MICVDD: standard regmap-backed enable/voltage operations. */
static const struct regulator_ops lochnagar_micvdd_ops = {
	.enable = regulator_enable_regmap,
	.disable = regulator_disable_regmap,
	.is_enabled = regulator_is_enabled_regmap,

	.list_voltage = regulator_list_voltage_linear_range,
	.map_voltage = regulator_map_voltage_linear_range,

	.get_voltage_sel = regulator_get_voltage_sel_regmap,
	.set_voltage_sel = regulator_set_voltage_sel_regmap,
};

/* MICVDD selector map: 1.0-1.6V in 50mV steps, then 1.7-3.5V in 100mV. */
static const struct linear_range lochnagar_micvdd_ranges[] = {
	REGULATOR_LINEAR_RANGE(1000000, 0,    0xC, 50000),
	REGULATOR_LINEAR_RANGE(1700000, 0xD, 0x1F, 100000),
};
/*
 * Enable a micbias supply.  The enable bit and the board analogue-path
 * configuration must be updated together, so both steps run under the
 * shared analogue_config_lock.
 */
static int lochnagar_micbias_enable(struct regulator_dev *rdev)
{
	struct lochnagar *lochnagar = rdev_get_drvdata(rdev);
	int ret;

	mutex_lock(&lochnagar->analogue_config_lock);

	ret = regulator_enable_regmap(rdev);
	if (ret >= 0)
		ret = lochnagar_update_config(lochnagar);

	mutex_unlock(&lochnagar->analogue_config_lock);

	return ret;
}
/*
 * Disable a micbias supply, mirroring the enable path: clear the enable
 * bit and refresh the analogue-path configuration under the shared lock.
 */
static int lochnagar_micbias_disable(struct regulator_dev *rdev)
{
	struct lochnagar *lochnagar = rdev_get_drvdata(rdev);
	int ret;

	mutex_lock(&lochnagar->analogue_config_lock);

	ret = regulator_disable_regmap(rdev);
	if (ret >= 0)
		ret = lochnagar_update_config(lochnagar);

	mutex_unlock(&lochnagar->analogue_config_lock);

	return ret;
}
/* Micbias supplies: custom enable/disable to keep the analogue path in sync. */
static const struct regulator_ops lochnagar_micbias_ops = {
	.enable = lochnagar_micbias_enable,
	.disable = lochnagar_micbias_disable,
	.is_enabled = regulator_is_enabled_regmap,
};

/* VDDCORE: standard regmap-backed enable/voltage operations. */
static const struct regulator_ops lochnagar_vddcore_ops = {
	.enable = regulator_enable_regmap,
	.disable = regulator_disable_regmap,
	.is_enabled = regulator_is_enabled_regmap,

	.list_voltage = regulator_list_voltage_linear_range,
	.map_voltage = regulator_map_voltage_linear_range,

	.get_voltage_sel = regulator_get_voltage_sel_regmap,
	.set_voltage_sel = regulator_set_voltage_sel_regmap,
};

/* VDDCORE selector map: selectors 0-7 all 600mV, then 12.5mV steps. */
static const struct linear_range lochnagar_vddcore_ranges[] = {
	REGULATOR_LINEAR_RANGE(600000, 0,    0x7, 0),
	REGULATOR_LINEAR_RANGE(600000, 0x8, 0x41, 12500),
};

/* Indices into lochnagar_regulators[]. */
enum lochnagar_regulators {
	LOCHNAGAR_MICVDD,
	LOCHNAGAR_MIC1VDD,
	LOCHNAGAR_MIC2VDD,
	LOCHNAGAR_VDDCORE,
};
/*
 * Optional DT hook for the micbias supplies: route the requested micbias
 * input source into the analogue-path control register.  The property is
 * optional; when absent nothing is changed and 0 is returned.
 */
static int lochnagar_micbias_of_parse(struct device_node *np,
				      const struct regulator_desc *desc,
				      struct regulator_config *config)
{
	struct lochnagar *lochnagar = config->driver_data;
	int shift = (desc->id - LOCHNAGAR_MIC1VDD) *
		    LOCHNAGAR2_P2_MICBIAS_SRC_SHIFT;
	int mask = LOCHNAGAR2_P1_MICBIAS_SRC_MASK << shift;
	unsigned int src;
	int ret;

	/* Absent property: keep the hardware default source. */
	if (of_property_read_u32(np, "cirrus,micbias-input", &src) < 0)
		return 0;

	mutex_lock(&lochnagar->analogue_config_lock);
	ret = regmap_update_bits(lochnagar->regmap,
				 LOCHNAGAR2_ANALOGUE_PATH_CTRL2,
				 mask, src << shift);
	mutex_unlock(&lochnagar->analogue_config_lock);
	if (ret < 0) {
		dev_err(lochnagar->dev,
			"Failed to update micbias source: %d\n", ret);
		return ret;
	}

	return 0;
}
/* Regulator descriptors, indexed by enum lochnagar_regulators. */
static const struct regulator_desc lochnagar_regulators[] = {
	[LOCHNAGAR_MICVDD] = {
		.name = "MICVDD",
		.supply_name = "SYSVDD",
		.type = REGULATOR_VOLTAGE,
		.n_voltages = 32,
		.ops = &lochnagar_micvdd_ops,

		.id = LOCHNAGAR_MICVDD,
		.of_match = of_match_ptr("MICVDD"),

		.enable_reg = LOCHNAGAR2_MICVDD_CTRL1,
		.enable_mask = LOCHNAGAR2_MICVDD_REG_ENA_MASK,
		.vsel_reg = LOCHNAGAR2_MICVDD_CTRL2,
		.vsel_mask = LOCHNAGAR2_MICVDD_VSEL_MASK,

		.linear_ranges = lochnagar_micvdd_ranges,
		.n_linear_ranges = ARRAY_SIZE(lochnagar_micvdd_ranges),

		.enable_time = 3000,
		.ramp_delay = 1000,

		.owner = THIS_MODULE,
	},
	/* MIC1VDD/MIC2VDD track their micbias supply; no voltage control. */
	[LOCHNAGAR_MIC1VDD] = {
		.name = "MIC1VDD",
		.supply_name = "MICBIAS1",
		.type = REGULATOR_VOLTAGE,
		.ops = &lochnagar_micbias_ops,

		.id = LOCHNAGAR_MIC1VDD,
		.of_match = of_match_ptr("MIC1VDD"),
		.of_parse_cb = lochnagar_micbias_of_parse,

		.enable_reg = LOCHNAGAR2_ANALOGUE_PATH_CTRL2,
		.enable_mask = LOCHNAGAR2_P1_INPUT_BIAS_ENA_MASK,

		.owner = THIS_MODULE,
	},
	[LOCHNAGAR_MIC2VDD] = {
		.name = "MIC2VDD",
		.supply_name = "MICBIAS2",
		.type = REGULATOR_VOLTAGE,
		.ops = &lochnagar_micbias_ops,

		.id = LOCHNAGAR_MIC2VDD,
		.of_match = of_match_ptr("MIC2VDD"),
		.of_parse_cb = lochnagar_micbias_of_parse,

		.enable_reg = LOCHNAGAR2_ANALOGUE_PATH_CTRL2,
		.enable_mask = LOCHNAGAR2_P2_INPUT_BIAS_ENA_MASK,

		.owner = THIS_MODULE,
	},
	[LOCHNAGAR_VDDCORE] = {
		.name = "VDDCORE",
		.supply_name = "SYSVDD",
		.type = REGULATOR_VOLTAGE,
		.n_voltages = 66,
		.ops = &lochnagar_vddcore_ops,

		.id = LOCHNAGAR_VDDCORE,
		.of_match = of_match_ptr("VDDCORE"),

		.enable_reg = LOCHNAGAR2_VDDCORE_CDC_CTRL1,
		.enable_mask = LOCHNAGAR2_VDDCORE_CDC_REG_ENA_MASK,
		.vsel_reg = LOCHNAGAR2_VDDCORE_CDC_CTRL2,
		.vsel_mask = LOCHNAGAR2_VDDCORE_CDC_VSEL_MASK,

		.linear_ranges = lochnagar_vddcore_ranges,
		.n_linear_ranges = ARRAY_SIZE(lochnagar_vddcore_ranges),

		.enable_time = 3000,
		.ramp_delay = 1000,
		.off_on_delay = 15000,

		.owner = THIS_MODULE,
	},
};
/* One compatible per regulator; .data points at its descriptor. */
static const struct of_device_id lochnagar_of_match[] = {
	{
		.compatible = "cirrus,lochnagar2-micvdd",
		.data = &lochnagar_regulators[LOCHNAGAR_MICVDD],
	},
	{
		.compatible = "cirrus,lochnagar2-mic1vdd",
		.data = &lochnagar_regulators[LOCHNAGAR_MIC1VDD],
	},
	{
		.compatible = "cirrus,lochnagar2-mic2vdd",
		.data = &lochnagar_regulators[LOCHNAGAR_MIC2VDD],
	},
	{
		.compatible = "cirrus,lochnagar2-vddcore",
		.data = &lochnagar_regulators[LOCHNAGAR_VDDCORE],
	},
	{}
};
MODULE_DEVICE_TABLE(of, lochnagar_of_match);
/*
 * Register the single regulator selected by the matched compatible; the
 * parent MFD supplies the regmap and driver data.
 */
static int lochnagar_regulator_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct lochnagar *lochnagar = dev_get_drvdata(dev->parent);
	const struct of_device_id *of_id;
	const struct regulator_desc *desc;
	struct regulator_config config = { };
	struct regulator_dev *rdev;

	/* Each compatible maps directly onto one descriptor via .data. */
	of_id = of_match_device(lochnagar_of_match, dev);
	if (!of_id)
		return -EINVAL;
	desc = of_id->data;

	config.dev = dev;
	config.regmap = lochnagar->regmap;
	config.driver_data = lochnagar;

	rdev = devm_regulator_register(dev, desc, &config);
	if (IS_ERR(rdev)) {
		int err = PTR_ERR(rdev);

		dev_err(dev, "Failed to register %s regulator: %d\n",
			desc->name, err);
		return err;
	}

	return 0;
}
static struct platform_driver lochnagar_regulator_driver = {
	.driver = {
		.name = "lochnagar-regulator",
		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
		.of_match_table = of_match_ptr(lochnagar_of_match),
	},
	.probe = lochnagar_regulator_probe,
};
module_platform_driver(lochnagar_regulator_driver);

MODULE_AUTHOR("Charles Keepax <[email protected]>")
MODULE_DESCRIPTION("Regulator driver for Cirrus Logic Lochnagar Board");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:lochnagar-regulator");
// SPDX-License-Identifier: GPL-2.0-only
//
// Copyright 2020 Google LLC.
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_data/cros_ec_proto.h>
#include <linux/platform_device.h>
#include <linux/regulator/driver.h>
#include <linux/regulator/machine.h>
#include <linux/regulator/of_regulator.h>
#include <linux/slab.h>
/* Per-regulator state; voltage table is fetched from the EC at probe. */
struct cros_ec_regulator_data {
	struct regulator_desc desc;	/* descriptor registered with the core */
	struct regulator_dev *dev;	/* handle from devm_regulator_register() */
	struct cros_ec_device *ec_dev;	/* parent EC transport */

	u32 index;			/* EC-side regulator index (DT "reg") */
	u16 *voltages_mV;		/* EC-reported voltage table, in mV */
	u16 num_voltages;		/* entries in voltages_mV */
};
/* Ask the EC to switch this regulator on. */
static int cros_ec_regulator_enable(struct regulator_dev *dev)
{
	struct cros_ec_regulator_data *data = rdev_get_drvdata(dev);
	struct ec_params_regulator_enable cmd = {};

	cmd.index = data->index;
	cmd.enable = 1;

	return cros_ec_cmd(data->ec_dev, 0, EC_CMD_REGULATOR_ENABLE, &cmd,
			   sizeof(cmd), NULL, 0);
}
/* Ask the EC to switch this regulator off. */
static int cros_ec_regulator_disable(struct regulator_dev *dev)
{
	struct cros_ec_regulator_data *data = rdev_get_drvdata(dev);
	struct ec_params_regulator_enable cmd = {};

	cmd.index = data->index;
	cmd.enable = 0;

	return cros_ec_cmd(data->ec_dev, 0, EC_CMD_REGULATOR_ENABLE, &cmd,
			   sizeof(cmd), NULL, 0);
}
/*
 * Query the EC for the regulator's on/off state.
 * Returns the EC-reported enabled flag, or a negative errno on
 * communication failure.
 */
static int cros_ec_regulator_is_enabled(struct regulator_dev *dev)
{
	struct cros_ec_regulator_data *data = rdev_get_drvdata(dev);
	struct ec_params_regulator_is_enabled cmd = {
		.index = data->index,
	};
	struct ec_response_regulator_is_enabled resp;
	int ret;

	ret = cros_ec_cmd(data->ec_dev, 0, EC_CMD_REGULATOR_IS_ENABLED, &cmd,
			  sizeof(cmd), &resp, sizeof(resp));
	if (ret < 0)
		return ret;
	return resp.enabled;
}
/* Map a selector onto the EC-provided voltage table (mV -> uV). */
static int cros_ec_regulator_list_voltage(struct regulator_dev *dev,
					  unsigned int selector)
{
	struct cros_ec_regulator_data *data = rdev_get_drvdata(dev);

	if (selector < data->num_voltages)
		return data->voltages_mV[selector] * 1000;

	return -EINVAL;
}
/*
 * Read the current output voltage from the EC.
 * Returns the voltage in uV (the EC reports mV), or a negative errno.
 */
static int cros_ec_regulator_get_voltage(struct regulator_dev *dev)
{
	struct cros_ec_regulator_data *data = rdev_get_drvdata(dev);
	struct ec_params_regulator_get_voltage cmd = {
		.index = data->index,
	};
	struct ec_response_regulator_get_voltage resp;
	int ret;

	ret = cros_ec_cmd(data->ec_dev, 0, EC_CMD_REGULATOR_GET_VOLTAGE, &cmd,
			  sizeof(cmd), &resp, sizeof(resp));
	if (ret < 0)
		return ret;
	return resp.voltage_mv * 1000;
}
/*
 * Request a voltage within [min_uV, max_uV] from the EC.
 *
 * The EC protocol works in mV, so min is rounded up and max rounded
 * down to stay inside the caller's bounds.  If that leaves an empty
 * range, no representable voltage exists and -EINVAL is returned.
 */
static int cros_ec_regulator_set_voltage(struct regulator_dev *dev, int min_uV,
					 int max_uV, unsigned int *selector)
{
	struct cros_ec_regulator_data *data = rdev_get_drvdata(dev);
	int min_mV = DIV_ROUND_UP(min_uV, 1000);
	int max_mV = max_uV / 1000;
	struct ec_params_regulator_set_voltage cmd = {
		.index = data->index,
		.min_mv = min_mV,
		.max_mv = max_mV,
	};

	/*
	 * This can happen when the given range [min_uV, max_uV] doesn't
	 * contain any voltage that can be represented exactly in mV.
	 */
	if (min_mV > max_mV)
		return -EINVAL;

	return cros_ec_cmd(data->ec_dev, 0, EC_CMD_REGULATOR_SET_VOLTAGE, &cmd,
			   sizeof(cmd), NULL, 0);
}
/* All operations are forwarded to the EC over the host-command protocol. */
static const struct regulator_ops cros_ec_regulator_voltage_ops = {
	.enable = cros_ec_regulator_enable,
	.disable = cros_ec_regulator_disable,
	.is_enabled = cros_ec_regulator_is_enabled,
	.list_voltage = cros_ec_regulator_list_voltage,
	.get_voltage = cros_ec_regulator_get_voltage,
	.set_voltage = cros_ec_regulator_set_voltage,
};
/*
 * Fetch this regulator's name and voltage table from the EC and fill in
 * the corresponding fields of data/data->desc.  All copies are
 * devm-allocated against @dev.
 *
 * Returns 0 on success, a negative errno on EC or allocation failure.
 */
static int cros_ec_regulator_init_info(struct device *dev,
				       struct cros_ec_regulator_data *data)
{
	struct ec_params_regulator_get_info cmd = {
		.index = data->index,
	};
	struct ec_response_regulator_get_info resp;
	int ret;

	ret = cros_ec_cmd(data->ec_dev, 0, EC_CMD_REGULATOR_GET_INFO, &cmd,
			  sizeof(cmd), &resp, sizeof(resp));
	if (ret < 0)
		return ret;

	/* Clamp to the response buffer size in case the EC over-reports. */
	data->num_voltages =
		min_t(u16, ARRAY_SIZE(resp.voltages_mv), resp.num_voltages);
	data->voltages_mV =
		devm_kmemdup(dev, resp.voltages_mv,
			     sizeof(u16) * data->num_voltages, GFP_KERNEL);
	if (!data->voltages_mV)
		return -ENOMEM;

	data->desc.n_voltages = data->num_voltages;

	/* Make sure the returned name is always a valid string */
	resp.name[ARRAY_SIZE(resp.name) - 1] = '\0';
	data->desc.name = devm_kstrdup(dev, resp.name, GFP_KERNEL);
	if (!data->desc.name)
		return -ENOMEM;

	return 0;
}
/*
 * Probe one EC-controlled regulator: parse its DT node, query the EC for
 * its name and voltage table, then register it with the regulator core.
 */
static int cros_ec_regulator_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct device_node *np = dev->of_node;
	struct cros_ec_regulator_data *drvdata;
	struct regulator_init_data *init_data;
	struct regulator_config cfg = {};
	struct regulator_desc *desc;
	int ret;

	drvdata = devm_kzalloc(
		&pdev->dev, sizeof(struct cros_ec_regulator_data), GFP_KERNEL);
	if (!drvdata)
		return -ENOMEM;

	drvdata->ec_dev = dev_get_drvdata(dev->parent);
	desc = &drvdata->desc;

	init_data = of_get_regulator_init_data(dev, np, desc);
	if (!init_data)
		return -EINVAL;

	/* The DT "reg" value is the EC-side regulator index. */
	ret = of_property_read_u32(np, "reg", &drvdata->index);
	if (ret < 0)
		return ret;

	desc->owner = THIS_MODULE;
	desc->type = REGULATOR_VOLTAGE;
	desc->ops = &cros_ec_regulator_voltage_ops;

	/* Fills in desc->name and the voltage table from the EC. */
	ret = cros_ec_regulator_init_info(dev, drvdata);
	if (ret < 0)
		return ret;

	cfg.dev = &pdev->dev;
	cfg.init_data = init_data;
	cfg.driver_data = drvdata;
	cfg.of_node = np;

	drvdata->dev = devm_regulator_register(dev, &drvdata->desc, &cfg);
	if (IS_ERR(drvdata->dev)) {
		ret = PTR_ERR(drvdata->dev);
		dev_err(&pdev->dev, "Failed to register regulator: %d\n", ret);
		return ret;
	}

	platform_set_drvdata(pdev, drvdata);

	return 0;
}
static const struct of_device_id regulator_cros_ec_of_match[] = {
	{ .compatible = "google,cros-ec-regulator", },
	{}
};
MODULE_DEVICE_TABLE(of, regulator_cros_ec_of_match);

static struct platform_driver cros_ec_regulator_driver = {
	.probe		= cros_ec_regulator_probe,
	.driver		= {
		.name		= "cros-ec-regulator",
		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
		.of_match_table = regulator_cros_ec_of_match,
	},
};

module_platform_driver(cros_ec_regulator_driver);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("ChromeOS EC controlled regulator");
MODULE_AUTHOR("Pi-Hsun Shih <[email protected]>");
| linux-master | drivers/regulator/cros-ec-regulator.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* PCAP2 Regulator Driver
*
* Copyright (c) 2009 Daniel Ribeiro <[email protected]>
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/regulator/driver.h>
#include <linux/regulator/machine.h>
#include <linux/mfd/ezx-pcap.h>
/* Per-regulator voltage tables, in uV; index is the register selector
 * value for that regulator. */
static const unsigned int V1_table[] = {
	2775000, 1275000, 1600000, 1725000, 1825000, 1925000, 2075000, 2275000,
};

static const unsigned int V2_table[] = {
	2500000, 2775000,
};

static const unsigned int V3_table[] = {
	1075000, 1275000, 1550000, 1725000, 1876000, 1950000, 2075000, 2275000,
};

static const unsigned int V4_table[] = {
	1275000, 1550000, 1725000, 1875000, 1950000, 2075000, 2275000, 2775000,
};

static const unsigned int V5_table[] = {
	1875000, 2275000, 2475000, 2775000,
};

static const unsigned int V6_table[] = {
	2475000, 2775000,
};

static const unsigned int V7_table[] = {
	1875000, 2775000,
};

/* Some regulators share an identical table. */
#define V8_table V4_table

static const unsigned int V9_table[] = {
	1575000, 1875000, 2475000, 2775000,
};

static const unsigned int V10_table[] = {
	5000000,
};

static const unsigned int VAUX1_table[] = {
	1875000, 2475000, 2775000, 3000000,
};

#define VAUX2_table VAUX1_table

static const unsigned int VAUX3_table[] = {
	1200000, 1200000, 1200000, 1200000, 1400000, 1600000, 1800000, 2000000,
	2200000, 2400000, 2600000, 2800000, 3000000, 3200000, 3400000, 3600000,
};

static const unsigned int VAUX4_table[] = {
	1800000, 1800000, 3000000, 5000000,
};

static const unsigned int VSIM_table[] = {
	1875000, 3000000,
};

static const unsigned int VSIM2_table[] = {
	1875000,
};

static const unsigned int VVIB_table[] = {
	1300000, 1800000, 2000000, 3000000,
};

static const unsigned int SW1_table[] = {
	900000, 950000, 1000000, 1050000, 1100000, 1150000, 1200000, 1250000,
	1300000, 1350000, 1400000, 1450000, 1500000, 1600000, 1875000, 2250000,
};

#define SW2_table SW1_table
/* Bit positions inside @reg for one PCAP regulator. */
struct pcap_regulator {
	const u8 reg;		/* PCAP register holding the control bits */
	const u8 en;		/* enable bit position (NA if not controllable) */
	const u8 index;		/* LSB position of the voltage selector field */
	const u8 stby;		/* standby control bit position (NA if none) */
	const u8 lowpwr;	/* low-power control bit position (NA if none) */
};

/* Marks a bit position that does not exist for this regulator. */
#define NA 0xff

#define VREG_INFO(_vreg, _reg, _en, _index, _stby, _lowpwr)	\
	[_vreg]	= {						\
		.reg		= _reg,				\
		.en		= _en,				\
		.index		= _index,			\
		.stby		= _stby,			\
		.lowpwr		= _lowpwr,			\
	}
/* Control-bit layout per regulator, indexed by regulator id. */
static struct pcap_regulator vreg_table[] = {
	VREG_INFO(V1,    PCAP_REG_VREG1,   1,  2, 18, 0),
	VREG_INFO(V2,    PCAP_REG_VREG1,   5,  6, 19, 22),
	VREG_INFO(V3,    PCAP_REG_VREG1,   7,  8, 20, 23),
	VREG_INFO(V4,    PCAP_REG_VREG1,   11, 12, 21, 24),
	/* V5 STBY and LOWPWR are on PCAP_REG_VREG2 */
	VREG_INFO(V5,    PCAP_REG_VREG1,   15, 16, 12, 19),

	VREG_INFO(V6,    PCAP_REG_VREG2,   1,  2, 14, 20),
	VREG_INFO(V7,    PCAP_REG_VREG2,   3,  4, 15, 21),
	VREG_INFO(V8,    PCAP_REG_VREG2,   5,  6, 16, 22),
	VREG_INFO(V9,    PCAP_REG_VREG2,   9, 10, 17, 23),
	VREG_INFO(V10,   PCAP_REG_VREG2,  10, NA, 18, 24),

	VREG_INFO(VAUX1, PCAP_REG_AUXVREG, 1,  2, 22, 23),
	/* VAUX2 ... VSIM2 STBY and LOWPWR are on PCAP_REG_LOWPWR */
	VREG_INFO(VAUX2, PCAP_REG_AUXVREG, 4,  5,  0, 1),
	VREG_INFO(VAUX3, PCAP_REG_AUXVREG, 7,  8,  2, 3),
	VREG_INFO(VAUX4, PCAP_REG_AUXVREG, 12, 13, 4, 5),
	VREG_INFO(VSIM,  PCAP_REG_AUXVREG, 17, 18, NA, 6),
	VREG_INFO(VSIM2, PCAP_REG_AUXVREG, 16, NA, NA, 7),
	VREG_INFO(VVIB,  PCAP_REG_AUXVREG, 19, 20, NA, NA),

	VREG_INFO(SW1,   PCAP_REG_SWCTRL,  1,  2, NA, NA),
	VREG_INFO(SW2,   PCAP_REG_SWCTRL,  6,  7, NA, NA),
	/* SW3 STBY is on PCAP_REG_AUXVREG */
	VREG_INFO(SW3,   PCAP_REG_SWCTRL,  11, 12, 24, NA),

	/* SWxS used to control SWx voltage on standby */
/*	VREG_INFO(SW1S,  PCAP_REG_LOWPWR, NA, 12, NA, NA),
	VREG_INFO(SW2S,  PCAP_REG_LOWPWR, NA, 20, NA, NA), */
};
/*
 * Program the voltage selector field for this regulator.  Single-voltage
 * regulators have no selector field and return -EINVAL.
 */
static int pcap_regulator_set_voltage_sel(struct regulator_dev *rdev,
					  unsigned selector)
{
	struct pcap_regulator *vreg = &vreg_table[rdev_get_id(rdev)];
	void *pcap = rdev_get_drvdata(rdev);
	unsigned int field_mask;

	/* the regulator doesn't support voltage switching */
	if (rdev->desc->n_voltages == 1)
		return -EINVAL;

	field_mask = (rdev->desc->n_voltages - 1) << vreg->index;

	return ezx_pcap_set_bits(pcap, vreg->reg, field_mask,
				 selector << vreg->index);
}
/*
 * Read back the current voltage selector.  Fixed-voltage regulators
 * always report selector 0.
 */
static int pcap_regulator_get_voltage_sel(struct regulator_dev *rdev)
{
	struct pcap_regulator *vreg = &vreg_table[rdev_get_id(rdev)];
	void *pcap = rdev_get_drvdata(rdev);
	u32 tmp;

	if (rdev->desc->n_voltages == 1)
		return 0;

	/* NOTE(review): the ezx_pcap_read() return value is not checked;
	 * if the read fails 'tmp' may be used uninitialized — confirm the
	 * SSP read cannot fail here. */
	ezx_pcap_read(pcap, vreg->reg, &tmp);
	tmp = ((tmp >> vreg->index) & (rdev->desc->n_voltages - 1));
	return tmp;
}
static int pcap_regulator_enable(struct regulator_dev *rdev)
{
struct pcap_regulator *vreg = &vreg_table[rdev_get_id(rdev)];
void *pcap = rdev_get_drvdata(rdev);
if (vreg->en == NA)
return -EINVAL;
return ezx_pcap_set_bits(pcap, vreg->reg, 1 << vreg->en, 1 << vreg->en);
}
static int pcap_regulator_disable(struct regulator_dev *rdev)
{
struct pcap_regulator *vreg = &vreg_table[rdev_get_id(rdev)];
void *pcap = rdev_get_drvdata(rdev);
if (vreg->en == NA)
return -EINVAL;
return ezx_pcap_set_bits(pcap, vreg->reg, 1 << vreg->en, 0);
}
/*
 * Report the state of the regulator's enable bit; -EINVAL for
 * regulators without one.
 */
static int pcap_regulator_is_enabled(struct regulator_dev *rdev)
{
	struct pcap_regulator *vreg = &vreg_table[rdev_get_id(rdev)];
	void *pcap = rdev_get_drvdata(rdev);
	u32 tmp;

	if (vreg->en == NA)
		return -EINVAL;

	/* NOTE(review): ezx_pcap_read() errors are ignored; on failure
	 * 'tmp' may be used uninitialized — confirm reads cannot fail. */
	ezx_pcap_read(pcap, vreg->reg, &tmp);
	return (tmp >> vreg->en) & 1;
}
static const struct regulator_ops pcap_regulator_ops = {
	.list_voltage	= regulator_list_voltage_table,
	.set_voltage_sel = pcap_regulator_set_voltage_sel,
	.get_voltage_sel = pcap_regulator_get_voltage_sel,
	.enable		= pcap_regulator_enable,
	.disable	= pcap_regulator_disable,
	.is_enabled	= pcap_regulator_is_enabled,
};

/* Build a descriptor from the regulator's name and its _vreg##_table. */
#define VREG(_vreg)						\
	[_vreg]	= {						\
		.name		= #_vreg,			\
		.id		= _vreg,			\
		.n_voltages	= ARRAY_SIZE(_vreg##_table),	\
		.volt_table	= _vreg##_table,		\
		.ops		= &pcap_regulator_ops,		\
		.type		= REGULATOR_VOLTAGE,		\
		.owner		= THIS_MODULE,			\
	}

/* Descriptors indexed by regulator id (pdev->id selects one at probe). */
static const struct regulator_desc pcap_regulators[] = {
	VREG(V1), VREG(V2), VREG(V3), VREG(V4), VREG(V5), VREG(V6), VREG(V7),
	VREG(V8), VREG(V9), VREG(V10), VREG(VAUX1), VREG(VAUX2), VREG(VAUX3),
	VREG(VAUX4), VREG(VSIM), VREG(VSIM2), VREG(VVIB), VREG(SW1), VREG(SW2),
};
/* Register the single PCAP regulator selected by pdev->id. */
static int pcap_regulator_probe(struct platform_device *pdev)
{
	void *pcap = dev_get_drvdata(pdev->dev.parent);
	struct regulator_config config = { };
	struct regulator_dev *rdev;

	config.dev = &pdev->dev;
	config.init_data = dev_get_platdata(&pdev->dev);
	config.driver_data = pcap;

	rdev = devm_regulator_register(&pdev->dev, &pcap_regulators[pdev->id],
				       &config);
	if (IS_ERR(rdev))
		return PTR_ERR(rdev);

	platform_set_drvdata(pdev, rdev);

	return 0;
}
static struct platform_driver pcap_regulator_driver = {
	.driver = {
		.name	= "pcap-regulator",
		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
	},
	.probe	= pcap_regulator_probe,
};

/* subsys_initcall so supplies exist before consumer drivers probe. */
static int __init pcap_regulator_init(void)
{
	return platform_driver_register(&pcap_regulator_driver);
}

static void __exit pcap_regulator_exit(void)
{
	platform_driver_unregister(&pcap_regulator_driver);
}

subsys_initcall(pcap_regulator_init);
module_exit(pcap_regulator_exit);

MODULE_AUTHOR("Daniel Ribeiro <[email protected]>");
MODULE_DESCRIPTION("PCAP2 Regulator Driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/regulator/pcap-regulator.c |
// SPDX-License-Identifier: GPL-2.0+
//
// pv88090-regulator.c - Regulator device driver for PV88090
// Copyright (C) 2015 Powerventure Semiconductor Ltd.
#include <linux/err.h>
#include <linux/i2c.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/regulator/driver.h>
#include <linux/regulator/machine.h>
#include <linux/regmap.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/regulator/of_regulator.h>
#include "pv88090-regulator.h"
#define PV88090_MAX_REGULATORS	5
/* PV88090 REGULATOR IDs */
enum {
	/* BUCKs */
	PV88090_ID_BUCK1,
	PV88090_ID_BUCK2,
	PV88090_ID_BUCK3,
	/* LDOs */
	PV88090_ID_LDO1,
	PV88090_ID_LDO2,
};
/* Static per-regulator data: desc plus extra configuration registers. */
struct pv88090_regulator {
	struct regulator_desc desc;
	unsigned int conf;	/* mode/current-limit config register (CONF1) */
	unsigned int conf2;	/* VDAC range register (CONF2) */
};
/* Per-chip instance state. */
struct pv88090 {
	struct device *dev;
	struct regmap *regmap;
	struct regulator_dev *rdev[PV88090_MAX_REGULATORS];
};
/* One selectable BUCK output voltage range (see probe range decoding). */
struct pv88090_buck_voltage {
	int min_uV;
	int max_uV;
	int uV_step;
};
static const struct regmap_config pv88090_regmap_config = {
	.reg_bits = 8,
	.val_bits = 8,
};
/* Current limits array (in uA) for BUCK1, BUCK2, BUCK3.
 * Entry indexes corresponds to register values.
 */
static const unsigned int pv88090_buck1_limits[] = {
	220000, 440000, 660000, 880000, 1100000, 1320000, 1540000, 1760000,
	1980000, 2200000, 2420000, 2640000, 2860000, 3080000, 3300000, 3520000,
	3740000, 3960000, 4180000, 4400000, 4620000, 4840000, 5060000, 5280000,
	5500000, 5720000, 5940000, 6160000, 6380000, 6600000, 6820000, 7040000
};
static const unsigned int pv88090_buck23_limits[] = {
	1496000, 2393000, 3291000, 4189000
};
/*
 * Voltage ranges selectable via the VDAC range and range-gain bits; the
 * index used in probe is ((range << 1) | conf2).
 */
static const struct pv88090_buck_voltage pv88090_buck_vol[3] = {
	{
		.min_uV = 600000,
		.max_uV = 1393750,
		.uV_step = 6250,
	},
	{
		.min_uV = 1400000,
		.max_uV = 2193750,
		.uV_step = 6250,
	},
	{
		.min_uV = 1250000,
		.max_uV = 2837500,
		.uV_step = 12500,
	},
};
/*
 * Translate the BUCK mode bits in the CONF register into a
 * REGULATOR_MODE_* value.  Returns 0 for an unrecognised encoding and a
 * negative errno if the register read fails.
 */
static unsigned int pv88090_buck_get_mode(struct regulator_dev *rdev)
{
	struct pv88090_regulator *info = rdev_get_drvdata(rdev);
	unsigned int conf_val;
	int ret;

	ret = regmap_read(rdev->regmap, info->conf, &conf_val);
	if (ret < 0)
		return ret;

	switch (conf_val & PV88090_BUCK1_MODE_MASK) {
	case PV88090_BUCK_MODE_SYNC:
		return REGULATOR_MODE_FAST;
	case PV88090_BUCK_MODE_AUTO:
		return REGULATOR_MODE_NORMAL;
	case PV88090_BUCK_MODE_SLEEP:
		return REGULATOR_MODE_STANDBY;
	}

	return 0;
}
/*
 * Program the BUCK mode bits from a REGULATOR_MODE_* value.  Unsupported
 * modes are rejected with -EINVAL.
 */
static int pv88090_buck_set_mode(struct regulator_dev *rdev,
				 unsigned int mode)
{
	struct pv88090_regulator *info = rdev_get_drvdata(rdev);
	int sel;

	if (mode == REGULATOR_MODE_FAST)
		sel = PV88090_BUCK_MODE_SYNC;
	else if (mode == REGULATOR_MODE_NORMAL)
		sel = PV88090_BUCK_MODE_AUTO;
	else if (mode == REGULATOR_MODE_STANDBY)
		sel = PV88090_BUCK_MODE_SLEEP;
	else
		return -EINVAL;

	return regmap_update_bits(rdev->regmap, info->conf,
				  PV88090_BUCK1_MODE_MASK, sel);
}
/* BUCK ops: regmap helpers plus custom mode handling and current limits. */
static const struct regulator_ops pv88090_buck_ops = {
	.get_mode = pv88090_buck_get_mode,
	.set_mode = pv88090_buck_set_mode,
	.enable = regulator_enable_regmap,
	.disable = regulator_disable_regmap,
	.is_enabled = regulator_is_enabled_regmap,
	.set_voltage_sel = regulator_set_voltage_sel_regmap,
	.get_voltage_sel = regulator_get_voltage_sel_regmap,
	.list_voltage = regulator_list_voltage_linear,
	.set_current_limit = regulator_set_current_limit_regmap,
	.get_current_limit = regulator_get_current_limit_regmap,
};
/* LDO ops: plain regmap voltage/enable control, no mode or limit support. */
static const struct regulator_ops pv88090_ldo_ops = {
	.enable = regulator_enable_regmap,
	.disable = regulator_disable_regmap,
	.is_enabled = regulator_is_enabled_regmap,
	.set_voltage_sel = regulator_set_voltage_sel_regmap,
	.get_voltage_sel = regulator_get_voltage_sel_regmap,
	.list_voltage = regulator_list_voltage_linear,
};
/*
 * Build a BUCK entry.  min/step/n_voltages are placeholders for BUCK2/3;
 * probe rewrites them after decoding the chip's VDAC range bits.
 */
#define PV88090_BUCK(chip, regl_name, min, step, max, limits_array) \
{\
	.desc	=	{\
		.id = chip##_ID_##regl_name,\
		.name = __stringify(chip##_##regl_name),\
		.of_match = of_match_ptr(#regl_name),\
		.regulators_node = of_match_ptr("regulators"),\
		.type = REGULATOR_VOLTAGE,\
		.owner = THIS_MODULE,\
		.ops = &pv88090_buck_ops,\
		.min_uV = min, \
		.uV_step = step, \
		.n_voltages = ((max) - (min))/(step) + 1, \
		.enable_reg = PV88090_REG_##regl_name##_CONF0, \
		.enable_mask = PV88090_##regl_name##_EN, \
		.vsel_reg = PV88090_REG_##regl_name##_CONF0, \
		.vsel_mask = PV88090_V##regl_name##_MASK, \
		.curr_table = limits_array, \
		.n_current_limits = ARRAY_SIZE(limits_array), \
		.csel_reg = PV88090_REG_##regl_name##_CONF1, \
		.csel_mask = PV88090_##regl_name##_ILIM_MASK, \
	},\
	.conf = PV88090_REG_##regl_name##_CONF1, \
	.conf2 = PV88090_REG_##regl_name##_CONF2, \
}
/* Build an LDO entry: enable and voltage selection share the CONT register. */
#define PV88090_LDO(chip, regl_name, min, step, max) \
{\
	.desc	=	{\
		.id = chip##_ID_##regl_name,\
		.name = __stringify(chip##_##regl_name),\
		.of_match = of_match_ptr(#regl_name),\
		.regulators_node = of_match_ptr("regulators"),\
		.type = REGULATOR_VOLTAGE,\
		.owner = THIS_MODULE,\
		.ops = &pv88090_ldo_ops,\
		.min_uV = min, \
		.uV_step = step, \
		.n_voltages = ((max) - (min))/(step) + 1, \
		.enable_reg = PV88090_REG_##regl_name##_CONT, \
		.enable_mask = PV88090_##regl_name##_EN, \
		.vsel_reg = PV88090_REG_##regl_name##_CONT, \
		.vsel_mask = PV88090_V##regl_name##_MASK, \
	},\
}
/* NOTE: not const — probe patches BUCK2/BUCK3 voltage ranges in place. */
static struct pv88090_regulator pv88090_regulator_info[] = {
	PV88090_BUCK(PV88090, BUCK1, 600000, 6250, 1393750,
		pv88090_buck1_limits),
	PV88090_BUCK(PV88090, BUCK2, 600000, 6250, 1393750,
		pv88090_buck23_limits),
	PV88090_BUCK(PV88090, BUCK3, 600000, 6250, 1393750,
		pv88090_buck23_limits),
	PV88090_LDO(PV88090, LDO1, 1200000, 50000, 4350000),
	PV88090_LDO(PV88090, LDO2, 650000, 25000, 2225000),
};
/*
 * Threaded IRQ handler: read EVENT_A, broadcast under-voltage and
 * over-temperature notifications to all registered regulators, and ack
 * each handled event by writing its bit back (write-1-to-clear).
 */
static irqreturn_t pv88090_irq_handler(int irq, void *data)
{
	struct pv88090 *chip = data;
	int i, reg_val, err, ret = IRQ_NONE;

	err = regmap_read(chip->regmap, PV88090_REG_EVENT_A, &reg_val);
	if (err < 0)
		goto error_i2c;

	if (reg_val & PV88090_E_VDD_FLT) {
		for (i = 0; i < PV88090_MAX_REGULATORS; i++) {
			/* rdev[] may be sparse if probe failed part-way */
			if (chip->rdev[i] != NULL)
				regulator_notifier_call_chain(chip->rdev[i],
					REGULATOR_EVENT_UNDER_VOLTAGE,
					NULL);
		}

		err = regmap_write(chip->regmap, PV88090_REG_EVENT_A,
			PV88090_E_VDD_FLT);
		if (err < 0)
			goto error_i2c;

		ret = IRQ_HANDLED;
	}

	if (reg_val & PV88090_E_OVER_TEMP) {
		for (i = 0; i < PV88090_MAX_REGULATORS; i++) {
			if (chip->rdev[i] != NULL)
				regulator_notifier_call_chain(chip->rdev[i],
					REGULATOR_EVENT_OVER_TEMP,
					NULL);
		}

		err = regmap_write(chip->regmap, PV88090_REG_EVENT_A,
			PV88090_E_OVER_TEMP);
		if (err < 0)
			goto error_i2c;

		ret = IRQ_HANDLED;
	}

	return ret;

error_i2c:
	dev_err(chip->dev, "I2C error : %d\n", err);
	return IRQ_NONE;
}
/*
 * I2C driver interface functions
 */
/*
 * Probe: set up the regmap, mask then selectively unmask fault interrupts,
 * decode the BUCK2/BUCK3 voltage range straps, and register all regulators.
 */
static int pv88090_i2c_probe(struct i2c_client *i2c)
{
	struct regulator_init_data *init_data = dev_get_platdata(&i2c->dev);
	struct pv88090 *chip;
	struct regulator_config config = { };
	int error, i, ret = 0;
	unsigned int conf2, range, index;

	chip = devm_kzalloc(&i2c->dev, sizeof(struct pv88090), GFP_KERNEL);
	if (!chip)
		return -ENOMEM;

	chip->dev = &i2c->dev;
	chip->regmap = devm_regmap_init_i2c(i2c, &pv88090_regmap_config);
	if (IS_ERR(chip->regmap)) {
		error = PTR_ERR(chip->regmap);
		dev_err(chip->dev, "Failed to allocate register map: %d\n",
			error);
		return error;
	}

	i2c_set_clientdata(i2c, chip);

	if (i2c->irq != 0) {
		/* Mask everything before the handler is installed. */
		ret = regmap_write(chip->regmap, PV88090_REG_MASK_A, 0xFF);
		if (ret < 0) {
			dev_err(chip->dev,
				"Failed to mask A reg: %d\n", ret);
			return ret;
		}

		ret = regmap_write(chip->regmap, PV88090_REG_MASK_B, 0xFF);
		if (ret < 0) {
			dev_err(chip->dev,
				"Failed to mask B reg: %d\n", ret);
			return ret;
		}

		ret = devm_request_threaded_irq(&i2c->dev, i2c->irq, NULL,
					pv88090_irq_handler,
					IRQF_TRIGGER_LOW|IRQF_ONESHOT,
					"pv88090", chip);
		if (ret != 0) {
			dev_err(chip->dev, "Failed to request IRQ: %d\n",
				i2c->irq);
			return ret;
		}

		/* Unmask only the faults the handler knows how to report. */
		ret = regmap_update_bits(chip->regmap, PV88090_REG_MASK_A,
			PV88090_M_VDD_FLT | PV88090_M_OVER_TEMP, 0);
		if (ret < 0) {
			dev_err(chip->dev,
				"Failed to update mask reg: %d\n", ret);
			return ret;
		}

	} else {
		dev_warn(chip->dev, "No IRQ configured\n");
	}

	config.dev = chip->dev;
	config.regmap = chip->regmap;

	for (i = 0; i < PV88090_MAX_REGULATORS; i++) {
		if (init_data)
			config.init_data = &init_data[i];

		if (i == PV88090_ID_BUCK2 || i == PV88090_ID_BUCK3) {
			/* Read the hardware-strapped voltage range bits. */
			ret = regmap_read(chip->regmap,
				pv88090_regulator_info[i].conf2, &conf2);
			if (ret < 0)
				return ret;

			conf2 = (conf2 >> PV88090_BUCK_VDAC_RANGE_SHIFT) &
				PV88090_BUCK_VDAC_RANGE_MASK;

			ret = regmap_read(chip->regmap,
				PV88090_REG_BUCK_FOLD_RANGE, &range);
			if (ret < 0)
				return ret;

			range = (range >>
				 (PV88090_BUCK_VRANGE_GAIN_SHIFT + i - 1)) &
				PV88090_BUCK_VRANGE_GAIN_MASK;
			index = ((range << 1) | conf2);
			if (index > PV88090_ID_BUCK3) {
				dev_err(chip->dev,
					"Invalid index(%d)\n", index);
				return -EINVAL;
			}

			/* Patch the desc with the decoded voltage range. */
			pv88090_regulator_info[i].desc.min_uV
				= pv88090_buck_vol[index].min_uV;
			pv88090_regulator_info[i].desc.uV_step
				= pv88090_buck_vol[index].uV_step;
			pv88090_regulator_info[i].desc.n_voltages
				= ((pv88090_buck_vol[index].max_uV)
				- (pv88090_buck_vol[index].min_uV))
				/(pv88090_buck_vol[index].uV_step) + 1;
		}

		config.driver_data = (void *)&pv88090_regulator_info[i];
		chip->rdev[i] = devm_regulator_register(chip->dev,
			&pv88090_regulator_info[i].desc, &config);
		if (IS_ERR(chip->rdev[i])) {
			dev_err(chip->dev,
				"Failed to register PV88090 regulator\n");
			return PTR_ERR(chip->rdev[i]);
		}
	}

	return 0;
}
static const struct i2c_device_id pv88090_i2c_id[] = {
	{"pv88090", 0},
	{},
};
MODULE_DEVICE_TABLE(i2c, pv88090_i2c_id);
#ifdef CONFIG_OF
static const struct of_device_id pv88090_dt_ids[] = {
	{ .compatible = "pvs,pv88090", .data = &pv88090_i2c_id[0] },
	{},
};
MODULE_DEVICE_TABLE(of, pv88090_dt_ids);
#endif
static struct i2c_driver pv88090_regulator_driver = {
	.driver = {
		.name = "pv88090",
		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
		.of_match_table = of_match_ptr(pv88090_dt_ids),
	},
	.probe = pv88090_i2c_probe,
	.id_table = pv88090_i2c_id,
};
module_i2c_driver(pv88090_regulator_driver);
MODULE_AUTHOR("James Ban <[email protected]>");
MODULE_DESCRIPTION("Regulator device driver for Powerventure PV88090");
MODULE_LICENSE("GPL");
| linux-master | drivers/regulator/pv88090-regulator.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* OF helpers for regulator framework
*
* Copyright (C) 2011 Texas Instruments, Inc.
* Rajendra Nayak <[email protected]>
*/
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/regulator/machine.h>
#include <linux/regulator/driver.h>
#include <linux/regulator/of_regulator.h>
#include "internal.h"
/* DT child-node names for each system suspend state, indexed by PM_SUSPEND_*. */
static const char *const regulator_states[PM_SUSPEND_MAX + 1] = {
	[PM_SUSPEND_STANDBY]	= "regulator-state-standby",
	[PM_SUSPEND_MEM]	= "regulator-state-mem",
	[PM_SUSPEND_MAX]	= "regulator-state-disk",
};
/*
 * Map a DT protection-limit property value onto a notification limit:
 * 0 disables the limit, 1 enables it at the hardware default, and any
 * other value is used as the literal threshold.
 */
static void fill_limit(int *limit, int val)
{
	if (!val)
		*limit = REGULATOR_NOTIF_LIMIT_DISABLE;
	else if (val == 1)
		*limit = REGULATOR_NOTIF_LIMIT_ENABLE;
	else
		*limit = val;
}
/*
 * Parse the protection/error/warning limit properties for over-current,
 * over-voltage, over-temperature and under-voltage, and record for each
 * class whether any limit was given so detection can be enabled.
 */
static void of_get_regulator_prot_limits(struct device_node *np,
					 struct regulation_constraints *constraints)
{
	u32 pval;
	int i;
	static const char *const props[] = {
		"regulator-oc-%s-microamp",
		"regulator-ov-%s-microvolt",
		"regulator-temp-%s-kelvin",
		"regulator-uv-%s-microvolt",
	};
	/* One limit structure per property class, same order as props[]. */
	struct notification_limit *limits[] = {
		&constraints->over_curr_limits,
		&constraints->over_voltage_limits,
		&constraints->temp_limits,
		&constraints->under_voltage_limits,
	};
	bool set[4] = {0};
	/* Protection limits: */
	for (i = 0; i < ARRAY_SIZE(props); i++) {
		char prop[255];
		bool found;
		int j;
		static const char *const lvl[] = {
			"protection", "error", "warn"
		};
		int *l[] = {
			&limits[i]->prot, &limits[i]->err, &limits[i]->warn,
		};

		/* Expand "%s" with each severity level and look it up. */
		for (j = 0; j < ARRAY_SIZE(lvl); j++) {
			snprintf(prop, 255, props[i], lvl[j]);
			found = !of_property_read_u32(np, prop, &pval);
			if (found)
				fill_limit(l[j], pval);

			set[i] |= found;
		}
	}

	/* Enable detection only for classes where a limit was specified. */
	constraints->over_current_detection = set[0];
	constraints->over_voltage_detection = set[1];
	constraints->over_temp_detection = set[2];
	constraints->under_voltage_detection = set[3];
}
/*
 * Fill (*init_data)->constraints from the generic regulator DT bindings of
 * @np: voltage/current ranges, modes, ramp/settling times, coupling,
 * protection limits and per-suspend-state sub-nodes.  Returns 0 on
 * success or -ENOMEM if the max_spread array cannot be allocated.
 */
static int of_get_regulation_constraints(struct device *dev,
					struct device_node *np,
					struct regulator_init_data **init_data,
					const struct regulator_desc *desc)
{
	struct regulation_constraints *constraints = &(*init_data)->constraints;
	struct regulator_state *suspend_state;
	struct device_node *suspend_np;
	unsigned int mode;
	int ret, i, len;
	int n_phandles;
	u32 pval;

	n_phandles = of_count_phandle_with_args(np, "regulator-coupled-with",
						NULL);
	n_phandles = max(n_phandles, 0);

	constraints->name = of_get_property(np, "regulator-name", NULL);

	if (!of_property_read_u32(np, "regulator-min-microvolt", &pval))
		constraints->min_uV = pval;

	if (!of_property_read_u32(np, "regulator-max-microvolt", &pval))
		constraints->max_uV = pval;

	/* Voltage change possible? */
	if (constraints->min_uV != constraints->max_uV)
		constraints->valid_ops_mask |= REGULATOR_CHANGE_VOLTAGE;

	/* Do we have a voltage range, if so try to apply it? */
	if (constraints->min_uV && constraints->max_uV)
		constraints->apply_uV = true;

	if (!of_property_read_u32(np, "regulator-microvolt-offset", &pval))
		constraints->uV_offset = pval;
	if (!of_property_read_u32(np, "regulator-min-microamp", &pval))
		constraints->min_uA = pval;
	if (!of_property_read_u32(np, "regulator-max-microamp", &pval))
		constraints->max_uA = pval;

	if (!of_property_read_u32(np, "regulator-input-current-limit-microamp",
				  &pval))
		constraints->ilim_uA = pval;

	/* Current change possible? */
	if (constraints->min_uA != constraints->max_uA)
		constraints->valid_ops_mask |= REGULATOR_CHANGE_CURRENT;

	constraints->boot_on = of_property_read_bool(np, "regulator-boot-on");
	constraints->always_on = of_property_read_bool(np, "regulator-always-on");
	if (!constraints->always_on) /* status change should be possible. */
		constraints->valid_ops_mask |= REGULATOR_CHANGE_STATUS;

	constraints->pull_down = of_property_read_bool(np, "regulator-pull-down");

	if (of_property_read_bool(np, "regulator-allow-bypass"))
		constraints->valid_ops_mask |= REGULATOR_CHANGE_BYPASS;

	if (of_property_read_bool(np, "regulator-allow-set-load"))
		constraints->valid_ops_mask |= REGULATOR_CHANGE_DRMS;

	/* A ramp delay of 0 means "disable ramp delay". */
	ret = of_property_read_u32(np, "regulator-ramp-delay", &pval);
	if (!ret) {
		if (pval)
			constraints->ramp_delay = pval;
		else
			constraints->ramp_disable = true;
	}

	ret = of_property_read_u32(np, "regulator-settling-time-us", &pval);
	if (!ret)
		constraints->settling_time = pval;

	/* Directional settling times lose to the combined property. */
	ret = of_property_read_u32(np, "regulator-settling-time-up-us", &pval);
	if (!ret)
		constraints->settling_time_up = pval;
	if (constraints->settling_time_up && constraints->settling_time) {
		pr_warn("%pOFn: ambiguous configuration for settling time, ignoring 'regulator-settling-time-up-us'\n",
			np);
		constraints->settling_time_up = 0;
	}

	ret = of_property_read_u32(np, "regulator-settling-time-down-us",
				   &pval);
	if (!ret)
		constraints->settling_time_down = pval;
	if (constraints->settling_time_down && constraints->settling_time) {
		pr_warn("%pOFn: ambiguous configuration for settling time, ignoring 'regulator-settling-time-down-us'\n",
			np);
		constraints->settling_time_down = 0;
	}

	ret = of_property_read_u32(np, "regulator-enable-ramp-delay", &pval);
	if (!ret)
		constraints->enable_time = pval;

	constraints->soft_start = of_property_read_bool(np,
					"regulator-soft-start");
	ret = of_property_read_u32(np, "regulator-active-discharge", &pval);
	if (!ret) {
		constraints->active_discharge =
				(pval) ? REGULATOR_ACTIVE_DISCHARGE_ENABLE :
					REGULATOR_ACTIVE_DISCHARGE_DISABLE;
	}

	/* Mode values are hardware specific; the desc must map them. */
	if (!of_property_read_u32(np, "regulator-initial-mode", &pval)) {
		if (desc && desc->of_map_mode) {
			mode = desc->of_map_mode(pval);
			if (mode == REGULATOR_MODE_INVALID)
				pr_err("%pOFn: invalid mode %u\n", np, pval);
			else
				constraints->initial_mode = mode;
		} else {
			pr_warn("%pOFn: mapping for mode %d not defined\n",
				np, pval);
		}
	}

	len = of_property_count_elems_of_size(np, "regulator-allowed-modes",
						sizeof(u32));
	if (len > 0) {
		if (desc && desc->of_map_mode) {
			for (i = 0; i < len; i++) {
				ret = of_property_read_u32_index(np,
					"regulator-allowed-modes", i, &pval);
				if (ret) {
					pr_err("%pOFn: couldn't read allowed modes index %d, ret=%d\n",
						np, i, ret);
					break;
				}
				mode = desc->of_map_mode(pval);
				if (mode == REGULATOR_MODE_INVALID)
					pr_err("%pOFn: invalid regulator-allowed-modes element %u\n",
						np, pval);
				else
					constraints->valid_modes_mask |= mode;
			}
			if (constraints->valid_modes_mask)
				constraints->valid_ops_mask
					|= REGULATOR_CHANGE_MODE;
		} else {
			pr_warn("%pOFn: mode mapping not defined\n", np);
		}
	}

	if (!of_property_read_u32(np, "regulator-system-load", &pval))
		constraints->system_load = pval;

	if (n_phandles) {
		constraints->max_spread = devm_kzalloc(dev,
				sizeof(*constraints->max_spread) * n_phandles,
				GFP_KERNEL);

		if (!constraints->max_spread)
			return -ENOMEM;

		of_property_read_u32_array(np, "regulator-coupled-max-spread",
					   constraints->max_spread, n_phandles);
	}

	if (!of_property_read_u32(np, "regulator-max-step-microvolt",
				  &pval))
		constraints->max_uV_step = pval;

	constraints->over_current_protection = of_property_read_bool(np,
					"regulator-over-current-protection");

	of_get_regulator_prot_limits(np, constraints);

	/* Walk every suspend state that has a DT sub-node. */
	for (i = 0; i < ARRAY_SIZE(regulator_states); i++) {
		switch (i) {
		case PM_SUSPEND_MEM:
			suspend_state = &constraints->state_mem;
			break;
		case PM_SUSPEND_MAX:
			suspend_state = &constraints->state_disk;
			break;
		case PM_SUSPEND_STANDBY:
			suspend_state = &constraints->state_standby;
			break;
		case PM_SUSPEND_ON:
		case PM_SUSPEND_TO_IDLE:
		default:
			continue;
		}

		suspend_np = of_get_child_by_name(np, regulator_states[i]);
		if (!suspend_np)
			continue;
		if (!suspend_state) {
			of_node_put(suspend_np);
			continue;
		}

		if (!of_property_read_u32(suspend_np, "regulator-mode",
					  &pval)) {
			if (desc && desc->of_map_mode) {
				mode = desc->of_map_mode(pval);
				if (mode == REGULATOR_MODE_INVALID)
					pr_err("%pOFn: invalid mode %u\n",
					       np, pval);
				else
					suspend_state->mode = mode;
			} else {
				pr_warn("%pOFn: mapping for mode %d not defined\n",
					np, pval);
			}
		}

		if (of_property_read_bool(suspend_np,
					"regulator-on-in-suspend"))
			suspend_state->enabled = ENABLE_IN_SUSPEND;
		else if (of_property_read_bool(suspend_np,
					"regulator-off-in-suspend"))
			suspend_state->enabled = DISABLE_IN_SUSPEND;

		if (!of_property_read_u32(suspend_np,
				"regulator-suspend-min-microvolt", &pval))
			suspend_state->min_uV = pval;

		if (!of_property_read_u32(suspend_np,
				"regulator-suspend-max-microvolt", &pval))
			suspend_state->max_uV = pval;

		if (!of_property_read_u32(suspend_np,
					"regulator-suspend-microvolt", &pval))
			suspend_state->uV = pval;
		else /* otherwise use min_uV as default suspend voltage */
			suspend_state->uV = suspend_state->min_uV;

		if (of_property_read_bool(suspend_np,
					"regulator-changeable-in-suspend"))
			suspend_state->changeable = true;

		if (i == PM_SUSPEND_MEM)
			constraints->initial_state = PM_SUSPEND_MEM;

		of_node_put(suspend_np);
		suspend_state = NULL;
		suspend_np = NULL;
	}

	return 0;
}
/**
 * of_get_regulator_init_data - extract regulator_init_data structure info
 * @dev: device requesting for regulator_init_data
 * @node: regulator device node
 * @desc: regulator description
 *
 * Allocate a regulator_init_data with devres and populate it from the
 * generic regulator bindings found in @node.
 *
 * Return: pointer to the populated structure, or NULL on error (no node,
 * allocation failure, or malformed constraints).
 */
struct regulator_init_data *of_get_regulator_init_data(struct device *dev,
					  struct device_node *node,
					  const struct regulator_desc *desc)
{
	struct regulator_init_data *data;

	if (!node)
		return NULL;

	data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
	if (!data)
		return NULL; /* Out of memory? */

	if (of_get_regulation_constraints(dev, node, &data, desc))
		return NULL;

	return data;
}
EXPORT_SYMBOL_GPL(of_get_regulator_init_data);
/* devres wrapper tracking the of_node references taken by of_regulator_match(). */
struct devm_of_regulator_matches {
	struct of_regulator_match *matches;
	unsigned int num_matches;
};
/* devres release: drop the of_node reference held by each matched entry. */
static void devm_of_regulator_put_matches(struct device *dev, void *res)
{
	struct devm_of_regulator_matches *devm_matches = res;
	int i;

	for (i = 0; i < devm_matches->num_matches; i++)
		of_node_put(devm_matches->matches[i].of_node);
}
/**
 * of_regulator_match - extract multiple regulator init data from device tree.
 * @dev: device requesting the data
 * @node: parent device node of the regulators
 * @matches: match table for the regulators
 * @num_matches: number of entries in match table
 *
 * This function uses a match table specified by the regulator driver to
 * parse regulator init data from the device tree. @node is expected to
 * contain a set of child nodes, each providing the init data for one
 * regulator. The data parsed from a child node will be matched to a regulator
 * based on either the deprecated property regulator-compatible if present,
 * or otherwise the child node's name. Note that the match table is modified
 * in place and an additional of_node reference is taken for each matched
 * regulator.
 *
 * Returns the number of matches found or a negative error code on failure.
 */
int of_regulator_match(struct device *dev, struct device_node *node,
		       struct of_regulator_match *matches,
		       unsigned int num_matches)
{
	unsigned int count = 0;
	unsigned int i;
	const char *name;
	struct device_node *child;
	struct devm_of_regulator_matches *devm_matches;

	if (!dev || !node)
		return -EINVAL;

	/* Register a devres action so matched node refs are dropped on unbind. */
	devm_matches = devres_alloc(devm_of_regulator_put_matches,
				    sizeof(struct devm_of_regulator_matches),
				    GFP_KERNEL);
	if (!devm_matches)
		return -ENOMEM;

	devm_matches->matches = matches;
	devm_matches->num_matches = num_matches;

	devres_add(dev, devm_matches);

	/* Reset the table: stale entries would otherwise be skipped below. */
	for (i = 0; i < num_matches; i++) {
		struct of_regulator_match *match = &matches[i];
		match->init_data = NULL;
		match->of_node = NULL;
	}

	for_each_child_of_node(node, child) {
		/* Deprecated regulator-compatible wins over the node name. */
		name = of_get_property(child,
					"regulator-compatible", NULL);
		if (!name)
			name = child->name;

		for (i = 0; i < num_matches; i++) {
			struct of_regulator_match *match = &matches[i];
			if (match->of_node)
				continue;

			if (strcmp(match->name, name))
				continue;

			match->init_data =
				of_get_regulator_init_data(dev, child,
							   match->desc);
			if (!match->init_data) {
				dev_err(dev,
					"failed to parse DT for regulator %pOFn\n",
					child);
				of_node_put(child);
				return -EINVAL;
			}
			match->of_node = of_node_get(child);
			count++;
			break;
		}
	}

	return count;
}
EXPORT_SYMBOL_GPL(of_regulator_match);
/*
 * Locate the DT node describing @desc's regulator.  Searches the children
 * of desc->regulators_node when set, otherwise considers the device node
 * itself.  Matching is by the deprecated regulator-compatible property or
 * by node (full) name against desc->of_match.  The returned node carries a
 * reference the caller must drop; returns NULL if nothing matches.
 */
static struct
device_node *regulator_of_get_init_node(struct device *dev,
					const struct regulator_desc *desc)
{
	struct device_node *search, *child;
	const char *name;

	if (!dev->of_node || !desc->of_match)
		return NULL;

	if (desc->regulators_node) {
		search = of_get_child_by_name(dev->of_node,
					      desc->regulators_node);
	} else {
		search = of_node_get(dev->of_node);

		if (!strcmp(desc->of_match, search->name))
			return search;
	}

	if (!search) {
		dev_dbg(dev, "Failed to find regulator container node '%s'\n",
			desc->regulators_node);
		return NULL;
	}

	for_each_available_child_of_node(search, child) {
		name = of_get_property(child, "regulator-compatible", NULL);
		if (!name) {
			if (!desc->of_match_full_name)
				name = child->name;
			else
				name = child->full_name;
		}

		if (!strcmp(desc->of_match, name)) {
			of_node_put(search);
			/*
			 * 'of_node_get(child)' is already performed by the
			 * for_each loop.
			 */
			return child;
		}
	}

	of_node_put(search);

	return NULL;
}
/*
 * Find the DT node for @desc, parse its init data, and run the driver's
 * optional of_parse_cb hook.  On success *node is set to the matched node
 * (reference held, caller puts it).  Returns the parsed init data, NULL on
 * no-match/parse failure, or ERR_PTR(-EPROBE_DEFER) when the callback
 * asks to defer.
 */
struct regulator_init_data *regulator_of_get_init_data(struct device *dev,
					    const struct regulator_desc *desc,
					    struct regulator_config *config,
					    struct device_node **node)
{
	struct device_node *child;
	struct regulator_init_data *init_data = NULL;

	child = regulator_of_get_init_node(config->dev, desc);
	if (!child)
		return NULL;

	init_data = of_get_regulator_init_data(dev, child, desc);
	if (!init_data) {
		dev_err(dev, "failed to parse DT for regulator %pOFn\n", child);
		goto error;
	}

	if (desc->of_parse_cb) {
		int ret;

		ret = desc->of_parse_cb(child, desc, config);
		if (ret) {
			if (ret == -EPROBE_DEFER) {
				of_node_put(child);
				return ERR_PTR(-EPROBE_DEFER);
			}
			dev_err(dev,
				"driver callback failed to parse DT for regulator %pOFn\n",
				child);
			goto error;
		}
	}

	*node = child;

	return init_data;

error:
	of_node_put(child);

	return NULL;
}
struct regulator_dev *of_find_regulator_by_node(struct device_node *np)
{
struct device *dev;
dev = class_find_device_by_of_node(®ulator_class, np);
return dev ? dev_to_rdev(dev) : NULL;
}
/*
 * Returns number of regulators coupled with rdev (0 when the
 * "regulator-coupled-with" property is absent or malformed).
 */
int of_get_n_coupled(struct regulator_dev *rdev)
{
	int count;

	count = of_count_phandle_with_args(rdev->dev.of_node,
					   "regulator-coupled-with",
					   NULL);

	return max(count, 0);
}
/*
 * Looks for "to_find" device_node in src's "regulator-coupled-with"
 * property.  On success stores the phandle position in *index and
 * returns true; otherwise returns false and leaves *index untouched.
 */
static bool of_coupling_find_node(struct device_node *src,
				  struct device_node *to_find,
				  int *index)
{
	int count, i;

	count = of_count_phandle_with_args(src,
					   "regulator-coupled-with",
					   NULL);

	for (i = 0; i < count; i++) {
		struct device_node *candidate;

		candidate = of_parse_phandle(src,
					     "regulator-coupled-with", i);
		if (!candidate)
			break;

		if (candidate == to_find) {
			of_node_put(candidate);
			*index = i;
			return true;
		}

		of_node_put(candidate);
	}

	return false;
}
/**
 * of_check_coupling_data - Parse rdev's coupling properties and check data
 *			    consistency
 * @rdev: pointer to regulator_dev whose data is checked
 *
 * Function checks if all the following conditions are met:
 * - rdev's max_spread is greater than 0
 * - all coupled regulators have the same max_spread
 * - all coupled regulators have the same number of regulator_dev phandles
 * - all regulators are linked to each other
 *
 * Returns true if all conditions are met.
 */
bool of_check_coupling_data(struct regulator_dev *rdev)
{
	struct device_node *node = rdev->dev.of_node;
	int n_phandles = of_get_n_coupled(rdev);
	struct device_node *c_node;
	int index;
	int i;
	bool ret = true;

	/* iterate over rdev's phandles */
	for (i = 0; i < n_phandles; i++) {
		int max_spread = rdev->constraints->max_spread[i];
		int c_max_spread, c_n_phandles;

		if (max_spread <= 0) {
			dev_err(&rdev->dev, "max_spread value invalid\n");
			return false;
		}

		c_node = of_parse_phandle(node,
					  "regulator-coupled-with", i);
		if (!c_node) {
			/*
			 * Malformed phandle list: bail out instead of
			 * continuing to query a NULL node below.
			 */
			ret = false;
			break;
		}

		c_n_phandles = of_count_phandle_with_args(c_node,
							  "regulator-coupled-with",
							  NULL);
		if (c_n_phandles != n_phandles) {
			dev_err(&rdev->dev, "number of coupled reg phandles mismatch\n");
			ret = false;
			goto clean;
		}

		if (!of_coupling_find_node(c_node, node, &index)) {
			dev_err(&rdev->dev, "missing 2-way linking for coupled regulators\n");
			ret = false;
			goto clean;
		}

		if (of_property_read_u32_index(c_node, "regulator-coupled-max-spread",
					       index, &c_max_spread)) {
			ret = false;
			goto clean;
		}

		if (c_max_spread != max_spread) {
			dev_err(&rdev->dev,
				"coupled regulators max_spread mismatch\n");
			ret = false;
			goto clean;
		}

clean:
		of_node_put(c_node);
		if (!ret)
			break;
	}

	return ret;
}
/**
 * of_parse_coupled_regulator() - Get regulator_dev pointer from rdev's property
 * @rdev: Pointer to regulator_dev, whose DTS is used as a source to parse
 *	  "regulator-coupled-with" property
 * @index: Index in phandles array
 *
 * Return: the regulator_dev registered against the phandle at @index, or
 * NULL if the phandle is missing or that regulator is not yet registered.
 */
struct regulator_dev *of_parse_coupled_regulator(struct regulator_dev *rdev,
						 int index)
{
	struct device_node *phandle_node;
	struct regulator_dev *coupled;

	phandle_node = of_parse_phandle(rdev->dev.of_node,
					"regulator-coupled-with", index);
	if (!phandle_node)
		return NULL;

	coupled = of_find_regulator_by_node(phandle_node);

	of_node_put(phandle_node);

	return coupled;
}
/*
 * Check if name is a supply name according to the '*-supply' pattern
 * return 0 if false
 * return length of supply name without the -supply
 */
static int is_supply_name(const char *name)
{
	size_t len = strlen(name);
	const char *dash;

	/* shortest valid form is "x-supply" (8 chars) */
	if (len < 8)
		return 0;

	/* the "supply" suffix cannot contain '-', so the last '-' is ours */
	dash = strrchr(name, '-');
	if (!dash || dash == name)
		return 0;

	if (strcmp(dash + 1, "supply") != 0)
		return 0;

	return (int)(dash - name);
}
/*
 * of_regulator_bulk_get_all - get multiple regulator consumers
 *
 * @dev: Device to supply
 * @np: device node to search for consumers
 * @consumers: Configuration of consumers; clients are stored here.
 *
 * @return number of regulators on success, an errno on failure.
 *
 * This helper function allows drivers to get several regulator
 * consumers in one operation. If any of the regulators cannot be
 * acquired then any regulators that were allocated will be freed
 * before returning to the caller, and *@consumers is reset to NULL.
 */
int of_regulator_bulk_get_all(struct device *dev, struct device_node *np,
			      struct regulator_bulk_data **consumers)
{
	int num_consumers = 0;
	struct regulator *tmp;
	struct property *prop;
	int i, n = 0, ret;
	char name[64];

	*consumers = NULL;

	/*
	 * first pass: count the xxx-supply properties
	 * second pass: fill consumers
	 */
restart:
	for_each_property_of_node(np, prop) {
		i = is_supply_name(prop->name);
		if (i == 0)
			continue;
		/* skip absurdly long supply names that would overflow name[] */
		if (i >= (int)sizeof(name))
			continue;
		if (!*consumers) {
			num_consumers++;
			continue;
		} else {
			/* strip the "-supply" suffix to get the supply name */
			memcpy(name, prop->name, i);
			name[i] = '\0';
			tmp = regulator_get(dev, name);
			if (IS_ERR(tmp)) {
				/* keep the real error, e.g. -EPROBE_DEFER */
				ret = PTR_ERR(tmp);
				goto error;
			}
			(*consumers)[n].consumer = tmp;
			n++;
			continue;
		}
	}
	if (*consumers)
		return num_consumers;
	if (num_consumers == 0)
		return 0;
	*consumers = kmalloc_array(num_consumers,
				   sizeof(struct regulator_bulk_data),
				   GFP_KERNEL);
	if (!*consumers)
		return -ENOMEM;
	goto restart;

error:
	/*
	 * Release what was acquired so far.  Note: the consumers live in
	 * the array *consumers points at; indexing the double pointer
	 * itself (consumers[n]) would walk past the caller's variable.
	 */
	while (--n >= 0)
		regulator_put((*consumers)[n].consumer);
	kfree(*consumers);
	*consumers = NULL;
	return ret;
}
EXPORT_SYMBOL_GPL(of_regulator_bulk_get_all);
| linux-master | drivers/regulator/of_regulator.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) STMicroelectronics 2017
*
* Author: Fabrice Gasnier <[email protected]>
*/
#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/regulator/driver.h>
#include <linux/regulator/of_regulator.h>
#include <linux/pm_runtime.h>
/* STM32 VREFBUF registers */
#define STM32_VREFBUF_CSR		0x00
/* STM32 VREFBUF CSR bitfields */
#define STM32_VRS			GENMASK(6, 4)	/* voltage scale select */
#define STM32_VRR			BIT(3)		/* voltage ready flag */
#define STM32_HIZ			BIT(1)		/* high-impedance output */
#define STM32_ENVR			BIT(0)		/* enable voltage reference */
#define STM32_VREFBUF_AUTO_SUSPEND_DELAY_MS	10
/* Driver instance state: register base, bus clock, and device handle. */
struct stm32_vrefbuf {
	void __iomem *base;
	struct clk *clk;
	struct device *dev;
};
static const unsigned int stm32_vrefbuf_voltages[] = {
	/* Matches resp. VRS = 000b, 001b, 010b, 011b */
	2500000, 2048000, 1800000, 1500000,
};
/*
 * Enable the reference buffer: clear HIZ, set ENVR, then poll VRR until
 * the output has settled.  On timeout the buffer is switched back off
 * (HIZ set, ENVR cleared) and the poll error is returned.
 */
static int stm32_vrefbuf_enable(struct regulator_dev *rdev)
{
	struct stm32_vrefbuf *priv = rdev_get_drvdata(rdev);
	u32 val;
	int ret;

	ret = pm_runtime_resume_and_get(priv->dev);
	if (ret < 0)
		return ret;

	val = readl_relaxed(priv->base + STM32_VREFBUF_CSR);
	val = (val & ~STM32_HIZ) | STM32_ENVR;
	writel_relaxed(val, priv->base + STM32_VREFBUF_CSR);

	/*
	 * Vrefbuf startup time depends on external capacitor: wait here for
	 * VRR to be set. That means output has reached expected value.
	 * ~650us sleep should be enough for caps up to 1.5uF. Use 10ms as
	 * arbitrary timeout.
	 */
	ret = readl_poll_timeout(priv->base + STM32_VREFBUF_CSR, val,
				 val & STM32_VRR, 650, 10000);
	if (ret) {
		dev_err(&rdev->dev, "stm32 vrefbuf timed out!\n");
		val = readl_relaxed(priv->base + STM32_VREFBUF_CSR);
		val = (val & ~STM32_ENVR) | STM32_HIZ;
		writel_relaxed(val, priv->base + STM32_VREFBUF_CSR);
	}

	pm_runtime_mark_last_busy(priv->dev);
	pm_runtime_put_autosuspend(priv->dev);

	return ret;
}
/* Disable the reference buffer by clearing ENVR (HIZ state left as-is). */
static int stm32_vrefbuf_disable(struct regulator_dev *rdev)
{
	struct stm32_vrefbuf *priv = rdev_get_drvdata(rdev);
	u32 val;
	int ret;

	ret = pm_runtime_resume_and_get(priv->dev);
	if (ret < 0)
		return ret;

	val = readl_relaxed(priv->base + STM32_VREFBUF_CSR);
	val &= ~STM32_ENVR;
	writel_relaxed(val, priv->base + STM32_VREFBUF_CSR);

	pm_runtime_mark_last_busy(priv->dev);
	pm_runtime_put_autosuspend(priv->dev);

	return 0;
}
/*
 * Report whether the buffer is enabled: non-zero (ENVR bit) when on,
 * 0 when off, or a negative errno if the device could not be resumed.
 */
static int stm32_vrefbuf_is_enabled(struct regulator_dev *rdev)
{
	struct stm32_vrefbuf *priv = rdev_get_drvdata(rdev);
	int ret;

	ret = pm_runtime_resume_and_get(priv->dev);
	if (ret < 0)
		return ret;

	ret = readl_relaxed(priv->base + STM32_VREFBUF_CSR) & STM32_ENVR;

	pm_runtime_mark_last_busy(priv->dev);
	pm_runtime_put_autosuspend(priv->dev);

	return ret;
}
/* Program the VRS voltage-scale field with the selector (table index). */
static int stm32_vrefbuf_set_voltage_sel(struct regulator_dev *rdev,
					 unsigned sel)
{
	struct stm32_vrefbuf *priv = rdev_get_drvdata(rdev);
	u32 val;
	int ret;

	ret = pm_runtime_resume_and_get(priv->dev);
	if (ret < 0)
		return ret;

	val = readl_relaxed(priv->base + STM32_VREFBUF_CSR);
	val = (val & ~STM32_VRS) | FIELD_PREP(STM32_VRS, sel);
	writel_relaxed(val, priv->base + STM32_VREFBUF_CSR);

	pm_runtime_mark_last_busy(priv->dev);
	pm_runtime_put_autosuspend(priv->dev);

	return 0;
}
/* Read back the currently-programmed voltage selector (VRS field of CSR). */
static int stm32_vrefbuf_get_voltage_sel(struct regulator_dev *rdev)
{
	struct stm32_vrefbuf *priv = rdev_get_drvdata(rdev);
	u32 val;
	int ret;

	ret = pm_runtime_resume_and_get(priv->dev);
	if (ret < 0)
		return ret;

	val = readl_relaxed(priv->base + STM32_VREFBUF_CSR);
	ret = FIELD_GET(STM32_VRS, val);

	pm_runtime_mark_last_busy(priv->dev);
	pm_runtime_put_autosuspend(priv->dev);

	return ret;
}
/* Operations for the single table-based voltage output. */
static const struct regulator_ops stm32_vrefbuf_volt_ops = {
	.enable		= stm32_vrefbuf_enable,
	.disable	= stm32_vrefbuf_disable,
	.is_enabled	= stm32_vrefbuf_is_enabled,
	.get_voltage_sel = stm32_vrefbuf_get_voltage_sel,
	.set_voltage_sel = stm32_vrefbuf_set_voltage_sel,
	.list_voltage	= regulator_list_voltage_table,
};
/* Static description of the "vref" regulator, supplied from "vdda". */
static const struct regulator_desc stm32_vrefbuf_regu = {
	.name = "vref",
	.supply_name = "vdda",
	.volt_table = stm32_vrefbuf_voltages,
	.n_voltages = ARRAY_SIZE(stm32_vrefbuf_voltages),
	.ops = &stm32_vrefbuf_volt_ops,
	.off_on_delay = 1000,	/* us the core waits between a disable and the next enable */
	.type = REGULATOR_VOLTAGE,
	.owner = THIS_MODULE,
};
/*
 * Probe: map the registers, grab the peripheral clock, bring the device up
 * under runtime PM with autosuspend, and register the "vref" regulator.
 */
static int stm32_vrefbuf_probe(struct platform_device *pdev)
{
	struct stm32_vrefbuf *priv;
	struct regulator_config config = { };
	struct regulator_dev *rdev;
	int ret;

	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;
	priv->dev = &pdev->dev;

	priv->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(priv->base))
		return PTR_ERR(priv->base);

	priv->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(priv->clk))
		return PTR_ERR(priv->clk);

	/*
	 * Take an initial usage reference and mark the device active before
	 * enabling runtime PM: the clock is enabled just below, and the
	 * reference is dropped at the end of probe (or in the error path).
	 */
	pm_runtime_get_noresume(&pdev->dev);
	pm_runtime_set_active(&pdev->dev);
	pm_runtime_set_autosuspend_delay(&pdev->dev,
					 STM32_VREFBUF_AUTO_SUSPEND_DELAY_MS);
	pm_runtime_use_autosuspend(&pdev->dev);
	pm_runtime_enable(&pdev->dev);

	ret = clk_prepare_enable(priv->clk);
	if (ret) {
		dev_err(&pdev->dev, "clk prepare failed with error %d\n", ret);
		goto err_pm_stop;
	}

	config.dev = &pdev->dev;
	config.driver_data = priv;
	config.of_node = pdev->dev.of_node;
	config.init_data = of_get_regulator_init_data(&pdev->dev,
						      pdev->dev.of_node,
						      &stm32_vrefbuf_regu);

	rdev = regulator_register(&pdev->dev, &stm32_vrefbuf_regu, &config);
	if (IS_ERR(rdev)) {
		ret = PTR_ERR(rdev);
		dev_err(&pdev->dev, "register failed with error %d\n", ret);
		goto err_clk_dis;
	}
	platform_set_drvdata(pdev, rdev);

	/* Drop the initial reference; the device may now autosuspend. */
	pm_runtime_mark_last_busy(&pdev->dev);
	pm_runtime_put_autosuspend(&pdev->dev);

	return 0;

err_clk_dis:
	clk_disable_unprepare(priv->clk);
err_pm_stop:
	pm_runtime_disable(&pdev->dev);
	pm_runtime_set_suspended(&pdev->dev);
	pm_runtime_put_noidle(&pdev->dev);

	return ret;
}
/*
 * Remove: unregister the regulator and tear down clock/runtime PM.
 * A runtime-PM reference is held across the teardown so the clock stays
 * on while the hardware is still being touched.
 */
static int stm32_vrefbuf_remove(struct platform_device *pdev)
{
	struct regulator_dev *rdev = platform_get_drvdata(pdev);
	struct stm32_vrefbuf *priv = rdev_get_drvdata(rdev);

	pm_runtime_get_sync(&pdev->dev);
	regulator_unregister(rdev);
	clk_disable_unprepare(priv->clk);
	pm_runtime_disable(&pdev->dev);
	pm_runtime_set_suspended(&pdev->dev);
	pm_runtime_put_noidle(&pdev->dev);

	return 0;
}	/* was "};" - a stray semicolon after a function body is not valid ISO C */
/* Runtime PM: gate the peripheral clock while the device is idle. */
static int __maybe_unused stm32_vrefbuf_runtime_suspend(struct device *dev)
{
	struct regulator_dev *rdev = dev_get_drvdata(dev);
	struct stm32_vrefbuf *priv = rdev_get_drvdata(rdev);

	clk_disable_unprepare(priv->clk);

	return 0;
}
/* Runtime PM: re-enable the peripheral clock before register access. */
static int __maybe_unused stm32_vrefbuf_runtime_resume(struct device *dev)
{
	struct regulator_dev *rdev = dev_get_drvdata(dev);
	struct stm32_vrefbuf *priv = rdev_get_drvdata(rdev);

	return clk_prepare_enable(priv->clk);
}
/* System sleep is implemented by forcing runtime suspend/resume. */
static const struct dev_pm_ops stm32_vrefbuf_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
				pm_runtime_force_resume)
	SET_RUNTIME_PM_OPS(stm32_vrefbuf_runtime_suspend,
			   stm32_vrefbuf_runtime_resume,
			   NULL)
};
/* Device-tree match table. */
static const struct of_device_id __maybe_unused stm32_vrefbuf_of_match[] = {
	{ .compatible = "st,stm32-vrefbuf", },
	{},
};
MODULE_DEVICE_TABLE(of, stm32_vrefbuf_of_match);
/* Platform-driver glue and module metadata. */
static struct platform_driver stm32_vrefbuf_driver = {
	.probe = stm32_vrefbuf_probe,
	.remove = stm32_vrefbuf_remove,
	.driver = {
		.name = "stm32-vrefbuf",
		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
		.of_match_table = of_match_ptr(stm32_vrefbuf_of_match),
		.pm = &stm32_vrefbuf_pm_ops,
	},
};
module_platform_driver(stm32_vrefbuf_driver);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Fabrice Gasnier <[email protected]>");
MODULE_DESCRIPTION("STMicroelectronics STM32 VREFBUF driver");
MODULE_ALIAS("platform:stm32-vrefbuf");
| linux-master | drivers/regulator/stm32-vrefbuf.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* devres.c -- Voltage/Current Regulator framework devres implementation.
*
* Copyright 2013 Linaro Ltd
*/
#include <linux/kernel.h>
#include <linux/err.h>
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
#include <linux/regulator/driver.h>
#include <linux/module.h>
#include "internal.h"
/* devres destructor: drop the reference taken by _devm_regulator_get(). */
static void devm_regulator_release(struct device *dev, void *res)
{
	struct regulator **r = res;

	regulator_put(*r);
}
/*
 * Core of the managed get: allocate a devres slot, look the regulator up,
 * and only register the slot when the lookup succeeded.
 */
static struct regulator *_devm_regulator_get(struct device *dev, const char *id,
					     int get_type)
{
	struct regulator **slot;
	struct regulator *reg;

	slot = devres_alloc(devm_regulator_release, sizeof(*slot), GFP_KERNEL);
	if (!slot)
		return ERR_PTR(-ENOMEM);

	reg = _regulator_get(dev, id, get_type);
	if (IS_ERR(reg)) {
		devres_free(slot);
		return reg;
	}

	*slot = reg;
	devres_add(dev, slot);

	return reg;
}
/**
 * devm_regulator_get - Resource managed regulator_get()
 * @dev: device to supply
 * @id: supply name or regulator ID.
 *
 * Managed regulator_get(). Regulators returned from this function are
 * automatically regulator_put() on driver detach. See regulator_get() for more
 * information.
 *
 * Return: pointer to the regulator, or an ERR_PTR() encoded error.
 */
struct regulator *devm_regulator_get(struct device *dev, const char *id)
{
	return _devm_regulator_get(dev, id, NORMAL_GET);
}
EXPORT_SYMBOL_GPL(devm_regulator_get);
/**
 * devm_regulator_get_exclusive - Resource managed regulator_get_exclusive()
 * @dev: device to supply
 * @id: supply name or regulator ID.
 *
 * Managed regulator_get_exclusive(). Regulators returned from this function
 * are automatically regulator_put() on driver detach. See regulator_get() for
 * more information.
 *
 * Return: pointer to the regulator, or an ERR_PTR() encoded error.
 */
struct regulator *devm_regulator_get_exclusive(struct device *dev,
					       const char *id)
{
	return _devm_regulator_get(dev, id, EXCLUSIVE_GET);
}
EXPORT_SYMBOL_GPL(devm_regulator_get_exclusive);
/* devm action callback: disable the regulator enabled at get time. */
static void regulator_action_disable(void *d)
{
	/* void * converts implicitly; no cast needed. */
	regulator_disable(d);
}
static int _devm_regulator_get_enable(struct device *dev, const char *id,
int get_type)
{
struct regulator *r;
int ret;
r = _devm_regulator_get(dev, id, get_type);
if (IS_ERR(r))
return PTR_ERR(r);
ret = regulator_enable(r);
if (!ret)
ret = devm_add_action_or_reset(dev, ®ulator_action_disable, r);
if (ret)
devm_regulator_put(r);
return ret;
}
/**
 * devm_regulator_get_enable_optional - Resource managed regulator get and enable
 * @dev: device to supply
 * @id: supply name or regulator ID.
 *
 * Get and enable regulator for duration of the device life-time.
 * regulator_disable() and regulator_put() are automatically called on driver
 * detach. See regulator_get_optional() and regulator_enable() for more
 * information.
 *
 * Return: 0 on success, a negative errno on failure.
 */
int devm_regulator_get_enable_optional(struct device *dev, const char *id)
{
	return _devm_regulator_get_enable(dev, id, OPTIONAL_GET);
}
EXPORT_SYMBOL_GPL(devm_regulator_get_enable_optional);
/**
 * devm_regulator_get_enable - Resource managed regulator get and enable
 * @dev: device to supply
 * @id: supply name or regulator ID.
 *
 * Get and enable regulator for duration of the device life-time.
 * regulator_disable() and regulator_put() are automatically called on driver
 * detach. See regulator_get() and regulator_enable() for more
 * information.
 *
 * Return: 0 on success, a negative errno on failure.
 */
int devm_regulator_get_enable(struct device *dev, const char *id)
{
	return _devm_regulator_get_enable(dev, id, NORMAL_GET);
}
EXPORT_SYMBOL_GPL(devm_regulator_get_enable);
/**
 * devm_regulator_get_optional - Resource managed regulator_get_optional()
 * @dev: device to supply
 * @id: supply name or regulator ID.
 *
 * Managed regulator_get_optional(). Regulators returned from this
 * function are automatically regulator_put() on driver detach. See
 * regulator_get_optional() for more information.
 *
 * Return: pointer to the regulator, or an ERR_PTR() encoded error.
 */
struct regulator *devm_regulator_get_optional(struct device *dev,
					      const char *id)
{
	return _devm_regulator_get(dev, id, OPTIONAL_GET);
}
EXPORT_SYMBOL_GPL(devm_regulator_get_optional);
/* devres match callback: true when the slot holds @data's regulator. */
static int devm_regulator_match(struct device *dev, void *res, void *data)
{
	struct regulator **r = res;

	/* A NULL slot or entry would indicate devres corruption. */
	if (WARN_ON(!r || !*r))
		return 0;

	return *r == data;
}
/**
 * devm_regulator_put - Resource managed regulator_put()
 * @regulator: regulator to free
 *
 * Deallocate a regulator allocated with devm_regulator_get(). Normally
 * this function will not need to be called and the resource management
 * code will ensure that the resource is freed.
 */
void devm_regulator_put(struct regulator *regulator)
{
	int rc = devres_release(regulator->dev, devm_regulator_release,
				devm_regulator_match, regulator);

	/* Non-zero means no matching devres entry was found. */
	WARN_ON(rc);
}
EXPORT_SYMBOL_GPL(devm_regulator_put);
/* devres payload tracking one bulk-get so it can be bulk-freed on detach. */
struct regulator_bulk_devres {
	struct regulator_bulk_data *consumers;	/* caller-owned consumer array */
	int num_consumers;			/* number of entries in @consumers */
};
/* devres destructor: release all regulators acquired by a bulk get. */
static void devm_regulator_bulk_release(struct device *dev, void *res)
{
	struct regulator_bulk_devres *devres = res;

	regulator_bulk_free(devres->num_consumers, devres->consumers);
}
/*
 * Common helper: bulk-get @num_consumers supplies and register a devres
 * entry that frees them on driver detach.
 */
static int _devm_regulator_bulk_get(struct device *dev, int num_consumers,
				    struct regulator_bulk_data *consumers,
				    enum regulator_get_type get_type)
{
	struct regulator_bulk_devres *dr;
	int err;

	dr = devres_alloc(devm_regulator_bulk_release, sizeof(*dr),
			  GFP_KERNEL);
	if (!dr)
		return -ENOMEM;

	err = _regulator_bulk_get(dev, num_consumers, consumers, get_type);
	if (err) {
		devres_free(dr);
		return err;
	}

	dr->consumers = consumers;
	dr->num_consumers = num_consumers;
	devres_add(dev, dr);

	return 0;
}
/**
 * devm_regulator_bulk_get - managed get multiple regulator consumers
 *
 * @dev: device to supply
 * @num_consumers: number of consumers to register
 * @consumers: configuration of consumers; clients are stored here.
 *
 * @return 0 on success, an errno on failure.
 *
 * This helper function allows drivers to get several regulator
 * consumers in one operation with management, the regulators will
 * automatically be freed when the device is unbound. If any of the
 * regulators cannot be acquired then any regulators that were
 * allocated will be freed before returning to the caller.
 */
int devm_regulator_bulk_get(struct device *dev, int num_consumers,
			    struct regulator_bulk_data *consumers)
{
	return _devm_regulator_bulk_get(dev, num_consumers, consumers, NORMAL_GET);
}
EXPORT_SYMBOL_GPL(devm_regulator_bulk_get);
/**
 * devm_regulator_bulk_get_exclusive - managed exclusive get of multiple
 * regulator consumers
 *
 * @dev: device to supply
 * @num_consumers: number of consumers to register
 * @consumers: configuration of consumers; clients are stored here.
 *
 * @return 0 on success, an errno on failure.
 *
 * This helper function allows drivers to exclusively get several
 * regulator consumers in one operation with management, the regulators
 * will automatically be freed when the device is unbound. If any of
 * the regulators cannot be acquired then any regulators that were
 * allocated will be freed before returning to the caller.
 */
int devm_regulator_bulk_get_exclusive(struct device *dev, int num_consumers,
				      struct regulator_bulk_data *consumers)
{
	return _devm_regulator_bulk_get(dev, num_consumers, consumers, EXCLUSIVE_GET);
}
EXPORT_SYMBOL_GPL(devm_regulator_bulk_get_exclusive);
/**
 * devm_regulator_bulk_get_const - devm_regulator_bulk_get() w/ const data
 *
 * @dev: device to supply
 * @num_consumers: number of consumers to register
 * @in_consumers: const configuration of consumers
 * @out_consumers: in_consumers is copied here and this is passed to
 *		   devm_regulator_bulk_get().
 *
 * This is a convenience function to allow bulk regulator configuration
 * to be stored "static const" in files.
 *
 * Return: 0 on success, an errno on failure.
 */
int devm_regulator_bulk_get_const(struct device *dev, int num_consumers,
				  const struct regulator_bulk_data *in_consumers,
				  struct regulator_bulk_data **out_consumers)
{
	struct regulator_bulk_data *copy;

	/* Duplicate the const table into device-managed memory. */
	copy = devm_kmemdup(dev, in_consumers,
			    num_consumers * sizeof(*in_consumers),
			    GFP_KERNEL);
	*out_consumers = copy;
	if (!copy)
		return -ENOMEM;

	return devm_regulator_bulk_get(dev, num_consumers, copy);
}
EXPORT_SYMBOL_GPL(devm_regulator_bulk_get_const);
/* devres match callback: true when @res tracks the consumer array @data. */
static int devm_regulator_bulk_match(struct device *dev, void *res,
				     void *data)
{
	struct regulator_bulk_devres *match = res;
	struct regulator_bulk_data *target = data;

	/*
	 * We check the put uses same consumer list as the get did.
	 * We _could_ scan all entries in consumer array and check the
	 * regulators match but ATM I don't see the need. We can change this
	 * later if needed.
	 */
	return match->consumers == target;
}
/**
 * devm_regulator_bulk_put - Resource managed regulator_bulk_put()
 * @consumers: consumers to free
 *
 * Deallocate regulators allocated with devm_regulator_bulk_get(). Normally
 * this function will not need to be called and the resource management
 * code will ensure that the resource is freed.
 */
void devm_regulator_bulk_put(struct regulator_bulk_data *consumers)
{
	int rc;
	/* Any consumer's dev works; they were all registered on the same one. */
	struct regulator *regulator = consumers[0].consumer;

	rc = devres_release(regulator->dev, devm_regulator_bulk_release,
			    devm_regulator_bulk_match, consumers);
	if (rc != 0)
		WARN_ON(rc);
}
EXPORT_SYMBOL_GPL(devm_regulator_bulk_put);
/* devm action callback: disable every regulator in the tracked bulk set. */
static void devm_regulator_bulk_disable(void *res)
{
	struct regulator_bulk_devres *devres = res;
	int i;

	for (i = 0; i < devres->num_consumers; i++)
		regulator_disable(devres->consumers[i].consumer);
}
/**
 * devm_regulator_bulk_get_enable - managed get'n enable multiple regulators
 *
 * @dev: device to supply
 * @num_consumers: number of consumers to register
 * @id: list of supply names or regulator IDs
 *
 * @return 0 on success, an errno on failure.
 *
 * This helper function allows drivers to get several regulator
 * consumers in one operation with management, the regulators will
 * automatically be freed when the device is unbound. If any of the
 * regulators cannot be acquired then any regulators that were
 * allocated will be freed before returning to the caller.
 */
int devm_regulator_bulk_get_enable(struct device *dev, int num_consumers,
				   const char * const *id)
{
	struct regulator_bulk_devres *devres;
	struct regulator_bulk_data *consumers;
	int i, ret;

	devres = devm_kmalloc(dev, sizeof(*devres), GFP_KERNEL);
	if (!devres)
		return -ENOMEM;

	devres->consumers = devm_kcalloc(dev, num_consumers, sizeof(*consumers),
					 GFP_KERNEL);
	consumers = devres->consumers;
	if (!consumers)
		return -ENOMEM;

	devres->num_consumers = num_consumers;

	for (i = 0; i < num_consumers; i++)
		consumers[i].supply = id[i];

	ret = devm_regulator_bulk_get(dev, num_consumers, consumers);
	if (ret)
		return ret;

	/* Enable all supplies; on failure roll back the already-enabled ones. */
	for (i = 0; i < num_consumers; i++) {
		ret = regulator_enable(consumers[i].consumer);
		if (ret)
			goto unwind;
	}

	ret = devm_add_action(dev, devm_regulator_bulk_disable, devres);
	if (!ret)
		return 0;

unwind:
	while (--i >= 0)
		regulator_disable(consumers[i].consumer);

	devm_regulator_bulk_put(consumers);

	return ret;
}
EXPORT_SYMBOL_GPL(devm_regulator_bulk_get_enable);
/* devres destructor: unregister a regulator registered via devm. */
static void devm_rdev_release(struct device *dev, void *res)
{
	regulator_unregister(*(struct regulator_dev **)res);
}
/**
 * devm_regulator_register - Resource managed regulator_register()
 * @dev: device to supply
 * @regulator_desc: regulator to register
 * @config: runtime configuration for regulator
 *
 * Called by regulator drivers to register a regulator. Returns a
 * valid pointer to struct regulator_dev on success or an ERR_PTR() on
 * error. The regulator will automatically be released when the device
 * is unbound.
 */
struct regulator_dev *devm_regulator_register(struct device *dev,
				  const struct regulator_desc *regulator_desc,
				  const struct regulator_config *config)
{
	struct regulator_dev **slot;
	struct regulator_dev *rdev;

	slot = devres_alloc(devm_rdev_release, sizeof(*slot), GFP_KERNEL);
	if (!slot)
		return ERR_PTR(-ENOMEM);

	rdev = regulator_register(dev, regulator_desc, config);
	if (IS_ERR(rdev)) {
		devres_free(slot);
		return rdev;
	}

	*slot = rdev;
	devres_add(dev, slot);

	return rdev;
}
EXPORT_SYMBOL_GPL(devm_regulator_register);
/* devres payload identifying one registered supply alias. */
struct regulator_supply_alias_match {
	struct device *dev;	/* device the alias was registered for */
	const char *id;		/* supply name or regulator ID */
};
/* devres match callback: same device and same supply id. */
static int devm_regulator_match_supply_alias(struct device *dev, void *res,
					     void *data)
{
	struct regulator_supply_alias_match *match = res;
	struct regulator_supply_alias_match *target = data;

	return match->dev == target->dev && strcmp(match->id, target->id) == 0;
}
/* devres destructor: drop the supply alias recorded in @res. */
static void devm_regulator_destroy_supply_alias(struct device *dev, void *res)
{
	struct regulator_supply_alias_match *match = res;

	regulator_unregister_supply_alias(match->dev, match->id);
}
/**
 * devm_regulator_register_supply_alias - Resource managed
 * regulator_register_supply_alias()
 *
 * @dev: device to supply
 * @id: supply name or regulator ID
 * @alias_dev: device that should be used to lookup the supply
 * @alias_id: supply name or regulator ID that should be used to lookup the
 * supply
 *
 * The supply alias will automatically be unregistered when the source
 * device is unbound.
 */
int devm_regulator_register_supply_alias(struct device *dev, const char *id,
					 struct device *alias_dev,
					 const char *alias_id)
{
	struct regulator_supply_alias_match *match;
	int ret;

	match = devres_alloc(devm_regulator_destroy_supply_alias,
			     sizeof(*match), GFP_KERNEL);
	if (!match)
		return -ENOMEM;

	match->dev = dev;
	match->id = id;

	ret = regulator_register_supply_alias(dev, id, alias_dev, alias_id);
	if (ret < 0) {
		devres_free(match);
		return ret;
	}

	devres_add(dev, match);

	return 0;
}
EXPORT_SYMBOL_GPL(devm_regulator_register_supply_alias);
/* Release one devm-registered supply alias; warns if it was not found. */
static void devm_regulator_unregister_supply_alias(struct device *dev,
						   const char *id)
{
	struct regulator_supply_alias_match match;
	int rc;

	match.dev = dev;
	match.id = id;

	rc = devres_release(dev, devm_regulator_destroy_supply_alias,
			    devm_regulator_match_supply_alias, &match);
	if (rc != 0)
		WARN_ON(rc);
}
/**
 * devm_regulator_bulk_register_supply_alias - Managed register
 * multiple aliases
 *
 * @dev: device to supply
 * @id: list of supply names or regulator IDs
 * @alias_dev: device that should be used to lookup the supply
 * @alias_id: list of supply names or regulator IDs that should be used to
 * lookup the supply
 * @num_id: number of aliases to register
 *
 * @return 0 on success, an errno on failure.
 *
 * This helper function allows drivers to register several supply
 * aliases in one operation, the aliases will be automatically
 * unregistered when the source device is unbound. If any of the
 * aliases cannot be registered any aliases that were registered
 * will be removed before returning to the caller.
 */
int devm_regulator_bulk_register_supply_alias(struct device *dev,
					      const char *const *id,
					      struct device *alias_dev,
					      const char *const *alias_id,
					      int num_id)
{
	int i;
	int ret;

	for (i = 0; i < num_id; ++i) {
		ret = devm_regulator_register_supply_alias(dev, id[i],
							   alias_dev,
							   alias_id[i]);
		if (ret < 0)
			goto err;
	}

	return 0;

err:
	/* Roll back the aliases registered before the failing one. */
	dev_err(dev,
		"Failed to create supply alias %s,%s -> %s,%s\n",
		id[i], dev_name(dev), alias_id[i], dev_name(alias_dev));

	while (--i >= 0)
		devm_regulator_unregister_supply_alias(dev, id[i]);

	return ret;
}
EXPORT_SYMBOL_GPL(devm_regulator_bulk_register_supply_alias);
/* devres payload identifying one registered notifier. */
struct regulator_notifier_match {
	struct regulator *regulator;	/* consumer the notifier is attached to */
	struct notifier_block *nb;	/* the registered notifier block */
};
/* devres match callback: same regulator and same notifier block. */
static int devm_regulator_match_notifier(struct device *dev, void *res,
					 void *data)
{
	struct regulator_notifier_match *match = res;
	struct regulator_notifier_match *target = data;

	return match->regulator == target->regulator && match->nb == target->nb;
}
/* devres destructor: detach the notifier recorded in @res. */
static void devm_regulator_destroy_notifier(struct device *dev, void *res)
{
	struct regulator_notifier_match *match = res;

	regulator_unregister_notifier(match->regulator, match->nb);
}
/**
 * devm_regulator_register_notifier - Resource managed
 * regulator_register_notifier
 *
 * @regulator: regulator source
 * @nb: notifier block
 *
 * The notifier will be registered under the consumer device and will
 * automatically be unregistered when that device is unbound.
 *
 * Return: 0 on success, a negative errno on failure.
 */
int devm_regulator_register_notifier(struct regulator *regulator,
				     struct notifier_block *nb)
{
	struct regulator_notifier_match *match;
	int ret;

	match = devres_alloc(devm_regulator_destroy_notifier,
			     sizeof(struct regulator_notifier_match),
			     GFP_KERNEL);
	if (!match)
		return -ENOMEM;

	match->regulator = regulator;
	match->nb = nb;

	ret = regulator_register_notifier(regulator, nb);
	if (ret < 0) {
		devres_free(match);
		return ret;
	}

	devres_add(regulator->dev, match);

	return 0;
}
EXPORT_SYMBOL_GPL(devm_regulator_register_notifier);
/**
 * devm_regulator_unregister_notifier - Resource managed
 * regulator_unregister_notifier()
 *
 * @regulator: regulator source
 * @nb: notifier block
 *
 * Unregister a notifier registered with devm_regulator_register_notifier().
 * Normally this function will not need to be called and the resource
 * management code will ensure that the resource is freed.
 */
void devm_regulator_unregister_notifier(struct regulator *regulator,
					struct notifier_block *nb)
{
	struct regulator_notifier_match match;
	int rc;

	match.regulator = regulator;
	match.nb = nb;

	rc = devres_release(regulator->dev, devm_regulator_destroy_notifier,
			    devm_regulator_match_notifier, &match);
	if (rc != 0)
		WARN_ON(rc);
}
EXPORT_SYMBOL_GPL(devm_regulator_unregister_notifier);
/*
 * devm action callback for devm_regulator_irq_helper().
 * NOTE(review): regulator_irq_helper_cancel() takes a void ** (presumably so
 * it can NULL the caller's pointer); passing &res only clears the local copy,
 * which is harmless here since the parameter dies when this returns - confirm
 * against the helper's definition.
 */
static void regulator_irq_helper_drop(void *res)
{
	regulator_irq_helper_cancel(&res);
}
/**
 * devm_regulator_irq_helper - resource managed registration of IRQ based
 * regulator event/error notifier
 *
 * @dev:		device to which lifetime the helper's lifetime is
 *			bound.
 * @d:			IRQ helper descriptor.
 * @irq:		IRQ used to inform events/errors to be notified.
 * @irq_flags:		Extra IRQ flags to be OR'ed with the default
 *			IRQF_ONESHOT when requesting the (threaded) irq.
 * @common_errs:	Errors which can be flagged by this IRQ for all rdevs.
 *			When IRQ is re-enabled these errors will be cleared
 *			from all associated regulators
 * @per_rdev_errs:	Optional error flag array describing errors specific
 *			for only some of the regulators. These errors will be
 *			or'ed with common errors. If this is given the array
 *			should contain rdev_amount flags. Can be set to NULL
 *			if there is no regulator specific error flags for this
 *			IRQ.
 * @rdev:		Array of pointers to regulators associated with this
 *			IRQ.
 * @rdev_amount:	Amount of regulators associated with this IRQ.
 *
 * Return: handle to irq_helper or an ERR_PTR() encoded error code.
 */
void *devm_regulator_irq_helper(struct device *dev,
				const struct regulator_irq_desc *d, int irq,
				int irq_flags, int common_errs,
				int *per_rdev_errs,
				struct regulator_dev **rdev, int rdev_amount)
{
	void *ptr;
	int ret;

	ptr = regulator_irq_helper(dev, d, irq, irq_flags, common_errs,
				   per_rdev_errs, rdev, rdev_amount);
	if (IS_ERR(ptr))
		return ptr;

	/* Tie the helper's lifetime to @dev so it is cancelled on unbind. */
	ret = devm_add_action_or_reset(dev, regulator_irq_helper_drop, ptr);
	if (ret)
		return ERR_PTR(ret);

	return ptr;
}
EXPORT_SYMBOL_GPL(devm_regulator_irq_helper);
| linux-master | drivers/regulator/devres.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* tps65910.c -- TI tps65910
*
* Copyright 2010 Texas Instruments Inc.
*
* Author: Graeme Gregory <[email protected]>
* Author: Jorge Eduardo Candelaria <[email protected]>
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/err.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/regulator/driver.h>
#include <linux/regulator/machine.h>
#include <linux/slab.h>
#include <linux/mfd/tps65910.h>
#include <linux/regulator/of_regulator.h>
#define TPS65910_SUPPLY_STATE_ENABLED	0x1
/* All sleep-control bits that hand a supply over to an external enable pin. */
#define EXT_SLEEP_CONTROL (TPS65910_SLEEP_CONTROL_EXT_INPUT_EN1 |	\
			TPS65910_SLEEP_CONTROL_EXT_INPUT_EN2 |		\
			TPS65910_SLEEP_CONTROL_EXT_INPUT_EN3 |		\
			TPS65911_SLEEP_CONTROL_EXT_INPUT_SLEEP)

/* supported VIO voltages in microvolts */
static const unsigned int VIO_VSEL_table[] = {
	1500000, 1800000, 2500000, 3300000,
};

/* VSEL tables for TPS65910 specific LDOs and dcdc's */

/* supported VRTC voltages in microvolts */
static const unsigned int VRTC_VSEL_table[] = {
	1800000,
};

/* supported VDD3 voltages in microvolts */
static const unsigned int VDD3_VSEL_table[] = {
	5000000,
};

/* supported VDIG1 voltages in microvolts */
static const unsigned int VDIG1_VSEL_table[] = {
	1200000, 1500000, 1800000, 2700000,
};

/* supported VDIG2 voltages in microvolts */
static const unsigned int VDIG2_VSEL_table[] = {
	1000000, 1100000, 1200000, 1800000,
};

/* supported VPLL voltages in microvolts */
static const unsigned int VPLL_VSEL_table[] = {
	1000000, 1100000, 1800000, 2500000,
};

/* supported VDAC voltages in microvolts */
static const unsigned int VDAC_VSEL_table[] = {
	1800000, 2600000, 2800000, 2850000,
};

/* supported VAUX1 voltages in microvolts */
static const unsigned int VAUX1_VSEL_table[] = {
	1800000, 2500000, 2800000, 2850000,
};

/* supported VAUX2 voltages in microvolts */
static const unsigned int VAUX2_VSEL_table[] = {
	1800000, 2800000, 2900000, 3300000,
};

/* supported VAUX33 voltages in microvolts */
static const unsigned int VAUX33_VSEL_table[] = {
	1800000, 2000000, 2800000, 3300000,
};

/* supported VMMC voltages in microvolts */
static const unsigned int VMMC_VSEL_table[] = {
	1800000, 2800000, 3000000, 3300000,
};

/* supported BBCH voltages in microvolts */
static const unsigned int VBB_VSEL_table[] = {
	3000000, 2520000, 3150000, 5000000,
};

/* Static per-regulator description used by both PMIC variants. */
struct tps_info {
	const char *name;			/* regulator name */
	const char *vin_name;			/* input supply name, if any */
	u8 n_voltages;				/* number of selectable voltages */
	const unsigned int *voltage_table;	/* fixed table, or NULL for selector-only rails */
	int enable_time_us;			/* typical ramp-up time */
};
/*
 * Per-regulator data for the TPS65910 variant.
 * NOTE(review): vdd1/vdd2 deliberately carry no voltage table here -
 * presumably their voltage is selector-computed elsewhere in the driver
 * (see the VDD1/VDD2 handling in the dcdc get-voltage path).
 */
static struct tps_info tps65910_regs[] = {
	{
		.name = "vrtc",
		.vin_name = "vcc7",
		.n_voltages = ARRAY_SIZE(VRTC_VSEL_table),
		.voltage_table = VRTC_VSEL_table,
		.enable_time_us = 2200,
	},
	{
		.name = "vio",
		.vin_name = "vccio",
		.n_voltages = ARRAY_SIZE(VIO_VSEL_table),
		.voltage_table = VIO_VSEL_table,
		.enable_time_us = 350,
	},
	{
		.name = "vdd1",
		.vin_name = "vcc1",
		.enable_time_us = 350,
	},
	{
		.name = "vdd2",
		.vin_name = "vcc2",
		.enable_time_us = 350,
	},
	{
		.name = "vdd3",
		.n_voltages = ARRAY_SIZE(VDD3_VSEL_table),
		.voltage_table = VDD3_VSEL_table,
		.enable_time_us = 200,
	},
	{
		.name = "vdig1",
		.vin_name = "vcc6",
		.n_voltages = ARRAY_SIZE(VDIG1_VSEL_table),
		.voltage_table = VDIG1_VSEL_table,
		.enable_time_us = 100,
	},
	{
		.name = "vdig2",
		.vin_name = "vcc6",
		.n_voltages = ARRAY_SIZE(VDIG2_VSEL_table),
		.voltage_table = VDIG2_VSEL_table,
		.enable_time_us = 100,
	},
	{
		.name = "vpll",
		.vin_name = "vcc5",
		.n_voltages = ARRAY_SIZE(VPLL_VSEL_table),
		.voltage_table = VPLL_VSEL_table,
		.enable_time_us = 100,
	},
	{
		.name = "vdac",
		.vin_name = "vcc5",
		.n_voltages = ARRAY_SIZE(VDAC_VSEL_table),
		.voltage_table = VDAC_VSEL_table,
		.enable_time_us = 100,
	},
	{
		.name = "vaux1",
		.vin_name = "vcc4",
		.n_voltages = ARRAY_SIZE(VAUX1_VSEL_table),
		.voltage_table = VAUX1_VSEL_table,
		.enable_time_us = 100,
	},
	{
		.name = "vaux2",
		.vin_name = "vcc4",
		.n_voltages = ARRAY_SIZE(VAUX2_VSEL_table),
		.voltage_table = VAUX2_VSEL_table,
		.enable_time_us = 100,
	},
	{
		.name = "vaux33",
		.vin_name = "vcc3",
		.n_voltages = ARRAY_SIZE(VAUX33_VSEL_table),
		.voltage_table = VAUX33_VSEL_table,
		.enable_time_us = 100,
	},
	{
		.name = "vmmc",
		.vin_name = "vcc3",
		.n_voltages = ARRAY_SIZE(VMMC_VSEL_table),
		.voltage_table = VMMC_VSEL_table,
		.enable_time_us = 100,
	},
	{
		.name = "vbb",
		.vin_name = "vcc7",
		.n_voltages = ARRAY_SIZE(VBB_VSEL_table),
		.voltage_table = VBB_VSEL_table,
	},
};
/*
 * Per-regulator data for the TPS65911 variant. Most rails here specify a
 * bare selector count (hex n_voltages) without a table - presumably these
 * are linear selector ranges handled elsewhere in the driver; confirm
 * against the TPS65911 voltage-selection code.
 */
static struct tps_info tps65911_regs[] = {
	{
		.name = "vrtc",
		.vin_name = "vcc7",
		.enable_time_us = 2200,
	},
	{
		.name = "vio",
		.vin_name = "vccio",
		.n_voltages = ARRAY_SIZE(VIO_VSEL_table),
		.voltage_table = VIO_VSEL_table,
		.enable_time_us = 350,
	},
	{
		.name = "vdd1",
		.vin_name = "vcc1",
		.n_voltages = 0x4C,
		.enable_time_us = 350,
	},
	{
		.name = "vdd2",
		.vin_name = "vcc2",
		.n_voltages = 0x4C,
		.enable_time_us = 350,
	},
	{
		.name = "vddctrl",
		.n_voltages = 0x44,
		.enable_time_us = 900,
	},
	{
		.name = "ldo1",
		.vin_name = "vcc6",
		.n_voltages = 0x33,
		.enable_time_us = 420,
	},
	{
		.name = "ldo2",
		.vin_name = "vcc6",
		.n_voltages = 0x33,
		.enable_time_us = 420,
	},
	{
		.name = "ldo3",
		.vin_name = "vcc5",
		.n_voltages = 0x1A,
		.enable_time_us = 230,
	},
	{
		.name = "ldo4",
		.vin_name = "vcc5",
		.n_voltages = 0x33,
		.enable_time_us = 230,
	},
	{
		.name = "ldo5",
		.vin_name = "vcc4",
		.n_voltages = 0x1A,
		.enable_time_us = 230,
	},
	{
		.name = "ldo6",
		.vin_name = "vcc3",
		.n_voltages = 0x1A,
		.enable_time_us = 230,
	},
	{
		.name = "ldo7",
		.vin_name = "vcc3",
		.n_voltages = 0x1A,
		.enable_time_us = 230,
	},
	{
		.name = "ldo8",
		.vin_name = "vcc3",
		.n_voltages = 0x1A,
		.enable_time_us = 230,
	},
};
/*
 * Encode a regulator's external-sleep-control location: high byte is a
 * register offset, low byte the bit value for that regulator.
 * NOTE(review): whether the low byte is a bit position or a mask is decided
 * by the consumers of these tables (not visible here) - confirm before use.
 */
#define EXT_CONTROL_REG_BITS(id, regs_offs, bits) (((regs_offs) << 8) | (bits))

/* Indexed by regulator id; entry 0 (VRTC) has no external control. */
static unsigned int tps65910_ext_sleep_control[] = {
	0,
	EXT_CONTROL_REG_BITS(VIO,    1, 0),
	EXT_CONTROL_REG_BITS(VDD1,   1, 1),
	EXT_CONTROL_REG_BITS(VDD2,   1, 2),
	EXT_CONTROL_REG_BITS(VDD3,   1, 3),
	EXT_CONTROL_REG_BITS(VDIG1,  0, 1),
	EXT_CONTROL_REG_BITS(VDIG2,  0, 2),
	EXT_CONTROL_REG_BITS(VPLL,   0, 6),
	EXT_CONTROL_REG_BITS(VDAC,   0, 7),
	EXT_CONTROL_REG_BITS(VAUX1,  0, 3),
	EXT_CONTROL_REG_BITS(VAUX2,  0, 4),
	EXT_CONTROL_REG_BITS(VAUX33, 0, 5),
	EXT_CONTROL_REG_BITS(VMMC,   0, 0),
};

/* TPS65911 equivalent of the table above. */
static unsigned int tps65911_ext_sleep_control[] = {
	0,
	EXT_CONTROL_REG_BITS(VIO,     1, 0),
	EXT_CONTROL_REG_BITS(VDD1,    1, 1),
	EXT_CONTROL_REG_BITS(VDD2,    1, 2),
	EXT_CONTROL_REG_BITS(VDDCTRL, 1, 3),
	EXT_CONTROL_REG_BITS(LDO1,    0, 1),
	EXT_CONTROL_REG_BITS(LDO2,    0, 2),
	EXT_CONTROL_REG_BITS(LDO3,    0, 7),
	EXT_CONTROL_REG_BITS(LDO4,    0, 6),
	EXT_CONTROL_REG_BITS(LDO5,    0, 3),
	EXT_CONTROL_REG_BITS(LDO6,    0, 0),
	EXT_CONTROL_REG_BITS(LDO7,    0, 5),
	EXT_CONTROL_REG_BITS(LDO8,    0, 4),
};

/* Driver state shared by all regulators of one PMIC instance. */
struct tps65910_reg {
	struct regulator_desc *desc;		/* regulator descriptors */
	struct tps65910 *mfd;			/* parent MFD device */
	struct regulator_dev **rdev;		/* registered regulator devices */
	struct tps_info **info;			/* per-regulator static info */
	int num_regulators;
	int mode;
	int (*get_ctrl_reg)(int);		/* id -> control-register lookup */
	unsigned int *ext_sleep_control;	/* id -> EXT_CONTROL_REG_BITS() code */
	unsigned int board_ext_control[TPS65910_NUM_REGS];
};
/*
 * Map a TPS65910 regulator id to its control register address.
 * Returns -EINVAL for unknown ids.
 */
static int tps65910_get_ctrl_register(int id)
{
	switch (id) {
	case TPS65910_REG_VRTC:
		return TPS65910_VRTC;
	case TPS65910_REG_VIO:
		return TPS65910_VIO;
	case TPS65910_REG_VDD1:
		return TPS65910_VDD1;
	case TPS65910_REG_VDD2:
		return TPS65910_VDD2;
	case TPS65910_REG_VDD3:
		return TPS65910_VDD3;
	case TPS65910_REG_VDIG1:
		return TPS65910_VDIG1;
	case TPS65910_REG_VDIG2:
		return TPS65910_VDIG2;
	case TPS65910_REG_VPLL:
		return TPS65910_VPLL;
	case TPS65910_REG_VDAC:
		return TPS65910_VDAC;
	case TPS65910_REG_VAUX1:
		return TPS65910_VAUX1;
	case TPS65910_REG_VAUX2:
		return TPS65910_VAUX2;
	case TPS65910_REG_VAUX33:
		return TPS65910_VAUX33;
	case TPS65910_REG_VMMC:
		return TPS65910_VMMC;
	case TPS65910_REG_VBB:
		return TPS65910_BBCH;
	default:
		return -EINVAL;
	}
}
/*
 * Map a TPS65911 regulator id to its control register address.
 * Returns -EINVAL for unknown ids.
 */
static int tps65911_get_ctrl_register(int id)
{
	switch (id) {
	case TPS65910_REG_VRTC:
		return TPS65910_VRTC;
	case TPS65910_REG_VIO:
		return TPS65910_VIO;
	case TPS65910_REG_VDD1:
		return TPS65910_VDD1;
	case TPS65910_REG_VDD2:
		return TPS65910_VDD2;
	case TPS65911_REG_VDDCTRL:
		return TPS65911_VDDCTRL;
	case TPS65911_REG_LDO1:
		return TPS65911_LDO1;
	case TPS65911_REG_LDO2:
		return TPS65911_LDO2;
	case TPS65911_REG_LDO3:
		return TPS65911_LDO3;
	case TPS65911_REG_LDO4:
		return TPS65911_LDO4;
	case TPS65911_REG_LDO5:
		return TPS65911_LDO5;
	case TPS65911_REG_LDO6:
		return TPS65911_LDO6;
	case TPS65911_REG_LDO7:
		return TPS65911_LDO7;
	case TPS65911_REG_LDO8:
		return TPS65911_LDO8;
	default:
		return -EINVAL;
	}
}
/*
 * Set the operating mode of a regulator via the ST bits of its control
 * register: ON = normal, ON+MODE = idle (low power), !ON = standby.
 */
static int tps65910_set_mode(struct regulator_dev *dev, unsigned int mode)
{
	struct tps65910_reg *pmic = rdev_get_drvdata(dev);
	struct regmap *regmap = rdev_get_regmap(dev);
	int id = rdev_get_id(dev);
	int reg = pmic->get_ctrl_reg(id);

	if (reg < 0)
		return reg;

	/*
	 * All three cases are expressed as regmap_update_bits();
	 * regmap_set_bits()/regmap_clear_bits() are thin wrappers of it.
	 */
	switch (mode) {
	case REGULATOR_MODE_NORMAL:
		return regmap_update_bits(regmap, reg,
					  LDO_ST_MODE_BIT | LDO_ST_ON_BIT,
					  LDO_ST_ON_BIT);
	case REGULATOR_MODE_IDLE:
		return regmap_update_bits(regmap, reg,
					  LDO_ST_MODE_BIT | LDO_ST_ON_BIT,
					  LDO_ST_MODE_BIT | LDO_ST_ON_BIT);
	case REGULATOR_MODE_STANDBY:
		return regmap_update_bits(regmap, reg, LDO_ST_ON_BIT, 0);
	default:
		return -EINVAL;
	}
}
/* Decode the current operating mode from the ST bits of the control register. */
static unsigned int tps65910_get_mode(struct regulator_dev *dev)
{
	struct tps65910_reg *pmic = rdev_get_drvdata(dev);
	struct regmap *regmap = rdev_get_regmap(dev);
	int id = rdev_get_id(dev);
	int reg, ret, value;

	reg = pmic->get_ctrl_reg(id);
	if (reg < 0)
		return reg;

	ret = regmap_read(regmap, reg, &value);
	if (ret < 0)
		return ret;

	if (!(value & LDO_ST_ON_BIT))
		return REGULATOR_MODE_STANDBY;

	return (value & LDO_ST_MODE_BIT) ? REGULATOR_MODE_IDLE
					 : REGULATOR_MODE_NORMAL;
}
/*
 * tps65910_get_voltage_dcdc_sel() - read the active selector of a DCDC rail
 *
 * VDD1/VDD2 (and VDDCTRL on TPS65911) can be driven either from the "OP"
 * (operational) or the "SR" register; the OP_CMD bit in the OP register
 * says which one is live.  The raw selector is normalised into the
 * 0-based range used by tps65910_list_voltage_dcdc() (raw values below 3
 * clamp to selector 0).
 *
 * Fix vs. original: the trailing "return -EINVAL" was unreachable (both
 * branches of the final if/else return); the intent is expressed as the
 * switch default instead.
 */
static int tps65910_get_voltage_dcdc_sel(struct regulator_dev *dev)
{
	struct regmap *regmap = rdev_get_regmap(dev);
	int ret, id = rdev_get_id(dev);
	int opvsel = 0, srvsel = 0, vselmax = 0, mult = 0, sr = 0;

	switch (id) {
	case TPS65910_REG_VDD1:
		ret = regmap_read(regmap, TPS65910_VDD1_OP, &opvsel);
		if (ret < 0)
			return ret;
		ret = regmap_read(regmap, TPS65910_VDD1, &mult);
		if (ret < 0)
			return ret;
		mult = (mult & VDD1_VGAIN_SEL_MASK) >> VDD1_VGAIN_SEL_SHIFT;
		ret = regmap_read(regmap, TPS65910_VDD1_SR, &srvsel);
		if (ret < 0)
			return ret;
		sr = opvsel & VDD1_OP_CMD_MASK;
		opvsel &= VDD1_OP_SEL_MASK;
		srvsel &= VDD1_SR_SEL_MASK;
		vselmax = 75;
		break;
	case TPS65910_REG_VDD2:
		ret = regmap_read(regmap, TPS65910_VDD2_OP, &opvsel);
		if (ret < 0)
			return ret;
		ret = regmap_read(regmap, TPS65910_VDD2, &mult);
		if (ret < 0)
			return ret;
		mult = (mult & VDD2_VGAIN_SEL_MASK) >> VDD2_VGAIN_SEL_SHIFT;
		ret = regmap_read(regmap, TPS65910_VDD2_SR, &srvsel);
		if (ret < 0)
			return ret;
		sr = opvsel & VDD2_OP_CMD_MASK;
		opvsel &= VDD2_OP_SEL_MASK;
		srvsel &= VDD2_SR_SEL_MASK;
		vselmax = 75;
		break;
	case TPS65911_REG_VDDCTRL:
		ret = regmap_read(regmap, TPS65911_VDDCTRL_OP, &opvsel);
		if (ret < 0)
			return ret;
		ret = regmap_read(regmap, TPS65911_VDDCTRL_SR, &srvsel);
		if (ret < 0)
			return ret;
		sr = opvsel & VDDCTRL_OP_CMD_MASK;
		opvsel &= VDDCTRL_OP_SEL_MASK;
		srvsel &= VDDCTRL_SR_SEL_MASK;
		vselmax = 64;
		break;
	default:
		/* This callback is only bound to the DCDC rails above. */
		return -EINVAL;
	}

	/* multiplier 0 == 1 but 2,3 normal */
	if (!mult)
		mult = 1;

	if (sr) {
		/* normalise to valid range */
		if (srvsel < 3)
			srvsel = 3;
		if (srvsel > vselmax)
			srvsel = vselmax;
		return srvsel - 3;
	} else {
		/* normalise to valid range*/
		if (opvsel < 3)
			opvsel = 3;
		if (opvsel > vselmax)
			opvsel = vselmax;
		return opvsel - 3;
	}
}
/*
 * Read the voltage selector of an LDO (or VBB) from its control
 * register, extracting the per-rail selector bit field.
 */
static int tps65910_get_voltage_sel(struct regulator_dev *dev)
{
	struct tps65910_reg *pmic = rdev_get_drvdata(dev);
	struct regmap *regmap = rdev_get_regmap(dev);
	int ret, ctrl_reg, val, id = rdev_get_id(dev);
	unsigned int mask, shift;

	ctrl_reg = pmic->get_ctrl_reg(id);
	if (ctrl_reg < 0)
		return ctrl_reg;

	ret = regmap_read(regmap, ctrl_reg, &val);
	if (ret < 0)
		return ret;

	switch (id) {
	case TPS65910_REG_VIO:
	case TPS65910_REG_VDIG1:
	case TPS65910_REG_VDIG2:
	case TPS65910_REG_VPLL:
	case TPS65910_REG_VDAC:
	case TPS65910_REG_VAUX1:
	case TPS65910_REG_VAUX2:
	case TPS65910_REG_VAUX33:
	case TPS65910_REG_VMMC:
		mask = LDO_SEL_MASK;
		shift = LDO_SEL_SHIFT;
		break;
	case TPS65910_REG_VBB:
		mask = BBCH_BBSEL_MASK;
		shift = BBCH_BBSEL_SHIFT;
		break;
	default:
		return -EINVAL;
	}

	return (val & mask) >> shift;
}
/* VDD3 has a fixed output; report the only entry of its voltage table. */
static int tps65910_get_voltage_vdd3(struct regulator_dev *dev)
{
	const unsigned int *volt_table = dev->desc->volt_table;

	return volt_table[0];
}
/*
 * tps65911_get_voltage_sel() - read the voltage selector of a TPS65911 LDO
 *
 * Fix vs. original: get_ctrl_reg() can return -EINVAL, but the result
 * was stored in an unsigned int and used unchecked, turning the error
 * into a bogus (huge) register address.  Keep it signed and bail out on
 * error, matching tps65910_get_voltage_sel().
 */
static int tps65911_get_voltage_sel(struct regulator_dev *dev)
{
	struct tps65910_reg *pmic = rdev_get_drvdata(dev);
	struct regmap *regmap = rdev_get_regmap(dev);
	int ret, reg, id = rdev_get_id(dev);
	unsigned int value;

	reg = pmic->get_ctrl_reg(id);
	if (reg < 0)
		return reg;

	ret = regmap_read(regmap, reg, &value);
	if (ret < 0)
		return ret;

	switch (id) {
	case TPS65911_REG_LDO1:
	case TPS65911_REG_LDO2:
	case TPS65911_REG_LDO4:
		value &= LDO1_SEL_MASK;
		value >>= LDO_SEL_SHIFT;
		break;
	case TPS65911_REG_LDO3:
	case TPS65911_REG_LDO5:
	case TPS65911_REG_LDO6:
	case TPS65911_REG_LDO7:
	case TPS65911_REG_LDO8:
		value &= LDO3_SEL_MASK;
		value >>= LDO_SEL_SHIFT;
		break;
	case TPS65910_REG_VIO:
		value &= LDO_SEL_MASK;
		value >>= LDO_SEL_SHIFT;
		break;
	default:
		return -EINVAL;
	}

	return value;
}
static int tps65910_set_voltage_dcdc_sel(struct regulator_dev *dev,
unsigned selector)
{
struct regmap *regmap = rdev_get_regmap(dev);
int id = rdev_get_id(dev), vsel;
int dcdc_mult = 0;
switch (id) {
case TPS65910_REG_VDD1:
dcdc_mult = (selector / VDD1_2_NUM_VOLT_FINE) + 1;
if (dcdc_mult == 1)
dcdc_mult--;
vsel = (selector % VDD1_2_NUM_VOLT_FINE) + 3;
regmap_update_bits(regmap, TPS65910_VDD1, VDD1_VGAIN_SEL_MASK,
dcdc_mult << VDD1_VGAIN_SEL_SHIFT);
regmap_write(regmap, TPS65910_VDD1_OP, vsel);
break;
case TPS65910_REG_VDD2:
dcdc_mult = (selector / VDD1_2_NUM_VOLT_FINE) + 1;
if (dcdc_mult == 1)
dcdc_mult--;
vsel = (selector % VDD1_2_NUM_VOLT_FINE) + 3;
regmap_update_bits(regmap, TPS65910_VDD2, VDD1_VGAIN_SEL_MASK,
dcdc_mult << VDD2_VGAIN_SEL_SHIFT);
regmap_write(regmap, TPS65910_VDD2_OP, vsel);
break;
case TPS65911_REG_VDDCTRL:
vsel = selector + 3;
regmap_write(regmap, TPS65911_VDDCTRL_OP, vsel);
break;
}
return 0;
}
/*
 * Write the voltage selector of an LDO (or VBB) into the per-rail
 * selector bit field of its control register.
 */
static int tps65910_set_voltage_sel(struct regulator_dev *dev,
				    unsigned selector)
{
	struct tps65910_reg *pmic = rdev_get_drvdata(dev);
	struct regmap *regmap = rdev_get_regmap(dev);
	int ctrl_reg, id = rdev_get_id(dev);
	unsigned int mask, shift;

	ctrl_reg = pmic->get_ctrl_reg(id);
	if (ctrl_reg < 0)
		return ctrl_reg;

	switch (id) {
	case TPS65910_REG_VIO:
	case TPS65910_REG_VDIG1:
	case TPS65910_REG_VDIG2:
	case TPS65910_REG_VPLL:
	case TPS65910_REG_VDAC:
	case TPS65910_REG_VAUX1:
	case TPS65910_REG_VAUX2:
	case TPS65910_REG_VAUX33:
	case TPS65910_REG_VMMC:
		mask = LDO_SEL_MASK;
		shift = LDO_SEL_SHIFT;
		break;
	case TPS65910_REG_VBB:
		mask = BBCH_BBSEL_MASK;
		shift = BBCH_BBSEL_SHIFT;
		break;
	default:
		return -EINVAL;
	}

	return regmap_update_bits(regmap, ctrl_reg, mask, selector << shift);
}
/*
 * Write the voltage selector of a TPS65911 LDO (or VIO/VBB) into the
 * selector bit field of its control register.  The three LDO families
 * differ only in the width of that field.
 */
static int tps65911_set_voltage_sel(struct regulator_dev *dev,
				    unsigned selector)
{
	struct tps65910_reg *pmic = rdev_get_drvdata(dev);
	struct regmap *regmap = rdev_get_regmap(dev);
	int ctrl_reg, id = rdev_get_id(dev);
	unsigned int mask;

	ctrl_reg = pmic->get_ctrl_reg(id);
	if (ctrl_reg < 0)
		return ctrl_reg;

	switch (id) {
	case TPS65911_REG_LDO1:
	case TPS65911_REG_LDO2:
	case TPS65911_REG_LDO4:
		mask = LDO1_SEL_MASK;
		break;
	case TPS65911_REG_LDO3:
	case TPS65911_REG_LDO5:
	case TPS65911_REG_LDO6:
	case TPS65911_REG_LDO7:
	case TPS65911_REG_LDO8:
		mask = LDO3_SEL_MASK;
		break;
	case TPS65910_REG_VIO:
		mask = LDO_SEL_MASK;
		break;
	case TPS65910_REG_VBB:
		/* VBB uses its own field layout. */
		return regmap_update_bits(regmap, ctrl_reg, BBCH_BBSEL_MASK,
					  selector << BBCH_BBSEL_SHIFT);
	default:
		return -EINVAL;
	}

	return regmap_update_bits(regmap, ctrl_reg, mask,
				  selector << LDO_SEL_SHIFT);
}
/*
 * Convert a DCDC selector to microvolts.  VDD1/VDD2 combine a fine step
 * with a coarse gain multiplier; VDDCTRL (TPS65911) is a plain linear
 * range.
 */
static int tps65910_list_voltage_dcdc(struct regulator_dev *dev,
				      unsigned selector)
{
	int uv_base, multiplier = 1, id = rdev_get_id(dev);

	switch (id) {
	case TPS65910_REG_VDD1:
	case TPS65910_REG_VDD2:
		multiplier = (selector / VDD1_2_NUM_VOLT_FINE) + 1;
		uv_base = VDD1_2_MIN_VOLT +
			  (selector % VDD1_2_NUM_VOLT_FINE) * VDD1_2_OFFSET;
		break;
	case TPS65911_REG_VDDCTRL:
		uv_base = VDDCTRL_MIN_VOLT + selector * VDDCTRL_OFFSET;
		break;
	default:
		BUG();
		return -EINVAL;
	}

	/* Scale the table value to microvolts and apply the gain. */
	return uv_base * 100 * multiplier;
}
/*
 * Convert a TPS65911 LDO selector to microvolts.  The lowest few
 * selector codes all alias the minimum output voltage, so they are
 * collapsed to step 0 before applying the per-family step size.
 */
static int tps65911_list_voltage(struct regulator_dev *dev, unsigned selector)
{
	struct tps65910_reg *pmic = rdev_get_drvdata(dev);
	int step_mv, sel = selector, id = rdev_get_id(dev);

	switch (id) {
	case TPS65911_REG_LDO1:
	case TPS65911_REG_LDO2:
	case TPS65911_REG_LDO4:
		/* The first 5 selector values all map to the minimum. */
		sel = (sel < 5) ? 0 : sel - 4;
		step_mv = 50;
		break;
	case TPS65911_REG_LDO3:
	case TPS65911_REG_LDO5:
	case TPS65911_REG_LDO6:
	case TPS65911_REG_LDO7:
	case TPS65911_REG_LDO8:
		/* The first 3 selector values all map to the minimum. */
		sel = (sel < 3) ? 0 : sel - 2;
		step_mv = 100;
		break;
	case TPS65910_REG_VIO:
		/* VIO has a discrete table rather than a linear range. */
		return pmic->info[id]->voltage_table[selector];
	default:
		return -EINVAL;
	}

	return (LDO_MIN_VOLT + sel * step_mv) * 1000;
}
/* Regulator ops (except VRTC) */

/* VDD1/VDD2 (and TPS65911 VDDCTRL): DCDCs with OP/SR selector registers. */
static const struct regulator_ops tps65910_ops_dcdc = {
	.is_enabled		= regulator_is_enabled_regmap,
	.enable			= regulator_enable_regmap,
	.disable		= regulator_disable_regmap,
	.set_mode		= tps65910_set_mode,
	.get_mode		= tps65910_get_mode,
	.get_voltage_sel	= tps65910_get_voltage_dcdc_sel,
	.set_voltage_sel	= tps65910_set_voltage_dcdc_sel,
	.set_voltage_time_sel	= regulator_set_voltage_time_sel,
	.list_voltage		= tps65910_list_voltage_dcdc,
	.map_voltage		= regulator_map_voltage_ascend,
};

/* VDD3 on TPS65910: fixed output, single-entry voltage table. */
static const struct regulator_ops tps65910_ops_vdd3 = {
	.is_enabled		= regulator_is_enabled_regmap,
	.enable			= regulator_enable_regmap,
	.disable		= regulator_disable_regmap,
	.set_mode		= tps65910_set_mode,
	.get_mode		= tps65910_get_mode,
	.get_voltage		= tps65910_get_voltage_vdd3,
	.list_voltage		= regulator_list_voltage_table,
	.map_voltage		= regulator_map_voltage_ascend,
};

/*
 * VBB backup battery charger rail; its voltage table is not sorted
 * ascending, hence the iterating map_voltage helper.
 */
static const struct regulator_ops tps65910_ops_vbb = {
	.is_enabled		= regulator_is_enabled_regmap,
	.enable			= regulator_enable_regmap,
	.disable		= regulator_disable_regmap,
	.set_mode		= tps65910_set_mode,
	.get_mode		= tps65910_get_mode,
	.get_voltage_sel	= tps65910_get_voltage_sel,
	.set_voltage_sel	= tps65910_set_voltage_sel,
	.list_voltage		= regulator_list_voltage_table,
	.map_voltage		= regulator_map_voltage_iterate,
};

/* TPS65910 LDOs: table-based voltages behind a common selector field. */
static const struct regulator_ops tps65910_ops = {
	.is_enabled		= regulator_is_enabled_regmap,
	.enable			= regulator_enable_regmap,
	.disable		= regulator_disable_regmap,
	.set_mode		= tps65910_set_mode,
	.get_mode		= tps65910_get_mode,
	.get_voltage_sel	= tps65910_get_voltage_sel,
	.set_voltage_sel	= tps65910_set_voltage_sel,
	.list_voltage		= regulator_list_voltage_table,
	.map_voltage		= regulator_map_voltage_ascend,
};

/* TPS65911 LDOs: linear ranges computed by tps65911_list_voltage(). */
static const struct regulator_ops tps65911_ops = {
	.is_enabled		= regulator_is_enabled_regmap,
	.enable			= regulator_enable_regmap,
	.disable		= regulator_disable_regmap,
	.set_mode		= tps65910_set_mode,
	.get_mode		= tps65910_get_mode,
	.get_voltage_sel	= tps65911_get_voltage_sel,
	.set_voltage_sel	= tps65911_set_voltage_sel,
	.list_voltage		= tps65911_list_voltage,
	.map_voltage		= regulator_map_voltage_ascend,
};
/*
 * tps65910_set_ext_sleep_config() - route a rail to external sleep control
 * @pmic:		driver state
 * @id:			regulator index
 * @ext_sleep_config:	ORed TPS6591x_SLEEP_CONTROL_EXT_INPUT_* flags, or 0
 *			to detach the rail from all external controls
 *
 * pmic->ext_sleep_control[id] packs the register offset into bits 15..8
 * (see the regoffs extraction below) and the bit number within that
 * register into bits 7..0.
 *
 * Fix vs. original: bit_pos was computed as
 * "(1 << ext_sleep_control[id]) & 0xFF" due to operator precedence;
 * when the offset byte is non-zero the shift count exceeds the type
 * width, which is undefined behaviour in C.  The intended parse — shift
 * by the low byte only, mirroring the regoffs extraction — is used here.
 *
 * Returns 0 on success or a negative error code.
 */
static int tps65910_set_ext_sleep_config(struct tps65910_reg *pmic,
		int id, int ext_sleep_config)
{
	struct tps65910 *mfd = pmic->mfd;
	u8 regoffs = (pmic->ext_sleep_control[id] >> 8) & 0xFF;
	u8 bit_pos = 1 << (pmic->ext_sleep_control[id] & 0xFF);
	int ret;

	/*
	 * Regulator can not be control from multiple external input EN1, EN2
	 * and EN3 together.
	 */
	if (ext_sleep_config & EXT_SLEEP_CONTROL) {
		int en_count;
		en_count = ((ext_sleep_config &
				TPS65910_SLEEP_CONTROL_EXT_INPUT_EN1) != 0);
		en_count += ((ext_sleep_config &
				TPS65910_SLEEP_CONTROL_EXT_INPUT_EN2) != 0);
		en_count += ((ext_sleep_config &
				TPS65910_SLEEP_CONTROL_EXT_INPUT_EN3) != 0);
		en_count += ((ext_sleep_config &
				TPS65911_SLEEP_CONTROL_EXT_INPUT_SLEEP) != 0);
		if (en_count > 1) {
			dev_err(mfd->dev,
				"External sleep control flag is not proper\n");
			return -EINVAL;
		}
	}

	pmic->board_ext_control[id] = ext_sleep_config;

	/* External EN1 control */
	if (ext_sleep_config & TPS65910_SLEEP_CONTROL_EXT_INPUT_EN1)
		ret = regmap_set_bits(mfd->regmap,
				TPS65910_EN1_LDO_ASS + regoffs, bit_pos);
	else
		ret = regmap_clear_bits(mfd->regmap,
				TPS65910_EN1_LDO_ASS + regoffs, bit_pos);
	if (ret < 0) {
		dev_err(mfd->dev,
			"Error in configuring external control EN1\n");
		return ret;
	}

	/* External EN2 control */
	if (ext_sleep_config & TPS65910_SLEEP_CONTROL_EXT_INPUT_EN2)
		ret = regmap_set_bits(mfd->regmap,
				TPS65910_EN2_LDO_ASS + regoffs, bit_pos);
	else
		ret = regmap_clear_bits(mfd->regmap,
				TPS65910_EN2_LDO_ASS + regoffs, bit_pos);
	if (ret < 0) {
		dev_err(mfd->dev,
			"Error in configuring external control EN2\n");
		return ret;
	}

	/* External EN3 control for TPS65910 LDO only */
	if ((tps65910_chip_id(mfd) == TPS65910) &&
			(id >= TPS65910_REG_VDIG1)) {
		if (ext_sleep_config & TPS65910_SLEEP_CONTROL_EXT_INPUT_EN3)
			ret = regmap_set_bits(mfd->regmap,
				TPS65910_EN3_LDO_ASS + regoffs, bit_pos);
		else
			ret = regmap_clear_bits(mfd->regmap,
				TPS65910_EN3_LDO_ASS + regoffs, bit_pos);
		if (ret < 0) {
			dev_err(mfd->dev,
				"Error in configuring external control EN3\n");
			return ret;
		}
	}

	/* Return if no external control is selected */
	if (!(ext_sleep_config & EXT_SLEEP_CONTROL)) {
		/* Clear all sleep controls */
		ret = regmap_clear_bits(mfd->regmap,
			TPS65910_SLEEP_KEEP_LDO_ON + regoffs, bit_pos);
		if (!ret)
			ret = regmap_clear_bits(mfd->regmap,
				TPS65910_SLEEP_SET_LDO_OFF + regoffs, bit_pos);
		if (ret < 0)
			dev_err(mfd->dev,
				"Error in configuring SLEEP register\n");
		return ret;
	}

	/*
	 * For regulator that has separate operational and sleep register make
	 * sure that operational is used and clear sleep register to turn
	 * regulator off when external control is inactive
	 */
	if ((id == TPS65910_REG_VDD1) ||
		(id == TPS65910_REG_VDD2) ||
			((id == TPS65911_REG_VDDCTRL) &&
				(tps65910_chip_id(mfd) == TPS65911))) {
		int op_reg_add = pmic->get_ctrl_reg(id) + 1;
		int sr_reg_add = pmic->get_ctrl_reg(id) + 2;
		int opvsel, srvsel;

		ret = regmap_read(mfd->regmap, op_reg_add, &opvsel);
		if (ret < 0)
			return ret;
		ret = regmap_read(mfd->regmap, sr_reg_add, &srvsel);
		if (ret < 0)
			return ret;

		/* If the SR register is live, copy its selector to OP. */
		if (opvsel & VDD1_OP_CMD_MASK) {
			u8 reg_val = srvsel & VDD1_OP_SEL_MASK;

			ret = regmap_write(mfd->regmap, op_reg_add, reg_val);
			if (ret < 0) {
				dev_err(mfd->dev,
					"Error in configuring op register\n");
				return ret;
			}
		}
		ret = regmap_write(mfd->regmap, sr_reg_add, 0);
		if (ret < 0) {
			dev_err(mfd->dev, "Error in setting sr register\n");
			return ret;
		}
	}

	ret = regmap_clear_bits(mfd->regmap,
			TPS65910_SLEEP_KEEP_LDO_ON + regoffs, bit_pos);
	if (!ret) {
		if (ext_sleep_config & TPS65911_SLEEP_CONTROL_EXT_INPUT_SLEEP)
			ret = regmap_set_bits(mfd->regmap,
				TPS65910_SLEEP_SET_LDO_OFF + regoffs, bit_pos);
		else
			ret = regmap_clear_bits(mfd->regmap,
				TPS65910_SLEEP_SET_LDO_OFF + regoffs, bit_pos);
	}
	if (ret < 0)
		dev_err(mfd->dev,
			"Error in configuring SLEEP register\n");

	return ret;
}
#ifdef CONFIG_OF
/*
 * DT match table for TPS65910.  Entry order must line up with the
 * tps65910_regs[] indices referenced in .driver_data.
 */
static struct of_regulator_match tps65910_matches[] = {
	{ .name = "vrtc",	.driver_data = (void *) &tps65910_regs[0] },
	{ .name = "vio",	.driver_data = (void *) &tps65910_regs[1] },
	{ .name = "vdd1",	.driver_data = (void *) &tps65910_regs[2] },
	{ .name = "vdd2",	.driver_data = (void *) &tps65910_regs[3] },
	{ .name = "vdd3",	.driver_data = (void *) &tps65910_regs[4] },
	{ .name = "vdig1",	.driver_data = (void *) &tps65910_regs[5] },
	{ .name = "vdig2",	.driver_data = (void *) &tps65910_regs[6] },
	{ .name = "vpll",	.driver_data = (void *) &tps65910_regs[7] },
	{ .name = "vdac",	.driver_data = (void *) &tps65910_regs[8] },
	{ .name = "vaux1",	.driver_data = (void *) &tps65910_regs[9] },
	{ .name = "vaux2",	.driver_data = (void *) &tps65910_regs[10] },
	{ .name = "vaux33",	.driver_data = (void *) &tps65910_regs[11] },
	{ .name = "vmmc",	.driver_data = (void *) &tps65910_regs[12] },
	{ .name = "vbb",	.driver_data = (void *) &tps65910_regs[13] },
};
/*
 * DT match table for TPS65911.  Entry order must line up with the
 * tps65911_regs[] indices referenced in .driver_data.
 */
static struct of_regulator_match tps65911_matches[] = {
	{ .name = "vrtc",	.driver_data = (void *) &tps65911_regs[0] },
	{ .name = "vio",	.driver_data = (void *) &tps65911_regs[1] },
	{ .name = "vdd1",	.driver_data = (void *) &tps65911_regs[2] },
	{ .name = "vdd2",	.driver_data = (void *) &tps65911_regs[3] },
	{ .name = "vddctrl",	.driver_data = (void *) &tps65911_regs[4] },
	{ .name = "ldo1",	.driver_data = (void *) &tps65911_regs[5] },
	{ .name = "ldo2",	.driver_data = (void *) &tps65911_regs[6] },
	{ .name = "ldo3",	.driver_data = (void *) &tps65911_regs[7] },
	{ .name = "ldo4",	.driver_data = (void *) &tps65911_regs[8] },
	{ .name = "ldo5",	.driver_data = (void *) &tps65911_regs[9] },
	{ .name = "ldo6",	.driver_data = (void *) &tps65911_regs[10] },
	{ .name = "ldo7",	.driver_data = (void *) &tps65911_regs[11] },
	{ .name = "ldo8",	.driver_data = (void *) &tps65911_regs[12] },
};
/*
 * Build platform data from the "regulators" device-tree node.  On
 * success *tps65910_reg_matches points at the per-variant match table
 * (with .of_node/.init_data filled in by of_regulator_match()); NULL is
 * returned on any failure.
 */
static struct tps65910_board *tps65910_parse_dt_reg_data(
		struct platform_device *pdev,
		struct of_regulator_match **tps65910_reg_matches)
{
	struct tps65910 *tps65910 = dev_get_drvdata(pdev->dev.parent);
	struct of_regulator_match *matches;
	struct tps65910_board *board;
	struct device_node *regulators;
	unsigned int prop;
	int idx, ret, count;

	board = devm_kzalloc(&pdev->dev, sizeof(*board), GFP_KERNEL);
	if (!board)
		return NULL;

	regulators = of_get_child_by_name(pdev->dev.parent->of_node,
					  "regulators");
	if (!regulators) {
		dev_err(&pdev->dev, "regulator node not found\n");
		return NULL;
	}

	switch (tps65910_chip_id(tps65910)) {
	case TPS65910:
		matches = tps65910_matches;
		count = ARRAY_SIZE(tps65910_matches);
		break;
	case TPS65911:
		matches = tps65911_matches;
		count = ARRAY_SIZE(tps65911_matches);
		break;
	default:
		of_node_put(regulators);
		dev_err(&pdev->dev, "Invalid tps chip version\n");
		return NULL;
	}

	ret = of_regulator_match(&pdev->dev, regulators, matches, count);
	of_node_put(regulators);
	if (ret < 0) {
		dev_err(&pdev->dev, "Error parsing regulator init data: %d\n",
			ret);
		return NULL;
	}

	*tps65910_reg_matches = matches;

	for (idx = 0; idx < count; idx++) {
		if (!matches[idx].of_node)
			continue;

		board->tps65910_pmic_init_data[idx] = matches[idx].init_data;

		if (!of_property_read_u32(matches[idx].of_node,
					  "ti,regulator-ext-sleep-control",
					  &prop))
			board->regulator_ext_sleep_control[idx] = prop;
	}

	return board;
}
#else
static inline struct tps65910_board *tps65910_parse_dt_reg_data(
			struct platform_device *pdev,
			struct of_regulator_match **tps65910_reg_matches)
{
	/* Without CONFIG_OF there is no DT to parse: no matches, no data. */
	*tps65910_reg_matches = NULL;
	return NULL;
}
#endif
/*
 * tps65910_probe() - set up and register every regulator of the PMIC
 *
 * Platform data comes from dev_get_platdata() when present, otherwise
 * it is parsed from the device tree.  One regulator_desc is built per
 * rail; which ops table and voltage description a rail gets depends on
 * the chip variant (TPS65910 vs TPS65911) and the rail index.
 */
static int tps65910_probe(struct platform_device *pdev)
{
	struct tps65910 *tps65910 = dev_get_drvdata(pdev->dev.parent);
	struct regulator_config config = { };
	struct tps_info *info;
	struct regulator_dev *rdev;
	struct tps65910_reg *pmic;
	struct tps65910_board *pmic_plat_data;
	struct of_regulator_match *tps65910_reg_matches = NULL;
	int i, err;

	pmic_plat_data = dev_get_platdata(tps65910->dev);
	if (!pmic_plat_data && tps65910->dev->of_node)
		pmic_plat_data = tps65910_parse_dt_reg_data(pdev,
						&tps65910_reg_matches);

	if (!pmic_plat_data) {
		dev_err(&pdev->dev, "Platform data not found\n");
		return -EINVAL;
	}

	pmic = devm_kzalloc(&pdev->dev, sizeof(*pmic), GFP_KERNEL);
	if (!pmic)
		return -ENOMEM;

	pmic->mfd = tps65910;
	platform_set_drvdata(pdev, pmic);

	/* Give control of all register to control port */
	err = regmap_set_bits(pmic->mfd->regmap, TPS65910_DEVCTRL,
				DEVCTRL_SR_CTL_I2C_SEL_MASK);
	if (err < 0)
		return err;

	/* Select the per-variant register tables and callbacks. */
	switch (tps65910_chip_id(tps65910)) {
	case TPS65910:
		BUILD_BUG_ON(TPS65910_NUM_REGS < ARRAY_SIZE(tps65910_regs));
		pmic->get_ctrl_reg = &tps65910_get_ctrl_register;
		pmic->num_regulators = ARRAY_SIZE(tps65910_regs);
		pmic->ext_sleep_control = tps65910_ext_sleep_control;
		info = tps65910_regs;
		/* Work around silicon erratum SWCZ010: output programmed
		 * voltage level can go higher than expected or crash
		 * Workaround: use no synchronization of DCDC clocks
		 */
		regmap_clear_bits(pmic->mfd->regmap, TPS65910_DCDCCTRL,
					DCDCCTRL_DCDCCKSYNC_MASK);
		break;
	case TPS65911:
		BUILD_BUG_ON(TPS65910_NUM_REGS < ARRAY_SIZE(tps65911_regs));
		pmic->get_ctrl_reg = &tps65911_get_ctrl_register;
		pmic->num_regulators = ARRAY_SIZE(tps65911_regs);
		pmic->ext_sleep_control = tps65911_ext_sleep_control;
		info = tps65911_regs;
		break;
	default:
		dev_err(&pdev->dev, "Invalid tps chip version\n");
		return -ENODEV;
	}

	/* Per-rail arrays: descriptors, static info, registered rdevs. */
	pmic->desc = devm_kcalloc(&pdev->dev,
				  pmic->num_regulators,
				  sizeof(struct regulator_desc),
				  GFP_KERNEL);
	if (!pmic->desc)
		return -ENOMEM;

	pmic->info = devm_kcalloc(&pdev->dev,
				  pmic->num_regulators,
				  sizeof(struct tps_info *),
				  GFP_KERNEL);
	if (!pmic->info)
		return -ENOMEM;

	pmic->rdev = devm_kcalloc(&pdev->dev,
				  pmic->num_regulators,
				  sizeof(struct regulator_dev *),
				  GFP_KERNEL);
	if (!pmic->rdev)
		return -ENOMEM;

	for (i = 0; i < pmic->num_regulators; i++, info++) {
		/* Register the regulators */
		pmic->info[i] = info;

		pmic->desc[i].name = info->name;
		pmic->desc[i].supply_name = info->vin_name;
		pmic->desc[i].id = i;
		pmic->desc[i].n_voltages = info->n_voltages;
		pmic->desc[i].enable_time = info->enable_time_us;

		/* Pick ops per rail: DCDCs, fixed VDD3, VBB, generic LDOs. */
		if (i == TPS65910_REG_VDD1 || i == TPS65910_REG_VDD2) {
			pmic->desc[i].ops = &tps65910_ops_dcdc;
			pmic->desc[i].n_voltages = VDD1_2_NUM_VOLT_FINE *
							VDD1_2_NUM_VOLT_COARSE;
			pmic->desc[i].ramp_delay = 12500;
		} else if (i == TPS65910_REG_VDD3) {
			if (tps65910_chip_id(tps65910) == TPS65910) {
				pmic->desc[i].ops = &tps65910_ops_vdd3;
				pmic->desc[i].volt_table = info->voltage_table;
			} else {
				pmic->desc[i].ops = &tps65910_ops_dcdc;
				pmic->desc[i].ramp_delay = 5000;
			}
		} else if (i == TPS65910_REG_VBB &&
				tps65910_chip_id(tps65910) == TPS65910) {
			pmic->desc[i].ops = &tps65910_ops_vbb;
			pmic->desc[i].volt_table = info->voltage_table;
		} else {
			if (tps65910_chip_id(tps65910) == TPS65910) {
				pmic->desc[i].ops = &tps65910_ops;
				pmic->desc[i].volt_table = info->voltage_table;
			} else {
				pmic->desc[i].ops = &tps65911_ops;
			}
		}

		err = tps65910_set_ext_sleep_config(pmic, i,
				pmic_plat_data->regulator_ext_sleep_control[i]);
		/*
		 * Failing on regulator for configuring externally control
		 * is not a serious issue, just throw warning.
		 */
		if (err < 0)
			dev_warn(tps65910->dev,
				"Failed to initialise ext control config\n");

		pmic->desc[i].type = REGULATOR_VOLTAGE;
		pmic->desc[i].owner = THIS_MODULE;
		/* NOTE(review): get_ctrl_reg() may return -EINVAL; a negative
		 * value stored here is not checked — confirm all ids are valid.
		 */
		pmic->desc[i].enable_reg = pmic->get_ctrl_reg(i);
		pmic->desc[i].enable_mask = TPS65910_SUPPLY_STATE_ENABLED;

		config.dev = tps65910->dev;
		config.init_data = pmic_plat_data->tps65910_pmic_init_data[i];
		config.driver_data = pmic;
		config.regmap = tps65910->regmap;

		if (tps65910_reg_matches)
			config.of_node = tps65910_reg_matches[i].of_node;

		rdev = devm_regulator_register(&pdev->dev, &pmic->desc[i],
					       &config);
		if (IS_ERR(rdev))
			return dev_err_probe(tps65910->dev, PTR_ERR(rdev),
					     "failed to register %s regulator\n",
					     pdev->name);

		/* Save regulator for cleanup */
		pmic->rdev[i] = rdev;
	}
	return 0;
}
static void tps65910_shutdown(struct platform_device *pdev)
{
	struct tps65910_reg *pmic = platform_get_drvdata(pdev);
	int i;

	/*
	 * Before bootloader jumps to kernel, it makes sure that required
	 * external control signals are in desired state so that given rails
	 * can be configure accordingly.
	 * If rails are configured to be controlled from external control
	 * then before shutting down/rebooting the system, the external
	 * control configuration need to be remove from the rails so that
	 * its output will be available as per register programming even
	 * if external controls are removed. This is require when the POR
	 * value of the control signals are not in active state and before
	 * bootloader initializes it, the system requires the rail output
	 * to be active for booting.
	 */
	for (i = 0; i < pmic->num_regulators; i++) {
		if (!pmic->rdev[i])
			continue;

		/* Detach the rail from all external sleep controls. */
		if (tps65910_set_ext_sleep_config(pmic, i, 0) < 0)
			dev_err(&pdev->dev,
				"Error in clearing external control\n");
	}
}
static struct platform_driver tps65910_driver = {
	.driver = {
		.name = "tps65910-pmic",
		/* Driver opts in to asynchronous probing. */
		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
	},
	.probe = tps65910_probe,
	.shutdown = tps65910_shutdown,
};
static int __init tps65910_init(void)
{
	return platform_driver_register(&tps65910_driver);
}
/*
 * Registered at subsys_initcall time (before regular module/device
 * initcalls) so the supplies exist before consumer drivers probe.
 */
subsys_initcall(tps65910_init);

static void __exit tps65910_cleanup(void)
{
	platform_driver_unregister(&tps65910_driver);
}
module_exit(tps65910_cleanup);
MODULE_AUTHOR("Graeme Gregory <[email protected]>");
MODULE_DESCRIPTION("TPS65910/TPS65911 voltage regulator driver");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:tps65910-pmic");
| linux-master | drivers/regulator/tps65910-regulator.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
*/
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/devm-helpers.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/bitops.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/ktime.h>
#include <linux/regulator/driver.h>
#include <linux/regmap.h>
#include <linux/list.h>
#include <linux/mfd/syscon.h>
#include <linux/io.h>
/* Pin control enable input pins. */
#define SPMI_REGULATOR_PIN_CTRL_ENABLE_NONE 0x00
#define SPMI_REGULATOR_PIN_CTRL_ENABLE_EN0 0x01
#define SPMI_REGULATOR_PIN_CTRL_ENABLE_EN1 0x02
#define SPMI_REGULATOR_PIN_CTRL_ENABLE_EN2 0x04
#define SPMI_REGULATOR_PIN_CTRL_ENABLE_EN3 0x08
#define SPMI_REGULATOR_PIN_CTRL_ENABLE_HW_DEFAULT 0x10
/* Pin control high power mode input pins. */
#define SPMI_REGULATOR_PIN_CTRL_HPM_NONE 0x00
#define SPMI_REGULATOR_PIN_CTRL_HPM_EN0 0x01
#define SPMI_REGULATOR_PIN_CTRL_HPM_EN1 0x02
#define SPMI_REGULATOR_PIN_CTRL_HPM_EN2 0x04
#define SPMI_REGULATOR_PIN_CTRL_HPM_EN3 0x08
#define SPMI_REGULATOR_PIN_CTRL_HPM_SLEEP_B 0x10
#define SPMI_REGULATOR_PIN_CTRL_HPM_HW_DEFAULT 0x20
/*
* Used with enable parameters to specify that hardware default register values
* should be left unaltered.
*/
#define SPMI_REGULATOR_USE_HW_DEFAULT 2
/* Soft start strength of a voltage switch type regulator */
enum spmi_vs_soft_start_str {
	SPMI_VS_SOFT_START_STR_0P05_UA = 0,	/* 0.05 uA */
	SPMI_VS_SOFT_START_STR_0P25_UA,		/* 0.25 uA */
	SPMI_VS_SOFT_START_STR_0P55_UA,		/* 0.55 uA */
	SPMI_VS_SOFT_START_STR_0P75_UA,		/* 0.75 uA */
	SPMI_VS_SOFT_START_STR_HW_DEFAULT,	/* keep hardware default */
};
/**
* struct spmi_regulator_init_data - spmi-regulator initialization data
* @pin_ctrl_enable: Bit mask specifying which hardware pins should be
* used to enable the regulator, if any
* Value should be an ORing of
* SPMI_REGULATOR_PIN_CTRL_ENABLE_* constants. If
* the bit specified by
* SPMI_REGULATOR_PIN_CTRL_ENABLE_HW_DEFAULT is
* set, then pin control enable hardware registers
* will not be modified.
* @pin_ctrl_hpm: Bit mask specifying which hardware pins should be
* used to force the regulator into high power
* mode, if any
* Value should be an ORing of
* SPMI_REGULATOR_PIN_CTRL_HPM_* constants. If
* the bit specified by
* SPMI_REGULATOR_PIN_CTRL_HPM_HW_DEFAULT is
* set, then pin control mode hardware registers
* will not be modified.
* @vs_soft_start_strength: This parameter sets the soft start strength for
* voltage switch type regulators. Its value
* should be one of SPMI_VS_SOFT_START_STR_*. If
* its value is SPMI_VS_SOFT_START_STR_HW_DEFAULT,
* then the soft start strength will be left at its
* default hardware value.
*/
struct spmi_regulator_init_data {
	unsigned pin_ctrl_enable;	/* SPMI_REGULATOR_PIN_CTRL_ENABLE_* mask */
	unsigned pin_ctrl_hpm;		/* SPMI_REGULATOR_PIN_CTRL_HPM_* mask */
	enum spmi_vs_soft_start_str vs_soft_start_strength;
};
/* These types correspond to unique register layouts. */
/*
 * Driver-internal classification: one value per distinct register
 * layout; these are not hardware register values.
 */
enum spmi_regulator_logical_type {
	SPMI_REGULATOR_LOGICAL_TYPE_SMPS,
	SPMI_REGULATOR_LOGICAL_TYPE_LDO,
	SPMI_REGULATOR_LOGICAL_TYPE_VS,
	SPMI_REGULATOR_LOGICAL_TYPE_BOOST,
	SPMI_REGULATOR_LOGICAL_TYPE_FTSMPS,
	SPMI_REGULATOR_LOGICAL_TYPE_BOOST_BYP,
	SPMI_REGULATOR_LOGICAL_TYPE_LN_LDO,
	SPMI_REGULATOR_LOGICAL_TYPE_ULT_LO_SMPS,
	SPMI_REGULATOR_LOGICAL_TYPE_ULT_HO_SMPS,
	SPMI_REGULATOR_LOGICAL_TYPE_ULT_LDO,
	SPMI_REGULATOR_LOGICAL_TYPE_FTSMPS426,
	SPMI_REGULATOR_LOGICAL_TYPE_HFS430,
	SPMI_REGULATOR_LOGICAL_TYPE_FTSMPS3,
	SPMI_REGULATOR_LOGICAL_TYPE_LDO_510,
	SPMI_REGULATOR_LOGICAL_TYPE_HFSMPS,
};
/*
 * Hardware peripheral type codes; presumably read from the
 * SPMI_COMMON_REG_TYPE register — confirm against the probe path.
 */
enum spmi_regulator_type {
	SPMI_REGULATOR_TYPE_BUCK	= 0x03,
	SPMI_REGULATOR_TYPE_LDO		= 0x04,
	SPMI_REGULATOR_TYPE_VS		= 0x05,
	SPMI_REGULATOR_TYPE_BOOST	= 0x1b,
	SPMI_REGULATOR_TYPE_FTS		= 0x1c,
	SPMI_REGULATOR_TYPE_BOOST_BYP	= 0x1f,
	SPMI_REGULATOR_TYPE_ULT_LDO	= 0x21,
	SPMI_REGULATOR_TYPE_ULT_BUCK	= 0x22,
};
/*
 * Hardware subtype codes.  Values repeat across peripheral types (e.g.
 * 0x08 appears as GP_CTL, P50 and MV300), so a subtype is only
 * meaningful in combination with its spmi_regulator_type.
 */
enum spmi_regulator_subtype {
	SPMI_REGULATOR_SUBTYPE_GP_CTL		= 0x08,
	SPMI_REGULATOR_SUBTYPE_RF_CTL		= 0x09,
	SPMI_REGULATOR_SUBTYPE_N50		= 0x01,
	SPMI_REGULATOR_SUBTYPE_N150		= 0x02,
	SPMI_REGULATOR_SUBTYPE_N300		= 0x03,
	SPMI_REGULATOR_SUBTYPE_N600		= 0x04,
	SPMI_REGULATOR_SUBTYPE_N1200		= 0x05,
	SPMI_REGULATOR_SUBTYPE_N600_ST		= 0x06,
	SPMI_REGULATOR_SUBTYPE_N1200_ST		= 0x07,
	SPMI_REGULATOR_SUBTYPE_N900_ST		= 0x14,
	SPMI_REGULATOR_SUBTYPE_N300_ST		= 0x15,
	SPMI_REGULATOR_SUBTYPE_P50		= 0x08,
	SPMI_REGULATOR_SUBTYPE_P150		= 0x09,
	SPMI_REGULATOR_SUBTYPE_P300		= 0x0a,
	SPMI_REGULATOR_SUBTYPE_P600		= 0x0b,
	SPMI_REGULATOR_SUBTYPE_P1200		= 0x0c,
	SPMI_REGULATOR_SUBTYPE_LN		= 0x10,
	SPMI_REGULATOR_SUBTYPE_LV_P50		= 0x28,
	SPMI_REGULATOR_SUBTYPE_LV_P150		= 0x29,
	SPMI_REGULATOR_SUBTYPE_LV_P300		= 0x2a,
	SPMI_REGULATOR_SUBTYPE_LV_P600		= 0x2b,
	SPMI_REGULATOR_SUBTYPE_LV_P1200		= 0x2c,
	SPMI_REGULATOR_SUBTYPE_LV_P450		= 0x2d,
	SPMI_REGULATOR_SUBTYPE_HT_N300_ST	= 0x30,
	SPMI_REGULATOR_SUBTYPE_HT_N600_ST	= 0x31,
	SPMI_REGULATOR_SUBTYPE_HT_N1200_ST	= 0x32,
	SPMI_REGULATOR_SUBTYPE_HT_LVP150	= 0x3b,
	SPMI_REGULATOR_SUBTYPE_HT_LVP300	= 0x3c,
	SPMI_REGULATOR_SUBTYPE_L660_N300_ST	= 0x42,
	SPMI_REGULATOR_SUBTYPE_L660_N600_ST	= 0x43,
	SPMI_REGULATOR_SUBTYPE_L660_P50		= 0x46,
	SPMI_REGULATOR_SUBTYPE_L660_P150	= 0x47,
	SPMI_REGULATOR_SUBTYPE_L660_P600	= 0x49,
	SPMI_REGULATOR_SUBTYPE_L660_LVP150	= 0x4d,
	SPMI_REGULATOR_SUBTYPE_L660_LVP600	= 0x4f,
	SPMI_REGULATOR_SUBTYPE_LV100		= 0x01,
	SPMI_REGULATOR_SUBTYPE_LV300		= 0x02,
	SPMI_REGULATOR_SUBTYPE_MV300		= 0x08,
	SPMI_REGULATOR_SUBTYPE_MV500		= 0x09,
	SPMI_REGULATOR_SUBTYPE_HDMI		= 0x10,
	SPMI_REGULATOR_SUBTYPE_OTG		= 0x11,
	SPMI_REGULATOR_SUBTYPE_5V_BOOST		= 0x01,
	SPMI_REGULATOR_SUBTYPE_FTS_CTL		= 0x08,
	SPMI_REGULATOR_SUBTYPE_FTS2p5_CTL	= 0x09,
	SPMI_REGULATOR_SUBTYPE_FTS426_CTL	= 0x0a,
	SPMI_REGULATOR_SUBTYPE_BB_2A		= 0x01,
	SPMI_REGULATOR_SUBTYPE_ULT_HF_CTL1	= 0x0d,
	SPMI_REGULATOR_SUBTYPE_ULT_HF_CTL2	= 0x0e,
	SPMI_REGULATOR_SUBTYPE_ULT_HF_CTL3	= 0x0f,
	SPMI_REGULATOR_SUBTYPE_ULT_HF_CTL4	= 0x10,
	SPMI_REGULATOR_SUBTYPE_HFS430		= 0x0a,
	SPMI_REGULATOR_SUBTYPE_HT_P150		= 0x35,
	SPMI_REGULATOR_SUBTYPE_HT_P600		= 0x3d,
	SPMI_REGULATOR_SUBTYPE_HFSMPS_510	= 0x0a,
	SPMI_REGULATOR_SUBTYPE_FTSMPS_510	= 0x0b,
	SPMI_REGULATOR_SUBTYPE_LV_P150_510	= 0x71,
	SPMI_REGULATOR_SUBTYPE_LV_P300_510	= 0x72,
	SPMI_REGULATOR_SUBTYPE_LV_P600_510	= 0x73,
	SPMI_REGULATOR_SUBTYPE_N300_510		= 0x6a,
	SPMI_REGULATOR_SUBTYPE_N600_510		= 0x6b,
	SPMI_REGULATOR_SUBTYPE_N1200_510	= 0x6c,
	SPMI_REGULATOR_SUBTYPE_MV_P50_510	= 0x7a,
	SPMI_REGULATOR_SUBTYPE_MV_P150_510	= 0x7b,
	SPMI_REGULATOR_SUBTYPE_MV_P600_510	= 0x7d,
};
/* Register offsets of the first (original) common register layout. */
enum spmi_common_regulator_registers {
	SPMI_COMMON_REG_DIG_MAJOR_REV		= 0x01,
	SPMI_COMMON_REG_TYPE			= 0x04,
	SPMI_COMMON_REG_SUBTYPE			= 0x05,
	SPMI_COMMON_REG_VOLTAGE_RANGE		= 0x40,
	SPMI_COMMON_REG_VOLTAGE_SET		= 0x41,
	SPMI_COMMON_REG_MODE			= 0x45,
	SPMI_COMMON_REG_ENABLE			= 0x46,
	SPMI_COMMON_REG_PULL_DOWN		= 0x48,
	SPMI_COMMON_REG_SOFT_START		= 0x4c,
	SPMI_COMMON_REG_STEP_CTRL		= 0x61,
};
/*
* Second common register layout used by newer devices starting with ftsmps426
* Note that some of the registers from the first common layout remain
* unchanged and their definition is not duplicated.
*/
enum spmi_ftsmps426_regulator_registers {
	/*
	 * NOTE(review): the voltage appears to be programmed as an
	 * LSB/MSB register pair — confirm in the set_voltage path.
	 */
	SPMI_FTSMPS426_REG_VOLTAGE_LSB		= 0x40,
	SPMI_FTSMPS426_REG_VOLTAGE_MSB		= 0x41,
	SPMI_FTSMPS426_REG_VOLTAGE_ULS_LSB	= 0x68,
	SPMI_FTSMPS426_REG_VOLTAGE_ULS_MSB	= 0x69,
};

/*
 * Third common register layout
 */
enum spmi_hfsmps_regulator_registers {
	SPMI_HFSMPS_REG_STEP_CTRL	= 0x3c,
	SPMI_HFSMPS_REG_PULL_DOWN	= 0xa0,
};
/* Voltage-switch peripheral: over-current protection and soft start. */
enum spmi_vs_registers {
	SPMI_VS_REG_OCP		= 0x4a,
	SPMI_VS_REG_SOFT_START	= 0x4c,
};

/* Boost peripheral: current limit control. */
enum spmi_boost_registers {
	SPMI_BOOST_REG_CURRENT_LIMIT	= 0x4a,
};

/* Boost-bypass peripheral: current limit control. */
enum spmi_boost_byp_registers {
	SPMI_BOOST_BYP_REG_CURRENT_LIMIT	= 0x4b,
};

/* SAW3 block register offsets. */
enum spmi_saw3_registers {
	SAW3_SECURE				= 0x00,
	SAW3_ID					= 0x04,
	SAW3_SPM_STS				= 0x0C,
	SAW3_AVS_STS				= 0x10,
	SAW3_PMIC_STS				= 0x14,
	SAW3_RST				= 0x18,
	SAW3_VCTL				= 0x1C,
	SAW3_AVS_CTL				= 0x20,
	SAW3_AVS_LIMIT				= 0x24,
	SAW3_AVS_DLY				= 0x28,
	SAW3_AVS_HYSTERESIS			= 0x2C,
	SAW3_SPM_STS2				= 0x38,
	SAW3_SPM_PMIC_DATA_3			= 0x4C,
	SAW3_VERSION				= 0xFD0,
};

/* Used for indexing into ctrl_reg. These are offsets from 0x40 */
enum spmi_common_control_register_index {
	SPMI_COMMON_IDX_VOLTAGE_RANGE		= 0,
	SPMI_COMMON_IDX_VOLTAGE_SET		= 1,
	SPMI_COMMON_IDX_MODE			= 5,
	SPMI_COMMON_IDX_ENABLE			= 6,
};
/* Common regulator control register layout */
#define SPMI_COMMON_ENABLE_MASK			0x80
#define SPMI_COMMON_ENABLE			0x80
#define SPMI_COMMON_DISABLE			0x00
#define SPMI_COMMON_ENABLE_FOLLOW_HW_EN3_MASK	0x08
#define SPMI_COMMON_ENABLE_FOLLOW_HW_EN2_MASK	0x04
#define SPMI_COMMON_ENABLE_FOLLOW_HW_EN1_MASK	0x02
#define SPMI_COMMON_ENABLE_FOLLOW_HW_EN0_MASK	0x01
#define SPMI_COMMON_ENABLE_FOLLOW_ALL_MASK	0x0f
/* Common regulator mode register layout */
#define SPMI_COMMON_MODE_HPM_MASK		0x80
#define SPMI_COMMON_MODE_AUTO_MASK		0x40
#define SPMI_COMMON_MODE_BYPASS_MASK		0x20
#define SPMI_COMMON_MODE_FOLLOW_AWAKE_MASK	0x10
#define SPMI_COMMON_MODE_FOLLOW_HW_EN3_MASK	0x08
#define SPMI_COMMON_MODE_FOLLOW_HW_EN2_MASK	0x04
#define SPMI_COMMON_MODE_FOLLOW_HW_EN1_MASK	0x02
#define SPMI_COMMON_MODE_FOLLOW_HW_EN0_MASK	0x01
#define SPMI_COMMON_MODE_FOLLOW_ALL_MASK	0x1f
/* FTSMPS426 regulator mode register values (3-bit field, not bit masks) */
#define SPMI_FTSMPS426_MODE_BYPASS_MASK		3
#define SPMI_FTSMPS426_MODE_RETENTION_MASK	4
#define SPMI_FTSMPS426_MODE_LPM_MASK		5
#define SPMI_FTSMPS426_MODE_AUTO_MASK		6
#define SPMI_FTSMPS426_MODE_HPM_MASK		7
#define SPMI_FTSMPS426_MODE_MASK		0x07
/* Third common regulator mode register values */
#define SPMI_HFSMPS_MODE_BYPASS_MASK		2
#define SPMI_HFSMPS_MODE_RETENTION_MASK		3
#define SPMI_HFSMPS_MODE_LPM_MASK		4
#define SPMI_HFSMPS_MODE_AUTO_MASK		6
#define SPMI_HFSMPS_MODE_HPM_MASK		7
#define SPMI_HFSMPS_MODE_MASK			0x07
/* Common regulator pull down control register layout */
#define SPMI_COMMON_PULL_DOWN_ENABLE_MASK	0x80
/* LDO regulator current limit control register layout */
#define SPMI_LDO_CURRENT_LIMIT_ENABLE_MASK	0x80
/* LDO regulator soft start control register layout */
#define SPMI_LDO_SOFT_START_ENABLE_MASK		0x80
/* VS regulator over current protection control register layout */
#define SPMI_VS_OCP_OVERRIDE			0x01
#define SPMI_VS_OCP_NO_OVERRIDE			0x00
/* VS regulator soft start control register layout */
#define SPMI_VS_SOFT_START_ENABLE_MASK		0x80
#define SPMI_VS_SOFT_START_SEL_MASK		0x03
/* Boost regulator current limit control register layout */
#define SPMI_BOOST_CURRENT_LIMIT_ENABLE_MASK	0x80
#define SPMI_BOOST_CURRENT_LIMIT_MASK		0x07
/* VS over-current protection retry policy and settle/fault delays */
#define SPMI_VS_OCP_DEFAULT_MAX_RETRIES		10
#define SPMI_VS_OCP_DEFAULT_RETRY_DELAY_MS	30
#define SPMI_VS_OCP_FALL_DELAY_US		90
#define SPMI_VS_OCP_FAULT_DELAY_US		20000
/* FTSMPS voltage stepper control register layout */
#define SPMI_FTSMPS_STEP_CTRL_STEP_MASK		0x18
#define SPMI_FTSMPS_STEP_CTRL_STEP_SHIFT	3
#define SPMI_FTSMPS_STEP_CTRL_DELAY_MASK	0x07
#define SPMI_FTSMPS_STEP_CTRL_DELAY_SHIFT	0
/* Clock rate in kHz of the FTSMPS regulator reference clock. */
#define SPMI_FTSMPS_CLOCK_RATE		19200
/* Minimum voltage stepper delay for each step. */
#define SPMI_FTSMPS_STEP_DELAY		8
#define SPMI_DEFAULT_STEP_DELAY		20
/*
 * The ratio SPMI_FTSMPS_STEP_MARGIN_NUM/SPMI_FTSMPS_STEP_MARGIN_DEN is used to
 * adjust the step rate in order to account for oscillator variance.
 */
#define SPMI_FTSMPS_STEP_MARGIN_NUM	4
#define SPMI_FTSMPS_STEP_MARGIN_DEN	5
/* slew_rate has units of uV/us. */
#define SPMI_HFSMPS_SLEW_RATE_38p4	38400
#define SPMI_FTSMPS426_STEP_CTRL_DELAY_MASK	0x03
#define SPMI_FTSMPS426_STEP_CTRL_DELAY_SHIFT	0
/* Clock rate in kHz of the FTSMPS426 regulator reference clock. */
#define SPMI_FTSMPS426_CLOCK_RATE	4800
#define SPMI_HFS430_CLOCK_RATE		1600
/* Minimum voltage stepper delay for each step. */
#define SPMI_FTSMPS426_STEP_DELAY	2
/*
 * The ratio SPMI_FTSMPS426_STEP_MARGIN_NUM/SPMI_FTSMPS426_STEP_MARGIN_DEN is
 * used to adjust the step rate in order to account for oscillator variance.
 */
#define SPMI_FTSMPS426_STEP_MARGIN_NUM	10
#define SPMI_FTSMPS426_STEP_MARGIN_DEN	11
/* VSET value to decide the range of ULT SMPS */
#define ULT_SMPS_RANGE_SPLIT	0x60
/**
 * struct spmi_voltage_range - regulator set point voltage mapping description
 * @min_uV:		Minimum programmable output voltage resulting from
 *			set point register value 0x00
 * @max_uV:		Maximum programmable output voltage
 * @step_uV:		Output voltage increase resulting from the set point
 *			register value increasing by 1
 * @set_point_min_uV:	Minimum allowed voltage
 * @set_point_max_uV:	Maximum allowed voltage.  This may be tweaked in order
 *			to pick which range should be used in the case of
 *			overlapping set points.
 * @n_voltages:		Number of preferred voltage set points present in this
 *			range
 * @range_sel:		Voltage range register value corresponding to this range
 *
 * The following relationships must be true for the values used in this struct:
 * (max_uV - min_uV) % step_uV == 0
 * (set_point_min_uV - min_uV) % step_uV == 0*
 * (set_point_max_uV - min_uV) % step_uV == 0*
 * n_voltages = (set_point_max_uV - set_point_min_uV) / step_uV + 1
 *
 * *Note, set_point_min_uV == set_point_max_uV == 0 is allowed in order to
 * specify that the voltage range has meaning, but is not preferred.
 */
struct spmi_voltage_range {
	int min_uV;
	int max_uV;
	int step_uV;
	int set_point_min_uV;
	int set_point_max_uV;
	/* 0 when the range exists in hardware but has no preferred points */
	unsigned n_voltages;
	u8 range_sel;
};
/*
 * The ranges specified in the spmi_voltage_set_points struct must be listed
 * so that range[i].set_point_max_uV < range[i+1].set_point_min_uV.
 */
struct spmi_voltage_set_points {
	struct spmi_voltage_range *range;
	int count;
	/* total preferred set points across all ranges */
	unsigned n_voltages;
};
/* Per-regulator driver state, one instance per regulator peripheral. */
struct spmi_regulator {
	struct regulator_desc desc;
	struct device *dev;
	/* deferred OCP recovery (see spmi_regulator_vs_ocp_work()) */
	struct delayed_work ocp_work;
	struct regmap *regmap;
	struct spmi_voltage_set_points *set_points;
	enum spmi_regulator_logical_type logical_type;
	int ocp_irq;
	/* OCP events seen since the switch was last enabled */
	int ocp_count;
	int ocp_max_retries;
	int ocp_retry_delay_ms;
	/* load (uA) at or above which HPM mode is selected */
	int hpm_min_load;
	/* uV/us; used to compute voltage settle times */
	int slew_rate;
	/* timestamp of last switch enable, for OCP event aging */
	ktime_t vs_enable_time;
	/* SPMI base address added to every register offset */
	u16 base;
	struct list_head node;
};
/*
 * Maps a hardware (type, subtype, digital-major revision range) triple to the
 * regulator_ops, set points, and HPM threshold the driver should use for it.
 */
struct spmi_regulator_mapping {
	enum spmi_regulator_type type;
	enum spmi_regulator_subtype subtype;
	enum spmi_regulator_logical_type logical_type;
	u32 revision_min;
	u32 revision_max;
	const struct regulator_ops *ops;
	struct spmi_voltage_set_points *set_points;
	int hpm_min_load;
};
/* Static per-board description of a single regulator peripheral. */
struct spmi_regulator_data {
	const char *name;
	u16 base;
	const char *supply;
	/* name of the OCP interrupt, if any */
	const char *ocp;
	/* non-zero to override the hardware-reported type register */
	u16 force_type;
};
/* Initializer for one spmi_regulator_mapping table entry. */
#define SPMI_VREG(_type, _subtype, _dig_major_min, _dig_major_max, \
		      _logical_type, _ops_val, _set_points_val, _hpm_min_load) \
	{ \
		.type		= SPMI_REGULATOR_TYPE_##_type, \
		.subtype	= SPMI_REGULATOR_SUBTYPE_##_subtype, \
		.revision_min	= _dig_major_min, \
		.revision_max	= _dig_major_max, \
		.logical_type	= SPMI_REGULATOR_LOGICAL_TYPE_##_logical_type, \
		.ops		= &spmi_##_ops_val##_ops, \
		.set_points	= &_set_points_val##_set_points, \
		.hpm_min_load	= _hpm_min_load, \
	}
/* Shorthand for voltage-switch entries: no set points or HPM threshold. */
#define SPMI_VREG_VS(_subtype, _dig_major_min, _dig_major_max) \
	{ \
		.type		= SPMI_REGULATOR_TYPE_VS, \
		.subtype	= SPMI_REGULATOR_SUBTYPE_##_subtype, \
		.revision_min	= _dig_major_min, \
		.revision_max	= _dig_major_max, \
		.logical_type	= SPMI_REGULATOR_LOGICAL_TYPE_VS, \
		.ops		= &spmi_vs_ops, \
	}
/* Initializer for one struct spmi_voltage_range entry; see that struct. */
#define SPMI_VOLTAGE_RANGE(_range_sel, _min_uV, _set_point_min_uV, \
			_set_point_max_uV, _max_uV, _step_uV) \
	{ \
		.min_uV			= _min_uV, \
		.max_uV			= _max_uV, \
		.set_point_min_uV	= _set_point_min_uV, \
		.set_point_max_uV	= _set_point_max_uV, \
		.step_uV		= _step_uV, \
		.range_sel		= _range_sel, \
	}
/* Wraps a name##_ranges[] array in a spmi_voltage_set_points descriptor. */
#define DEFINE_SPMI_SET_POINTS(name) \
struct spmi_voltage_set_points name##_set_points = { \
	.range	= name##_ranges, \
	.count	= ARRAY_SIZE(name##_ranges), \
}
/*
 * These tables contain the physically available PMIC regulator voltage setpoint
 * ranges.  Where two ranges overlap in hardware, one of the ranges is trimmed
 * to ensure that the setpoints available to software are monotonically
 * increasing and unique.  The set_voltage callback functions expect these
 * properties to hold.
 *
 * Column order (see SPMI_VOLTAGE_RANGE):
 *   range_sel, min_uV, set_point_min_uV, set_point_max_uV, max_uV, step_uV
 */
static struct spmi_voltage_range pldo_ranges[] = {
	SPMI_VOLTAGE_RANGE(2,  750000,  750000, 1537500, 1537500, 12500),
	SPMI_VOLTAGE_RANGE(3, 1500000, 1550000, 3075000, 3075000, 25000),
	SPMI_VOLTAGE_RANGE(4, 1750000, 3100000, 4900000, 4900000, 50000),
};
static struct spmi_voltage_range nldo1_ranges[] = {
	SPMI_VOLTAGE_RANGE(2,  750000,  750000, 1537500, 1537500, 12500),
};
static struct spmi_voltage_range nldo2_ranges[] = {
	SPMI_VOLTAGE_RANGE(0,  375000,       0,       0, 1537500, 12500),
	SPMI_VOLTAGE_RANGE(1,  375000,  375000,  768750,  768750,  6250),
	SPMI_VOLTAGE_RANGE(2,  750000,  775000, 1537500, 1537500, 12500),
};
static struct spmi_voltage_range nldo3_ranges[] = {
	SPMI_VOLTAGE_RANGE(0,  375000,  375000, 1537500, 1537500, 12500),
	SPMI_VOLTAGE_RANGE(1,  375000,       0,       0, 1537500, 12500),
	SPMI_VOLTAGE_RANGE(2,  750000,       0,       0, 1537500, 12500),
};
static struct spmi_voltage_range ln_ldo_ranges[] = {
	SPMI_VOLTAGE_RANGE(1,  690000,  690000, 1110000, 1110000, 60000),
	SPMI_VOLTAGE_RANGE(0, 1380000, 1380000, 2220000, 2220000, 120000),
};
static struct spmi_voltage_range smps_ranges[] = {
	SPMI_VOLTAGE_RANGE(0,  375000,  375000, 1562500, 1562500, 12500),
	SPMI_VOLTAGE_RANGE(1, 1550000, 1575000, 3125000, 3125000, 25000),
};
static struct spmi_voltage_range ftsmps_ranges[] = {
	SPMI_VOLTAGE_RANGE(0,       0,  350000, 1275000, 1275000,  5000),
	SPMI_VOLTAGE_RANGE(1,       0, 1280000, 2040000, 2040000, 10000),
};
static struct spmi_voltage_range ftsmps2p5_ranges[] = {
	SPMI_VOLTAGE_RANGE(0,   80000,  350000, 1355000, 1355000,  5000),
	SPMI_VOLTAGE_RANGE(1,  160000, 1360000, 2200000, 2200000, 10000),
};
static struct spmi_voltage_range ftsmps426_ranges[] = {
	SPMI_VOLTAGE_RANGE(0,       0,  320000, 1352000, 1352000,  4000),
};
static struct spmi_voltage_range boost_ranges[] = {
	SPMI_VOLTAGE_RANGE(0, 4000000, 4000000, 5550000, 5550000, 50000),
};
static struct spmi_voltage_range boost_byp_ranges[] = {
	SPMI_VOLTAGE_RANGE(0, 2500000, 2500000, 5200000, 5650000, 50000),
};
static struct spmi_voltage_range ult_lo_smps_ranges[] = {
	SPMI_VOLTAGE_RANGE(0,  375000,  375000, 1562500, 1562500, 12500),
	SPMI_VOLTAGE_RANGE(1,  750000,       0,       0, 1525000, 25000),
};
static struct spmi_voltage_range ult_ho_smps_ranges[] = {
	SPMI_VOLTAGE_RANGE(0, 1550000, 1550000, 2325000, 2325000, 25000),
};
static struct spmi_voltage_range ult_nldo_ranges[] = {
	SPMI_VOLTAGE_RANGE(0,  375000,  375000, 1537500, 1537500, 12500),
};
static struct spmi_voltage_range ult_pldo_ranges[] = {
	SPMI_VOLTAGE_RANGE(0, 1750000, 1750000, 3337500, 3337500, 12500),
};
static struct spmi_voltage_range pldo660_ranges[] = {
	SPMI_VOLTAGE_RANGE(0, 1504000, 1504000, 3544000, 3544000, 8000),
};
static struct spmi_voltage_range nldo660_ranges[] = {
	SPMI_VOLTAGE_RANGE(0,  320000,  320000, 1304000, 1304000, 8000),
};
static struct spmi_voltage_range ht_lvpldo_ranges[] = {
	SPMI_VOLTAGE_RANGE(0, 1504000, 1504000, 2000000, 2000000, 8000),
};
static struct spmi_voltage_range ht_nldo_ranges[] = {
	SPMI_VOLTAGE_RANGE(0,  312000,  312000, 1304000, 1304000, 8000),
};
static struct spmi_voltage_range hfs430_ranges[] = {
	SPMI_VOLTAGE_RANGE(0,  320000,  320000, 2040000, 2040000, 8000),
};
static struct spmi_voltage_range ht_p150_ranges[] = {
	SPMI_VOLTAGE_RANGE(0, 1616000, 1616000, 3304000, 3304000, 8000),
};
static struct spmi_voltage_range ht_p600_ranges[] = {
	SPMI_VOLTAGE_RANGE(0, 1704000, 1704000, 1896000, 1896000, 8000),
};
static struct spmi_voltage_range nldo_510_ranges[] = {
	SPMI_VOLTAGE_RANGE(0,  320000,  320000, 1304000, 1304000, 8000),
};
static struct spmi_voltage_range ftsmps510_ranges[] = {
	SPMI_VOLTAGE_RANGE(0,  300000,  300000, 1372000, 1372000, 4000),
};
/* Set-point descriptors wrapping the range tables above (n_voltages is
 * filled in at probe time). */
static DEFINE_SPMI_SET_POINTS(pldo);
static DEFINE_SPMI_SET_POINTS(nldo1);
static DEFINE_SPMI_SET_POINTS(nldo2);
static DEFINE_SPMI_SET_POINTS(nldo3);
static DEFINE_SPMI_SET_POINTS(ln_ldo);
static DEFINE_SPMI_SET_POINTS(smps);
static DEFINE_SPMI_SET_POINTS(ftsmps);
static DEFINE_SPMI_SET_POINTS(ftsmps2p5);
static DEFINE_SPMI_SET_POINTS(ftsmps426);
static DEFINE_SPMI_SET_POINTS(boost);
static DEFINE_SPMI_SET_POINTS(boost_byp);
static DEFINE_SPMI_SET_POINTS(ult_lo_smps);
static DEFINE_SPMI_SET_POINTS(ult_ho_smps);
static DEFINE_SPMI_SET_POINTS(ult_nldo);
static DEFINE_SPMI_SET_POINTS(ult_pldo);
static DEFINE_SPMI_SET_POINTS(pldo660);
static DEFINE_SPMI_SET_POINTS(nldo660);
static DEFINE_SPMI_SET_POINTS(ht_lvpldo);
static DEFINE_SPMI_SET_POINTS(ht_nldo);
static DEFINE_SPMI_SET_POINTS(hfs430);
static DEFINE_SPMI_SET_POINTS(ht_p150);
static DEFINE_SPMI_SET_POINTS(ht_p600);
static DEFINE_SPMI_SET_POINTS(nldo_510);
static DEFINE_SPMI_SET_POINTS(ftsmps510);
/* Read @len bytes starting at register offset @addr of this regulator. */
static inline int spmi_vreg_read(struct spmi_regulator *vreg, u16 addr, u8 *buf,
				 int len)
{
	u16 reg = vreg->base + addr;

	return regmap_bulk_read(vreg->regmap, reg, buf, len);
}
/* Write @len bytes from @buf starting at register offset @addr. */
static inline int spmi_vreg_write(struct spmi_regulator *vreg, u16 addr,
				  u8 *buf, int len)
{
	u16 reg = vreg->base + addr;

	return regmap_bulk_write(vreg->regmap, reg, buf, len);
}
/* Read-modify-write the bits in @mask at register offset @addr to @val. */
static int spmi_vreg_update_bits(struct spmi_regulator *vreg, u16 addr, u8 val,
				 u8 mask)
{
	u16 reg = vreg->base + addr;

	return regmap_update_bits(vreg->regmap, reg, mask, val);
}
/* Enable a voltage switch, resetting OCP bookkeeping if OCP IRQ is used. */
static int spmi_regulator_vs_enable(struct regulator_dev *rdev)
{
	struct spmi_regulator *vreg = rdev_get_drvdata(rdev);

	if (vreg->ocp_irq != 0) {
		/* Fresh enable: restart OCP event counting and aging. */
		vreg->vs_enable_time = ktime_get();
		vreg->ocp_count = 0;
	}

	return regulator_enable_regmap(rdev);
}
/*
 * Enable over-current protection on a voltage switch by writing the OCP
 * override bit.  The hardware current limit is fixed, so only a request for
 * protection-severity shutoff with no custom limit is accepted.
 */
static int spmi_regulator_vs_ocp(struct regulator_dev *rdev, int lim_uA,
				 int severity, bool enable)
{
	struct spmi_regulator *vreg = rdev_get_drvdata(rdev);
	u8 reg = SPMI_VS_OCP_OVERRIDE;

	/* No configurable limit, no warning-only mode, no disabling. */
	if (lim_uA || !enable || severity != REGULATOR_SEVERITY_PROT)
		return -EINVAL;

	return spmi_vreg_write(vreg, SPMI_VS_REG_OCP, &reg, 1);
}
/*
 * Find the software selector of the lowest supported set point inside
 * [min_uV, max_uV], searching across all voltage ranges.  Returns the
 * selector on success or -EINVAL if no set point satisfies the request.
 */
static int spmi_regulator_select_voltage(struct spmi_regulator *vreg,
					 int min_uV, int max_uV)
{
	const struct spmi_voltage_range *range;
	int uV = min_uV;
	int lim_min_uV, lim_max_uV, i, range_id, range_max_uV;
	int selector, voltage_sel;

	/* Check if request voltage is outside of physically settable range. */
	lim_min_uV = vreg->set_points->range[0].set_point_min_uV;
	lim_max_uV =
		vreg->set_points->range[vreg->set_points->count - 1].set_point_max_uV;

	/* Clamp up to the lowest settable voltage if the window allows it. */
	if (uV < lim_min_uV && max_uV >= lim_min_uV)
		uV = lim_min_uV;

	if (uV < lim_min_uV || uV > lim_max_uV) {
		dev_err(vreg->dev,
			"request v=[%d, %d] is outside possible v=[%d, %d]\n",
			 min_uV, max_uV, lim_min_uV, lim_max_uV);
		return -EINVAL;
	}

	/* Find the range which uV is inside of. */
	for (i = vreg->set_points->count - 1; i > 0; i--) {
		range_max_uV = vreg->set_points->range[i - 1].set_point_max_uV;
		/* range_max_uV == 0 marks a non-preferred range; skip it. */
		if (uV > range_max_uV && range_max_uV > 0)
			break;
	}

	range_id = i;
	range = &vreg->set_points->range[range_id];

	/*
	 * Force uV to be an allowed set point by applying a ceiling function to
	 * the uV value.
	 */
	voltage_sel = DIV_ROUND_UP(uV - range->min_uV, range->step_uV);
	uV = voltage_sel * range->step_uV + range->min_uV;

	if (uV > max_uV) {
		dev_err(vreg->dev,
			"request v=[%d, %d] cannot be met by any set point; "
			"next set point: %d\n",
			min_uV, max_uV, uV);
		return -EINVAL;
	}

	/* Selector = sum of set points in lower ranges + offset within range. */
	selector = 0;
	for (i = 0; i < range_id; i++)
		selector += vreg->set_points->range[i].n_voltages;
	selector += (uV - range->set_point_min_uV) / range->step_uV;

	return selector;
}
/*
 * Translate a software selector into the hardware (range_sel, voltage_sel)
 * register pair.  Returns 0 on success or -EINVAL if the selector exceeds
 * the total number of set points.
 */
static int spmi_sw_selector_to_hw(struct spmi_regulator *vreg,
				  unsigned selector, u8 *range_sel,
				  u8 *voltage_sel)
{
	const struct spmi_voltage_range *range, *end;
	unsigned offset;

	range = vreg->set_points->range;
	end = range + vreg->set_points->count;

	for (; range < end; range++) {
		if (selector < range->n_voltages) {
			/*
			 * hardware selectors between set point min and real
			 * min are invalid so we ignore them
			 */
			offset = range->set_point_min_uV - range->min_uV;
			offset /= range->step_uV;
			*voltage_sel = selector + offset;
			*range_sel = range->range_sel;
			return 0;
		}

		selector -= range->n_voltages;
	}

	return -EINVAL;
}
/*
 * Translate a hardware voltage selector within @range back into a software
 * selector.  Returns the software selector, or -EINVAL if @hw_sel lies in
 * the invalid region outside the range's preferred set points.
 */
static int spmi_hw_selector_to_sw(struct spmi_regulator *vreg, u8 hw_sel,
				  const struct spmi_voltage_range *range)
{
	unsigned sw_sel = 0;
	unsigned offset, max_hw_sel;
	const struct spmi_voltage_range *r = vreg->set_points->range;
	const struct spmi_voltage_range *end = r + vreg->set_points->count;

	for (; r < end; r++) {
		if (r == range && range->n_voltages) {
			/*
			 * hardware selectors between set point min and real
			 * min and between set point max and real max are
			 * invalid so we return an error if they're
			 * programmed into the hardware
			 */
			offset = range->set_point_min_uV - range->min_uV;
			offset /= range->step_uV;
			if (hw_sel < offset)
				return -EINVAL;

			max_hw_sel = range->set_point_max_uV - range->min_uV;
			max_hw_sel /= range->step_uV;
			if (hw_sel > max_hw_sel)
				return -EINVAL;

			return sw_sel + hw_sel - offset;
		}
		/* Not the target range: accumulate its set point count. */
		sw_sel += r->n_voltages;
	}

	return -EINVAL;
}
/*
 * Read the hardware range-select register and return the matching voltage
 * range descriptor, or NULL if the register can't be read or the value
 * doesn't match any known range.
 */
static const struct spmi_voltage_range *
spmi_regulator_find_range(struct spmi_regulator *vreg)
{
	u8 range_sel;
	const struct spmi_voltage_range *range, *end;

	range = vreg->set_points->range;
	end = range + vreg->set_points->count;

	/*
	 * Check the read result: previously a failed bus read left range_sel
	 * uninitialized and could match an arbitrary range.
	 */
	if (spmi_vreg_read(vreg, SPMI_COMMON_REG_VOLTAGE_RANGE, &range_sel, 1))
		return NULL;

	for (; range < end; range++)
		if (range->range_sel == range_sel)
			return range;

	return NULL;
}
/*
 * Like spmi_regulator_select_voltage(), but prefer a set point within the
 * range currently selected in hardware, falling back to the full search if
 * that range can't satisfy the request.
 */
static int spmi_regulator_select_voltage_same_range(struct spmi_regulator *vreg,
		int min_uV, int max_uV)
{
	const struct spmi_voltage_range *range;
	int uV = min_uV;
	int i, selector;

	range = spmi_regulator_find_range(vreg);
	if (!range)
		goto different_range;

	/* Clamp up to the range minimum if the request window allows it. */
	if (uV < range->min_uV && max_uV >= range->min_uV)
		uV = range->min_uV;

	if (uV < range->min_uV || uV > range->max_uV) {
		/* Current range doesn't support the requested voltage. */
		goto different_range;
	}

	/*
	 * Force uV to be an allowed set point by applying a ceiling function to
	 * the uV value.
	 */
	uV = DIV_ROUND_UP(uV - range->min_uV, range->step_uV);
	uV = uV * range->step_uV + range->min_uV;

	if (uV > max_uV) {
		/*
		 * No set point in the current voltage range is within the
		 * requested min_uV to max_uV range.
		 */
		goto different_range;
	}

	/* Convert uV back into a software selector over all ranges. */
	selector = 0;
	for (i = 0; i < vreg->set_points->count; i++) {
		if (uV >= vreg->set_points->range[i].set_point_min_uV
		    && uV <= vreg->set_points->range[i].set_point_max_uV) {
			selector +=
			    (uV - vreg->set_points->range[i].set_point_min_uV)
				/ vreg->set_points->range[i].step_uV;
			break;
		}

		selector += vreg->set_points->range[i].n_voltages;
	}

	if (selector >= vreg->set_points->n_voltages)
		goto different_range;

	return selector;

different_range:
	return spmi_regulator_select_voltage(vreg, min_uV, max_uV);
}
/* map_voltage op: pick the selector for [min_uV, max_uV]. */
static int spmi_regulator_common_map_voltage(struct regulator_dev *rdev,
					     int min_uV, int max_uV)
{
	struct spmi_regulator *vreg = rdev_get_drvdata(rdev);

	/*
	 * Favor staying in the current voltage range if possible. This avoids
	 * voltage spikes that occur when changing the voltage range.
	 */
	return spmi_regulator_select_voltage_same_range(vreg, min_uV, max_uV);
}
/*
 * set_voltage_sel op: program the range-select and voltage-set registers
 * (which are adjacent) in a single two-byte write.
 */
static int
spmi_regulator_common_set_voltage(struct regulator_dev *rdev, unsigned selector)
{
	struct spmi_regulator *vreg = rdev_get_drvdata(rdev);
	u8 range_sel, voltage_sel;
	u8 buf[2];
	int ret;

	ret = spmi_sw_selector_to_hw(vreg, selector, &range_sel, &voltage_sel);
	if (ret)
		return ret;

	buf[0] = range_sel;
	buf[1] = voltage_sel;

	return spmi_vreg_write(vreg, SPMI_COMMON_REG_VOLTAGE_RANGE, buf, 2);
}
static int spmi_regulator_common_list_voltage(struct regulator_dev *rdev,
unsigned selector);
/*
 * set_voltage_sel op for FTSMPS426: the hardware takes the voltage in
 * millivolts as a little-endian 16-bit value.
 */
static int spmi_regulator_ftsmps426_set_voltage(struct regulator_dev *rdev,
						unsigned selector)
{
	struct spmi_regulator *vreg = rdev_get_drvdata(rdev);
	int mV = spmi_regulator_common_list_voltage(rdev, selector) / 1000;
	u8 buf[2];

	buf[0] = mV & 0xff;	/* LSB first */
	buf[1] = mV >> 8;

	return spmi_vreg_write(vreg, SPMI_FTSMPS426_REG_VOLTAGE_LSB, buf, 2);
}
/*
 * set_voltage_time_sel op: settle time (us) for a selector change, derived
 * from the voltage delta and the regulator's slew rate (uV/us).
 */
static int spmi_regulator_set_voltage_time_sel(struct regulator_dev *rdev,
		unsigned int old_selector, unsigned int new_selector)
{
	struct spmi_regulator *vreg = rdev_get_drvdata(rdev);
	int old_uV = spmi_regulator_common_list_voltage(rdev, old_selector);
	int new_uV = spmi_regulator_common_list_voltage(rdev, new_selector);

	return DIV_ROUND_UP(abs(new_uV - old_uV), vreg->slew_rate);
}
/*
 * get_voltage_sel op: read the hardware voltage-set register and convert it
 * to a software selector using the currently selected range.
 */
static int spmi_regulator_common_get_voltage(struct regulator_dev *rdev)
{
	struct spmi_regulator *vreg = rdev_get_drvdata(rdev);
	const struct spmi_voltage_range *range;
	u8 voltage_sel;
	int ret;

	/*
	 * Check the read result: previously a failed read left voltage_sel
	 * uninitialized and converted garbage into a selector.
	 */
	ret = spmi_vreg_read(vreg, SPMI_COMMON_REG_VOLTAGE_SET,
			     &voltage_sel, 1);
	if (ret)
		return ret;

	range = spmi_regulator_find_range(vreg);
	if (!range)
		return -EINVAL;

	return spmi_hw_selector_to_sw(vreg, voltage_sel, range);
}
/*
 * get_voltage_sel op for FTSMPS426: the hardware reports millivolts as a
 * little-endian 16-bit value; convert to uV and then to a selector within
 * the single range.
 * NOTE(review): the spmi_vreg_read() result is not checked — on a bus error
 * buf is used uninitialized; consider propagating the error.
 */
static int spmi_regulator_ftsmps426_get_voltage(struct regulator_dev *rdev)
{
	struct spmi_regulator *vreg = rdev_get_drvdata(rdev);
	const struct spmi_voltage_range *range;
	u8 buf[2];
	int uV;

	spmi_vreg_read(vreg, SPMI_FTSMPS426_REG_VOLTAGE_LSB, buf, 2);

	/* buf[0] is the mV LSB, buf[1] the MSB. */
	uV = (((unsigned int)buf[1] << 8) | (unsigned int)buf[0]) * 1000;
	range = vreg->set_points->range;

	return (uV - range->set_point_min_uV) / range->step_uV;
}
/* map_voltage op for single-range regulators: plain full-table search. */
static int spmi_regulator_single_map_voltage(struct regulator_dev *rdev,
		int min_uV, int max_uV)
{
	struct spmi_regulator *vreg = rdev_get_drvdata(rdev);

	return spmi_regulator_select_voltage(vreg, min_uV, max_uV);
}
/*
 * set_voltage_sel op for regulator types without a range-select register:
 * only the voltage-set register needs to be programmed.
 */
static int spmi_regulator_single_range_set_voltage(struct regulator_dev *rdev,
						   unsigned selector)
{
	struct spmi_regulator *vreg = rdev_get_drvdata(rdev);
	u8 voltage_sel = selector;

	return spmi_vreg_write(vreg, SPMI_COMMON_REG_VOLTAGE_SET,
			       &voltage_sel, 1);
}
/*
 * get_voltage_sel op for single-range regulators: the raw register value is
 * the software selector.
 */
static int spmi_regulator_single_range_get_voltage(struct regulator_dev *rdev)
{
	struct spmi_regulator *vreg = rdev_get_drvdata(rdev);
	u8 voltage_sel;
	int ret;

	ret = spmi_vreg_read(vreg, SPMI_COMMON_REG_VOLTAGE_SET,
			     &voltage_sel, 1);

	return ret ? ret : voltage_sel;
}
/*
 * set_voltage_sel op for ULT low-range SMPS: the two ranges share one VSET
 * register, with range 1 distinguished by the upper bits.
 */
static int spmi_regulator_ult_lo_smps_set_voltage(struct regulator_dev *rdev,
						  unsigned selector)
{
	struct spmi_regulator *vreg = rdev_get_drvdata(rdev);
	int ret;
	u8 range_sel, voltage_sel;

	ret = spmi_sw_selector_to_hw(vreg, selector, &range_sel, &voltage_sel);
	if (ret)
		return ret;

	/*
	 * Calculate VSET based on range
	 * In case of range 0: voltage_sel is a 7 bit value, can be written
	 * without any modification.
	 * In case of range 1: voltage_sel is a 5 bit value, bits[7-5] set to
	 * [011].
	 */
	if (range_sel == 1)
		voltage_sel |= ULT_SMPS_RANGE_SPLIT;

	return spmi_vreg_update_bits(vreg, SPMI_COMMON_REG_VOLTAGE_SET,
				     voltage_sel, 0xff);
}
/*
 * get_voltage_sel op for ULT low-range SMPS: strip the range-split marker
 * bits for range 1 before converting back to a software selector.
 * NOTE(review): the spmi_vreg_read() result is not checked — on a bus error
 * voltage_sel is used uninitialized.
 */
static int spmi_regulator_ult_lo_smps_get_voltage(struct regulator_dev *rdev)
{
	struct spmi_regulator *vreg = rdev_get_drvdata(rdev);
	const struct spmi_voltage_range *range;
	u8 voltage_sel;

	spmi_vreg_read(vreg, SPMI_COMMON_REG_VOLTAGE_SET, &voltage_sel, 1);

	range = spmi_regulator_find_range(vreg);
	if (!range)
		return -EINVAL;

	/* Range 1 selectors carry the split marker in bits [7:5]. */
	if (range->range_sel == 1)
		voltage_sel &= ~ULT_SMPS_RANGE_SPLIT;

	return spmi_hw_selector_to_sw(vreg, voltage_sel, range);
}
/*
 * list_voltage op: convert a software selector into microvolts by walking
 * the ranges.  Returns 0 for an out-of-bounds selector (per the regulator
 * core's list_voltage convention).
 */
static int spmi_regulator_common_list_voltage(struct regulator_dev *rdev,
					      unsigned selector)
{
	struct spmi_regulator *vreg = rdev_get_drvdata(rdev);
	int uV = 0;
	int i;

	if (selector >= vreg->set_points->n_voltages)
		return 0;

	for (i = 0; i < vreg->set_points->count; i++) {
		if (selector < vreg->set_points->range[i].n_voltages) {
			/* Selector falls inside this range. */
			uV = selector * vreg->set_points->range[i].step_uV
				+ vreg->set_points->range[i].set_point_min_uV;
			break;
		}

		selector -= vreg->set_points->range[i].n_voltages;
	}

	return uV;
}
/* set_bypass op: set or clear the bypass bit in the mode register. */
static int
spmi_regulator_common_set_bypass(struct regulator_dev *rdev, bool enable)
{
	struct spmi_regulator *vreg = rdev_get_drvdata(rdev);
	u8 mask = SPMI_COMMON_MODE_BYPASS_MASK;

	return spmi_vreg_update_bits(vreg, SPMI_COMMON_REG_MODE,
				     enable ? mask : 0, mask);
}
/* get_bypass op: report whether the bypass bit is set in the mode register. */
static int
spmi_regulator_common_get_bypass(struct regulator_dev *rdev, bool *enable)
{
	struct spmi_regulator *vreg = rdev_get_drvdata(rdev);
	u8 val;
	int ret;

	ret = spmi_vreg_read(vreg, SPMI_COMMON_REG_MODE, &val, 1);
	/*
	 * Check before use: previously *enable was computed from an
	 * uninitialized val when the read failed.
	 */
	if (ret)
		return ret;

	*enable = val & SPMI_COMMON_MODE_BYPASS_MASK;

	return 0;
}
/*
 * get_mode op for common-layout regulators: HPM bit -> NORMAL, AUTO bit ->
 * FAST, neither -> IDLE (LPM).
 */
static unsigned int spmi_regulator_common_get_mode(struct regulator_dev *rdev)
{
	struct spmi_regulator *vreg = rdev_get_drvdata(rdev);
	u8 reg;

	/* Fix mis-encoded "&reg" (was corrupted to a '®' character). */
	spmi_vreg_read(vreg, SPMI_COMMON_REG_MODE, &reg, 1);

	reg &= SPMI_COMMON_MODE_HPM_MASK | SPMI_COMMON_MODE_AUTO_MASK;

	switch (reg) {
	case SPMI_COMMON_MODE_HPM_MASK:
		return REGULATOR_MODE_NORMAL;
	case SPMI_COMMON_MODE_AUTO_MASK:
		return REGULATOR_MODE_FAST;
	default:
		return REGULATOR_MODE_IDLE;
	}
}
/*
 * get_mode op for FTSMPS426: the mode register holds an enumerated value
 * (see SPMI_FTSMPS426_MODE_*), not a bit mask.
 */
static unsigned int spmi_regulator_ftsmps426_get_mode(struct regulator_dev *rdev)
{
	struct spmi_regulator *vreg = rdev_get_drvdata(rdev);
	u8 reg;

	/* Fix mis-encoded "&reg" (was corrupted to a '®' character). */
	spmi_vreg_read(vreg, SPMI_COMMON_REG_MODE, &reg, 1);

	switch (reg) {
	case SPMI_FTSMPS426_MODE_HPM_MASK:
		return REGULATOR_MODE_NORMAL;
	case SPMI_FTSMPS426_MODE_AUTO_MASK:
		return REGULATOR_MODE_FAST;
	default:
		return REGULATOR_MODE_IDLE;
	}
}
/*
 * get_mode op for HFSMPS (third common layout): the mode register holds an
 * enumerated value (see SPMI_HFSMPS_MODE_*).
 */
static unsigned int spmi_regulator_hfsmps_get_mode(struct regulator_dev *rdev)
{
	struct spmi_regulator *vreg = rdev_get_drvdata(rdev);
	u8 reg;

	/* Fix mis-encoded "&reg" (was corrupted to a '®' character). */
	spmi_vreg_read(vreg, SPMI_COMMON_REG_MODE, &reg, 1);

	switch (reg) {
	case SPMI_HFSMPS_MODE_HPM_MASK:
		return REGULATOR_MODE_NORMAL;
	case SPMI_HFSMPS_MODE_AUTO_MASK:
		return REGULATOR_MODE_FAST;
	default:
		return REGULATOR_MODE_IDLE;
	}
}
/*
 * set_mode op for common-layout regulators: NORMAL sets the HPM bit, FAST
 * sets the AUTO bit, anything else clears both (LPM).
 */
static int
spmi_regulator_common_set_mode(struct regulator_dev *rdev, unsigned int mode)
{
	struct spmi_regulator *vreg = rdev_get_drvdata(rdev);
	u8 mask = SPMI_COMMON_MODE_HPM_MASK | SPMI_COMMON_MODE_AUTO_MASK;
	u8 val = 0;

	if (mode == REGULATOR_MODE_NORMAL)
		val = SPMI_COMMON_MODE_HPM_MASK;
	else if (mode == REGULATOR_MODE_FAST)
		val = SPMI_COMMON_MODE_AUTO_MASK;

	return spmi_vreg_update_bits(vreg, SPMI_COMMON_REG_MODE, val, mask);
}
/*
 * set_mode op for FTSMPS426: program the enumerated mode value.  Only
 * NORMAL, FAST, and IDLE are selectable through this op.
 */
static int
spmi_regulator_ftsmps426_set_mode(struct regulator_dev *rdev, unsigned int mode)
{
	struct spmi_regulator *vreg = rdev_get_drvdata(rdev);
	u8 val;

	switch (mode) {
	case REGULATOR_MODE_NORMAL:
		val = SPMI_FTSMPS426_MODE_HPM_MASK;
		break;
	case REGULATOR_MODE_FAST:
		val = SPMI_FTSMPS426_MODE_AUTO_MASK;
		break;
	case REGULATOR_MODE_IDLE:
		val = SPMI_FTSMPS426_MODE_LPM_MASK;
		break;
	default:
		return -EINVAL;
	}

	return spmi_vreg_update_bits(vreg, SPMI_COMMON_REG_MODE, val,
				     SPMI_FTSMPS426_MODE_MASK);
}
/*
 * set_mode op for HFSMPS-layout regulators.  IDLE maps to retention mode on
 * FTSMPS3 and to LPM on the other types.
 */
static int
spmi_regulator_hfsmps_set_mode(struct regulator_dev *rdev, unsigned int mode)
{
	struct spmi_regulator *vreg = rdev_get_drvdata(rdev);
	u8 val;

	switch (mode) {
	case REGULATOR_MODE_NORMAL:
		val = SPMI_HFSMPS_MODE_HPM_MASK;
		break;
	case REGULATOR_MODE_FAST:
		val = SPMI_HFSMPS_MODE_AUTO_MASK;
		break;
	case REGULATOR_MODE_IDLE:
		if (vreg->logical_type == SPMI_REGULATOR_LOGICAL_TYPE_FTSMPS3)
			val = SPMI_HFSMPS_MODE_RETENTION_MASK;
		else
			val = SPMI_HFSMPS_MODE_LPM_MASK;
		break;
	default:
		return -EINVAL;
	}

	return spmi_vreg_update_bits(vreg, SPMI_COMMON_REG_MODE, val,
				     SPMI_HFSMPS_MODE_MASK);
}
/*
 * set_load op: choose HPM (NORMAL) when the expected load reaches the
 * regulator's HPM threshold, LPM (IDLE) otherwise.
 */
static int
spmi_regulator_common_set_load(struct regulator_dev *rdev, int load_uA)
{
	struct spmi_regulator *vreg = rdev_get_drvdata(rdev);
	unsigned int mode = load_uA >= vreg->hpm_min_load ?
				REGULATOR_MODE_NORMAL : REGULATOR_MODE_IDLE;

	return spmi_regulator_common_set_mode(rdev, mode);
}
/* set_pull_down op: set-only; the core never requests clearing it. */
static int spmi_regulator_common_set_pull_down(struct regulator_dev *rdev)
{
	struct spmi_regulator *vreg = rdev_get_drvdata(rdev);

	return spmi_vreg_update_bits(vreg, SPMI_COMMON_REG_PULL_DOWN,
				     SPMI_COMMON_PULL_DOWN_ENABLE_MASK,
				     SPMI_COMMON_PULL_DOWN_ENABLE_MASK);
}
/* set_pull_down op for the HFSMPS layout (different register offset). */
static int spmi_regulator_hfsmps_set_pull_down(struct regulator_dev *rdev)
{
	struct spmi_regulator *vreg = rdev_get_drvdata(rdev);

	return spmi_vreg_update_bits(vreg, SPMI_HFSMPS_REG_PULL_DOWN,
				     SPMI_COMMON_PULL_DOWN_ENABLE_MASK,
				     SPMI_COMMON_PULL_DOWN_ENABLE_MASK);
}
/* set_soft_start op: enable soft start in the common soft-start register. */
static int spmi_regulator_common_set_soft_start(struct regulator_dev *rdev)
{
	struct spmi_regulator *vreg = rdev_get_drvdata(rdev);

	return spmi_vreg_update_bits(vreg, SPMI_COMMON_REG_SOFT_START,
				     SPMI_LDO_SOFT_START_ENABLE_MASK,
				     SPMI_LDO_SOFT_START_ENABLE_MASK);
}
/*
 * set_input_current_limit op for boost regulators: program the 3-bit current
 * limit field (steps of 500) and enable the limiter.
 * NOTE(review): the limit is divided by 500 and must fit in 3 bits (max
 * value 4000), which looks like the value is effectively in mA steps despite
 * the _uA parameter name — confirm against how the framework passes it.
 */
static int spmi_regulator_set_ilim(struct regulator_dev *rdev, int ilim_uA)
{
	struct spmi_regulator *vreg = rdev_get_drvdata(rdev);
	enum spmi_regulator_logical_type type = vreg->logical_type;
	unsigned int current_reg;
	u8 reg;
	u8 mask = SPMI_BOOST_CURRENT_LIMIT_MASK |
		  SPMI_BOOST_CURRENT_LIMIT_ENABLE_MASK;
	int max = (SPMI_BOOST_CURRENT_LIMIT_MASK + 1) * 500;

	/* BOOST and BOOST_BYP keep the limit at different offsets. */
	if (type == SPMI_REGULATOR_LOGICAL_TYPE_BOOST)
		current_reg = SPMI_BOOST_REG_CURRENT_LIMIT;
	else
		current_reg = SPMI_BOOST_BYP_REG_CURRENT_LIMIT;

	if (ilim_uA > max || ilim_uA <= 0)
		return -EINVAL;

	/* Round up to the next 500-unit step. */
	reg = (ilim_uA - 1) / 500;
	reg |= SPMI_BOOST_CURRENT_LIMIT_ENABLE_MASK;

	return spmi_vreg_update_bits(vreg, current_reg, reg, mask);
}
/*
 * Clear a latched over-current condition on a voltage switch by toggling the
 * enable bit off and back on, recording the re-enable time for OCP aging.
 */
static int spmi_regulator_vs_clear_ocp(struct spmi_regulator *vreg)
{
	int ret;

	ret = spmi_vreg_update_bits(vreg, SPMI_COMMON_REG_ENABLE,
		SPMI_COMMON_DISABLE, SPMI_COMMON_ENABLE_MASK);
	/*
	 * Check the disable write: previously its result was silently
	 * overwritten, so a failed disable still proceeded to "re-enable".
	 */
	if (ret)
		return ret;

	vreg->vs_enable_time = ktime_get();

	return spmi_vreg_update_bits(vreg, SPMI_COMMON_REG_ENABLE,
		SPMI_COMMON_ENABLE, SPMI_COMMON_ENABLE_MASK);
}
/* Delayed-work handler: retry clearing an over-current condition. */
static void spmi_regulator_vs_ocp_work(struct work_struct *work)
{
	struct spmi_regulator *vreg = container_of(to_delayed_work(work),
						   struct spmi_regulator,
						   ocp_work);

	spmi_regulator_vs_clear_ocp(vreg);
}
/*
 * OCP interrupt handler for voltage switches: attempt to clear the latched
 * condition immediately on the first event, back off to delayed retries for
 * subsequent events, and give up after ocp_max_retries.
 */
static irqreturn_t spmi_regulator_vs_ocp_isr(int irq, void *data)
{
	struct spmi_regulator *vreg = data;
	ktime_t ocp_irq_time;
	s64 ocp_trigger_delay_us;

	ocp_irq_time = ktime_get();
	ocp_trigger_delay_us = ktime_us_delta(ocp_irq_time,
					      vreg->vs_enable_time);

	/*
	 * Reset the OCP count if there is a large delay between switch enable
	 * and when OCP triggers. This is indicative of a hotplug event as
	 * opposed to a fault.
	 */
	if (ocp_trigger_delay_us > SPMI_VS_OCP_FAULT_DELAY_US)
		vreg->ocp_count = 0;

	/* Wait for switch output to settle back to 0 V after OCP triggered. */
	udelay(SPMI_VS_OCP_FALL_DELAY_US);

	vreg->ocp_count++;

	if (vreg->ocp_count == 1) {
		/* Immediately clear the over current condition. */
		spmi_regulator_vs_clear_ocp(vreg);
	} else if (vreg->ocp_count <= vreg->ocp_max_retries) {
		/* Schedule the over current clear task to run later. */
		schedule_delayed_work(&vreg->ocp_work,
			msecs_to_jiffies(vreg->ocp_retry_delay_ms) + 1);
	} else {
		dev_err(vreg->dev,
			"OCP triggered %d times; no further retries\n",
			vreg->ocp_count);
	}

	return IRQ_HANDLED;
}
/* SAW3 VCTL / AVS control register field masks */
#define SAW3_VCTL_DATA_MASK	0xFF
#define SAW3_VCTL_CLEAR_MASK	0x700FF
#define SAW3_AVS_CTL_EN_MASK	0x1
#define SAW3_AVS_CTL_TGGL_MASK	0x8000000
#define SAW3_AVS_CTL_CLEAR_MASK	0x7efc00
/* regmap of the SAW block; set up at probe when a SAW regulator exists */
static struct regmap *saw_regmap;
/*
 * Program a new voltage selector through the SAW3 block.  @data points to
 * the u8 hardware voltage selector.  Runs via smp_call_function_single() on
 * CPU 0 (see spmi_regulator_saw_set_voltage()).  AVS is temporarily turned
 * off around the write, and the function polls PMIC_STS (up to ~100 us) for
 * the new value before re-arming AVS.
 */
static void spmi_saw_set_vdd(void *data)
{
	u32 vctl, data3, avs_ctl, pmic_sts;
	bool avs_enabled = false;
	unsigned long timeout;
	u8 voltage_sel = *(u8 *)data;

	regmap_read(saw_regmap, SAW3_AVS_CTL, &avs_ctl);
	regmap_read(saw_regmap, SAW3_VCTL, &vctl);
	regmap_read(saw_regmap, SAW3_SPM_PMIC_DATA_3, &data3);

	/* select the band */
	vctl &= ~SAW3_VCTL_CLEAR_MASK;
	vctl |= (u32)voltage_sel;

	data3 &= ~SAW3_VCTL_CLEAR_MASK;
	data3 |= (u32)voltage_sel;

	/* If AVS is enabled, switch it off during the voltage change */
	avs_enabled = SAW3_AVS_CTL_EN_MASK & avs_ctl;
	if (avs_enabled) {
		avs_ctl &= ~SAW3_AVS_CTL_TGGL_MASK;
		regmap_write(saw_regmap, SAW3_AVS_CTL, avs_ctl);
	}

	regmap_write(saw_regmap, SAW3_RST, 1);
	regmap_write(saw_regmap, SAW3_VCTL, vctl);
	regmap_write(saw_regmap, SAW3_SPM_PMIC_DATA_3, data3);

	/* Poll until the PMIC reports the requested selector (or timeout). */
	timeout = jiffies + usecs_to_jiffies(100);
	do {
		regmap_read(saw_regmap, SAW3_PMIC_STS, &pmic_sts);
		pmic_sts &= SAW3_VCTL_DATA_MASK;
		if (pmic_sts == (u32)voltage_sel)
			break;

		cpu_relax();

	} while (time_before(jiffies, timeout));

	/* After successful voltage change, switch the AVS back on */
	if (avs_enabled) {
		pmic_sts &= 0x3f;
		/* Rebuild the AVS voltage window around the reached value. */
		avs_ctl &= ~SAW3_AVS_CTL_CLEAR_MASK;
		avs_ctl |= ((pmic_sts - 4) << 10);
		avs_ctl |= (pmic_sts << 17);
		avs_ctl |= SAW3_AVS_CTL_TGGL_MASK;
		regmap_write(saw_regmap, SAW3_AVS_CTL, avs_ctl);
	}
}
/*
 * set_voltage_sel op for SAW-controlled regulators.  Only range 0 can be
 * expressed through the SAW VCTL register; the actual write sequence runs
 * on CPU 0 via spmi_saw_set_vdd().
 */
static int
spmi_regulator_saw_set_voltage(struct regulator_dev *rdev, unsigned selector)
{
	struct spmi_regulator *vreg = rdev_get_drvdata(rdev);
	u8 range_sel, voltage_sel;
	int ret;

	ret = spmi_sw_selector_to_hw(vreg, selector, &range_sel, &voltage_sel);
	if (ret)
		return ret;

	if (range_sel != 0) {
		dev_dbg(&rdev->dev, "range_sel = %02X voltage_sel = %02X",
			range_sel, voltage_sel);
		return -EINVAL;
	}

	/* Always do the SAW register writes on the first CPU */
	return smp_call_function_single(0, spmi_saw_set_vdd,
					&voltage_sel, true);
}
static struct regulator_ops spmi_saw_ops = {};
static const struct regulator_ops spmi_smps_ops = {
.enable = regulator_enable_regmap,
.disable = regulator_disable_regmap,
.is_enabled = regulator_is_enabled_regmap,
.set_voltage_sel = spmi_regulator_common_set_voltage,
.set_voltage_time_sel = spmi_regulator_set_voltage_time_sel,
.get_voltage_sel = spmi_regulator_common_get_voltage,
.map_voltage = spmi_regulator_common_map_voltage,
.list_voltage = spmi_regulator_common_list_voltage,
.set_mode = spmi_regulator_common_set_mode,
.get_mode = spmi_regulator_common_get_mode,
.set_load = spmi_regulator_common_set_load,
.set_pull_down = spmi_regulator_common_set_pull_down,
};
static const struct regulator_ops spmi_ldo_ops = {
.enable = regulator_enable_regmap,
.disable = regulator_disable_regmap,
.is_enabled = regulator_is_enabled_regmap,
.set_voltage_sel = spmi_regulator_common_set_voltage,
.get_voltage_sel = spmi_regulator_common_get_voltage,
.map_voltage = spmi_regulator_common_map_voltage,
.list_voltage = spmi_regulator_common_list_voltage,
.set_mode = spmi_regulator_common_set_mode,
.get_mode = spmi_regulator_common_get_mode,
.set_load = spmi_regulator_common_set_load,
.set_bypass = spmi_regulator_common_set_bypass,
.get_bypass = spmi_regulator_common_get_bypass,
.set_pull_down = spmi_regulator_common_set_pull_down,
.set_soft_start = spmi_regulator_common_set_soft_start,
};
static const struct regulator_ops spmi_ln_ldo_ops = {
.enable = regulator_enable_regmap,
.disable = regulator_disable_regmap,
.is_enabled = regulator_is_enabled_regmap,
.set_voltage_sel = spmi_regulator_common_set_voltage,
.get_voltage_sel = spmi_regulator_common_get_voltage,
.map_voltage = spmi_regulator_common_map_voltage,
.list_voltage = spmi_regulator_common_list_voltage,
.set_bypass = spmi_regulator_common_set_bypass,
.get_bypass = spmi_regulator_common_get_bypass,
};
/*
 * Operations for VS (voltage switch) regulators: no voltage control, but
 * over-current protection and a dedicated enable path.
 */
static const struct regulator_ops spmi_vs_ops = {
	.enable			= spmi_regulator_vs_enable,
	.disable		= regulator_disable_regmap,
	.is_enabled		= regulator_is_enabled_regmap,
	.set_pull_down		= spmi_regulator_common_set_pull_down,
	.set_soft_start		= spmi_regulator_common_set_soft_start,
	.set_over_current_protection = spmi_regulator_vs_ocp,
	.set_mode		= spmi_regulator_common_set_mode,
	.get_mode		= spmi_regulator_common_get_mode,
};
/* Operations for boost regulators: single voltage range, input current limit. */
static const struct regulator_ops spmi_boost_ops = {
	.enable			= regulator_enable_regmap,
	.disable		= regulator_disable_regmap,
	.is_enabled		= regulator_is_enabled_regmap,
	.set_voltage_sel	= spmi_regulator_single_range_set_voltage,
	.get_voltage_sel	= spmi_regulator_single_range_get_voltage,
	.map_voltage		= spmi_regulator_single_map_voltage,
	.list_voltage		= spmi_regulator_common_list_voltage,
	.set_input_current_limit = spmi_regulator_set_ilim,
};
/* Operations for FTSMPS regulators using the common control registers. */
static const struct regulator_ops spmi_ftsmps_ops = {
	.enable			= regulator_enable_regmap,
	.disable		= regulator_disable_regmap,
	.is_enabled		= regulator_is_enabled_regmap,
	.set_voltage_sel	= spmi_regulator_common_set_voltage,
	.set_voltage_time_sel	= spmi_regulator_set_voltage_time_sel,
	.get_voltage_sel	= spmi_regulator_common_get_voltage,
	.map_voltage		= spmi_regulator_common_map_voltage,
	.list_voltage		= spmi_regulator_common_list_voltage,
	.set_mode		= spmi_regulator_common_set_mode,
	.get_mode		= spmi_regulator_common_get_mode,
	.set_load		= spmi_regulator_common_set_load,
	.set_pull_down		= spmi_regulator_common_set_pull_down,
};
/* Operations for ULT low-range SMPS; dedicated voltage get/set callbacks. */
static const struct regulator_ops spmi_ult_lo_smps_ops = {
	.enable			= regulator_enable_regmap,
	.disable		= regulator_disable_regmap,
	.is_enabled		= regulator_is_enabled_regmap,
	.set_voltage_sel	= spmi_regulator_ult_lo_smps_set_voltage,
	.set_voltage_time_sel	= spmi_regulator_set_voltage_time_sel,
	.get_voltage_sel	= spmi_regulator_ult_lo_smps_get_voltage,
	.list_voltage		= spmi_regulator_common_list_voltage,
	.set_mode		= spmi_regulator_common_set_mode,
	.get_mode		= spmi_regulator_common_get_mode,
	.set_load		= spmi_regulator_common_set_load,
	.set_pull_down		= spmi_regulator_common_set_pull_down,
};
/* Operations for ULT high-range SMPS; single-range voltage callbacks. */
static const struct regulator_ops spmi_ult_ho_smps_ops = {
	.enable			= regulator_enable_regmap,
	.disable		= regulator_disable_regmap,
	.is_enabled		= regulator_is_enabled_regmap,
	.set_voltage_sel	= spmi_regulator_single_range_set_voltage,
	.set_voltage_time_sel	= spmi_regulator_set_voltage_time_sel,
	.get_voltage_sel	= spmi_regulator_single_range_get_voltage,
	.map_voltage		= spmi_regulator_single_map_voltage,
	.list_voltage		= spmi_regulator_common_list_voltage,
	.set_mode		= spmi_regulator_common_set_mode,
	.get_mode		= spmi_regulator_common_get_mode,
	.set_load		= spmi_regulator_common_set_load,
	.set_pull_down		= spmi_regulator_common_set_pull_down,
};
/* Operations for ULT LDO regulators: single range plus bypass/soft-start. */
static const struct regulator_ops spmi_ult_ldo_ops = {
	.enable			= regulator_enable_regmap,
	.disable		= regulator_disable_regmap,
	.is_enabled		= regulator_is_enabled_regmap,
	.set_voltage_sel	= spmi_regulator_single_range_set_voltage,
	.get_voltage_sel	= spmi_regulator_single_range_get_voltage,
	.map_voltage		= spmi_regulator_single_map_voltage,
	.list_voltage		= spmi_regulator_common_list_voltage,
	.set_mode		= spmi_regulator_common_set_mode,
	.get_mode		= spmi_regulator_common_get_mode,
	.set_load		= spmi_regulator_common_set_load,
	.set_bypass		= spmi_regulator_common_set_bypass,
	.get_bypass		= spmi_regulator_common_get_bypass,
	.set_pull_down		= spmi_regulator_common_set_pull_down,
	.set_soft_start		= spmi_regulator_common_set_soft_start,
};
/* Operations for FTSMPS426 regulators; dedicated voltage and mode callbacks. */
static const struct regulator_ops spmi_ftsmps426_ops = {
	.enable			= regulator_enable_regmap,
	.disable		= regulator_disable_regmap,
	.is_enabled		= regulator_is_enabled_regmap,
	.set_voltage_sel	= spmi_regulator_ftsmps426_set_voltage,
	.set_voltage_time_sel	= spmi_regulator_set_voltage_time_sel,
	.get_voltage_sel	= spmi_regulator_ftsmps426_get_voltage,
	.map_voltage		= spmi_regulator_single_map_voltage,
	.list_voltage		= spmi_regulator_common_list_voltage,
	.set_mode		= spmi_regulator_ftsmps426_set_mode,
	.get_mode		= spmi_regulator_ftsmps426_get_mode,
	.set_load		= spmi_regulator_common_set_load,
	.set_pull_down		= spmi_regulator_common_set_pull_down,
};
/* Operations for HFS430 regulators: like FTSMPS426 but without set_load. */
static const struct regulator_ops spmi_hfs430_ops = {
	.enable			= regulator_enable_regmap,
	.disable		= regulator_disable_regmap,
	.is_enabled		= regulator_is_enabled_regmap,
	.set_voltage_sel	= spmi_regulator_ftsmps426_set_voltage,
	.set_voltage_time_sel	= spmi_regulator_set_voltage_time_sel,
	.get_voltage_sel	= spmi_regulator_ftsmps426_get_voltage,
	.map_voltage		= spmi_regulator_single_map_voltage,
	.list_voltage		= spmi_regulator_common_list_voltage,
	.set_mode		= spmi_regulator_ftsmps426_set_mode,
	.get_mode		= spmi_regulator_ftsmps426_get_mode,
};
/* Operations for HFSMPS regulators; HFSMPS-specific mode and pull-down. */
static const struct regulator_ops spmi_hfsmps_ops = {
	.enable			= regulator_enable_regmap,
	.disable		= regulator_disable_regmap,
	.is_enabled		= regulator_is_enabled_regmap,
	.set_voltage_sel	= spmi_regulator_ftsmps426_set_voltage,
	.set_voltage_time_sel	= spmi_regulator_set_voltage_time_sel,
	.get_voltage_sel	= spmi_regulator_ftsmps426_get_voltage,
	.map_voltage		= spmi_regulator_single_map_voltage,
	.list_voltage		= spmi_regulator_common_list_voltage,
	.set_mode		= spmi_regulator_hfsmps_set_mode,
	.get_mode		= spmi_regulator_hfsmps_get_mode,
	.set_load		= spmi_regulator_common_set_load,
	.set_pull_down		= spmi_regulator_hfsmps_set_pull_down,
};
/* Maximum possible digital major revision value */
#define INF 0xFF

/*
 * Table mapping hardware (type, subtype, digital major revision range) to a
 * logical type, ops table, voltage set points and minimum HPM load.
 * spmi_regulator_match() selects the first entry whose type/subtype match
 * and whose [revision_min, revision_max] interval contains the part's
 * digital major revision.
 */
static const struct spmi_regulator_mapping supported_regulators[] = {
	/*           type subtype dig_min dig_max ltype ops setpoints hpm_min */
	SPMI_VREG(LDO,   HT_P600, 0, INF, HFS430, hfs430, ht_p600, 10000),
	SPMI_VREG(LDO,   HT_P150, 0, INF, HFS430, hfs430, ht_p150, 10000),
	SPMI_VREG(BUCK,  GP_CTL,   0, INF, SMPS,   smps,   smps,   100000),
	SPMI_VREG(BUCK,  HFS430,   0, 3, HFS430, hfs430, hfs430,  10000),
	SPMI_VREG(BUCK,  HFSMPS_510, 4, INF, HFSMPS, hfsmps, hfs430, 100000),
	SPMI_VREG(LDO,   N300,     0, INF, LDO,    ldo,    nldo1,   10000),
	SPMI_VREG(LDO,   N600,     0, 0,   LDO,    ldo,    nldo2,   10000),
	SPMI_VREG(LDO,   N1200,    0, 0,   LDO,    ldo,    nldo2,   10000),
	SPMI_VREG(LDO,   N600,     1, INF, LDO,    ldo,    nldo3,   10000),
	SPMI_VREG(LDO,   N1200,    1, INF, LDO,    ldo,    nldo3,   10000),
	SPMI_VREG(LDO,   N600_ST,  0, 0,   LDO,    ldo,    nldo2,   10000),
	SPMI_VREG(LDO,   N1200_ST, 0, 0,   LDO,    ldo,    nldo2,   10000),
	SPMI_VREG(LDO,   N600_ST,  1, INF, LDO,    ldo,    nldo3,   10000),
	SPMI_VREG(LDO,   N1200_ST, 1, INF, LDO,    ldo,    nldo3,   10000),
	SPMI_VREG(LDO,   P50,      0, INF, LDO,    ldo,    pldo,     5000),
	SPMI_VREG(LDO,   P150,     0, INF, LDO,    ldo,    pldo,    10000),
	SPMI_VREG(LDO,   P300,     0, INF, LDO,    ldo,    pldo,    10000),
	SPMI_VREG(LDO,   P600,     0, INF, LDO,    ldo,    pldo,    10000),
	SPMI_VREG(LDO,   P1200,    0, INF, LDO,    ldo,    pldo,    10000),
	SPMI_VREG(LDO,   LN,       0, INF, LN_LDO, ln_ldo, ln_ldo,      0),
	SPMI_VREG(LDO,   LV_P50,   0, INF, LDO,    ldo,    pldo,     5000),
	SPMI_VREG(LDO,   LV_P150,  0, INF, LDO,    ldo,    pldo,    10000),
	SPMI_VREG(LDO,   LV_P300,  0, INF, LDO,    ldo,    pldo,    10000),
	SPMI_VREG(LDO,   LV_P600,  0, INF, LDO,    ldo,    pldo,    10000),
	SPMI_VREG(LDO,   LV_P1200, 0, INF, LDO,    ldo,    pldo,    10000),
	SPMI_VREG(LDO, HT_N300_ST,   0, INF, FTSMPS426, ftsmps426,
							ht_nldo,   30000),
	SPMI_VREG(LDO, HT_N600_ST,   0, INF, FTSMPS426, ftsmps426,
							ht_nldo,   30000),
	SPMI_VREG(LDO, HT_N1200_ST,  0, INF, FTSMPS426, ftsmps426,
							ht_nldo,   30000),
	SPMI_VREG(LDO, HT_LVP150,    0, INF, FTSMPS426, ftsmps426,
							ht_lvpldo, 10000),
	SPMI_VREG(LDO, HT_LVP300,    0, INF, FTSMPS426, ftsmps426,
							ht_lvpldo, 10000),
	SPMI_VREG(LDO, L660_N300_ST, 0, INF, FTSMPS426, ftsmps426,
							nldo660,   10000),
	SPMI_VREG(LDO, L660_N600_ST, 0, INF, FTSMPS426, ftsmps426,
							nldo660,   10000),
	SPMI_VREG(LDO, L660_P50,     0, INF, FTSMPS426, ftsmps426,
							pldo660,   10000),
	SPMI_VREG(LDO, L660_P150,    0, INF, FTSMPS426, ftsmps426,
							pldo660,   10000),
	SPMI_VREG(LDO, L660_P600,    0, INF, FTSMPS426, ftsmps426,
							pldo660,   10000),
	SPMI_VREG(LDO, L660_LVP150,  0, INF, FTSMPS426, ftsmps426,
							ht_lvpldo, 10000),
	SPMI_VREG(LDO, L660_LVP600,  0, INF, FTSMPS426, ftsmps426,
							ht_lvpldo, 10000),
	SPMI_VREG_VS(LV100,          0, INF),
	SPMI_VREG_VS(LV300,          0, INF),
	SPMI_VREG_VS(MV300,          0, INF),
	SPMI_VREG_VS(MV500,          0, INF),
	SPMI_VREG_VS(HDMI,           0, INF),
	SPMI_VREG_VS(OTG,            0, INF),
	SPMI_VREG(BOOST, 5V_BOOST,   0, INF, BOOST,  boost,  boost,       0),
	SPMI_VREG(FTS,   FTS_CTL,    0, INF, FTSMPS, ftsmps, ftsmps, 100000),
	SPMI_VREG(FTS, FTS2p5_CTL,   0, INF, FTSMPS, ftsmps, ftsmps2p5, 100000),
	SPMI_VREG(FTS, FTS426_CTL,   0, INF, FTSMPS426, ftsmps426, ftsmps426, 100000),
	SPMI_VREG(BOOST_BYP, BB_2A,  0, INF, BOOST_BYP, boost, boost_byp,   0),
	SPMI_VREG(ULT_BUCK, ULT_HF_CTL1, 0, INF, ULT_LO_SMPS, ult_lo_smps,
							ult_lo_smps,   100000),
	SPMI_VREG(ULT_BUCK, ULT_HF_CTL2, 0, INF, ULT_LO_SMPS, ult_lo_smps,
							ult_lo_smps,   100000),
	SPMI_VREG(ULT_BUCK, ULT_HF_CTL3, 0, INF, ULT_LO_SMPS, ult_lo_smps,
							ult_lo_smps,   100000),
	SPMI_VREG(ULT_BUCK, ULT_HF_CTL4, 0, INF, ULT_HO_SMPS, ult_ho_smps,
							ult_ho_smps,   100000),
	SPMI_VREG(ULT_LDO, N300_ST,  0, INF, ULT_LDO, ult_ldo, ult_nldo, 10000),
	SPMI_VREG(ULT_LDO, N600_ST,  0, INF, ULT_LDO, ult_ldo, ult_nldo, 10000),
	SPMI_VREG(ULT_LDO, N900_ST,  0, INF, ULT_LDO, ult_ldo, ult_nldo, 10000),
	SPMI_VREG(ULT_LDO, N1200_ST, 0, INF, ULT_LDO, ult_ldo, ult_nldo, 10000),
	SPMI_VREG(ULT_LDO, LV_P50,   0, INF, ULT_LDO, ult_ldo, ult_pldo, 10000),
	SPMI_VREG(ULT_LDO, LV_P150,  0, INF, ULT_LDO, ult_ldo, ult_pldo, 10000),
	SPMI_VREG(ULT_LDO, LV_P300,  0, INF, ULT_LDO, ult_ldo, ult_pldo, 10000),
	SPMI_VREG(ULT_LDO, LV_P450,  0, INF, ULT_LDO, ult_ldo, ult_pldo, 10000),
	SPMI_VREG(ULT_LDO, P600,     0, INF, ULT_LDO, ult_ldo, ult_pldo, 10000),
	SPMI_VREG(ULT_LDO, P300,     0, INF, ULT_LDO, ult_ldo, ult_pldo, 10000),
	SPMI_VREG(ULT_LDO, P150,     0, INF, ULT_LDO, ult_ldo, ult_pldo, 10000),
	SPMI_VREG(ULT_LDO, P50,      0, INF, ULT_LDO, ult_ldo, ult_pldo,  5000),
	SPMI_VREG(LDO, LV_P150_510,  0, INF, LDO_510, hfsmps, ht_lvpldo, 10000),
	SPMI_VREG(LDO, LV_P300_510,  0, INF, LDO_510, hfsmps, ht_lvpldo, 10000),
	SPMI_VREG(LDO, LV_P600_510,  0, INF, LDO_510, hfsmps, ht_lvpldo, 10000),
	SPMI_VREG(LDO, MV_P50_510,   0, INF, LDO_510, hfsmps, pldo660,  10000),
	SPMI_VREG(LDO, MV_P150_510,  0, INF, LDO_510, hfsmps, pldo660,  10000),
	SPMI_VREG(LDO, MV_P600_510,  0, INF, LDO_510, hfsmps, pldo660,  10000),
	SPMI_VREG(LDO, N300_510,     0, INF, LDO_510, hfsmps, nldo_510, 10000),
	SPMI_VREG(LDO, N600_510,     0, INF, LDO_510, hfsmps, nldo_510, 10000),
	SPMI_VREG(LDO, N1200_510,    0, INF, LDO_510, hfsmps, nldo_510, 10000),
	SPMI_VREG(FTS, FTSMPS_510,   0, INF, FTSMPS3, hfsmps, ftsmps510, 100000),
};
/*
 * Compute the number of selectable voltages in each range of @points and
 * accumulate the total into points->n_voltages.  A range with
 * set_point_max_uV == 0 contributes zero voltages.
 */
static void spmi_calculate_num_voltages(struct spmi_voltage_set_points *points)
{
	unsigned int i;

	for (i = 0; i < points->count; i++) {
		struct spmi_voltage_range *r = &points->range[i];
		unsigned int n = 0;

		if (r->set_point_max_uV) {
			n = r->set_point_max_uV - r->set_point_min_uV;
			n = n / r->step_uV + 1;
		}

		r->n_voltages = n;
		points->n_voltages += n;
	}
}
/*
 * Identify this regulator's hardware type and bind its ops and set points.
 *
 * Reads the type, subtype and digital major revision registers (or, when
 * @force_type is non-zero, takes the type from its high byte and the subtype
 * from its low byte) and searches supported_regulators[] for a matching
 * entry.
 *
 * Returns 0 on success, a negative error from the register read, or
 * -ENODEV when no mapping matches.
 */
static int spmi_regulator_match(struct spmi_regulator *vreg, u16 force_type)
{
	const struct spmi_regulator_mapping *mapping;
	int ret, i;
	u32 dig_major_rev;
	/* Covers the contiguous DIG_MAJOR_REV..SUBTYPE register window. */
	u8 version[SPMI_COMMON_REG_SUBTYPE - SPMI_COMMON_REG_DIG_MAJOR_REV + 1];
	u8 type, subtype;

	ret = spmi_vreg_read(vreg, SPMI_COMMON_REG_DIG_MAJOR_REV, version,
		ARRAY_SIZE(version));
	if (ret) {
		dev_dbg(vreg->dev, "could not read version registers\n");
		return ret;
	}
	dig_major_rev	= version[SPMI_COMMON_REG_DIG_MAJOR_REV
					- SPMI_COMMON_REG_DIG_MAJOR_REV];

	if (!force_type) {
		type		= version[SPMI_COMMON_REG_TYPE -
					  SPMI_COMMON_REG_DIG_MAJOR_REV];
		subtype		= version[SPMI_COMMON_REG_SUBTYPE -
					  SPMI_COMMON_REG_DIG_MAJOR_REV];
	} else {
		/* Caller-supplied override: type in high byte, subtype in low. */
		type = force_type >> 8;
		subtype = force_type;
	}

	for (i = 0; i < ARRAY_SIZE(supported_regulators); i++) {
		mapping = &supported_regulators[i];
		if (mapping->type == type && mapping->subtype == subtype
		    && mapping->revision_min <= dig_major_rev
		    && mapping->revision_max >= dig_major_rev)
			goto found;
	}

	dev_err(vreg->dev,
		"unsupported regulator: name=%s type=0x%02X, subtype=0x%02X, dig major rev=0x%02X\n",
		vreg->desc.name, type, subtype, dig_major_rev);

	return -ENODEV;

found:
	vreg->logical_type	= mapping->logical_type;
	vreg->set_points	= mapping->set_points;
	vreg->hpm_min_load	= mapping->hpm_min_load;
	vreg->desc.ops		= mapping->ops;

	if (mapping->set_points) {
		/* Lazily compute per-range voltage counts on first use. */
		if (!mapping->set_points->n_voltages)
			spmi_calculate_num_voltages(mapping->set_points);
		vreg->desc.n_voltages = mapping->set_points->n_voltages;
	}

	return 0;
}
/*
 * Derive the regulator's voltage slew rate (uV/us) from the hardware step
 * control register and cache it in vreg->slew_rate.
 *
 * Returns 0 on success, a negative error on register read failure, or
 * -EINVAL when no voltage range applies.
 */
static int spmi_regulator_init_slew_rate(struct spmi_regulator *vreg)
{
	int ret;
	u8 reg = 0;
	int step, delay, slew_rate, step_delay;
	const struct spmi_voltage_range *range;

	/* Note: fixed mojibake — "&reg" had been corrupted to a '®' glyph. */
	ret = spmi_vreg_read(vreg, SPMI_COMMON_REG_STEP_CTRL, &reg, 1);
	if (ret) {
		dev_err(vreg->dev, "spmi read failed, ret=%d\n", ret);
		return ret;
	}

	range = spmi_regulator_find_range(vreg);
	if (!range)
		return -EINVAL;

	/* FTSMPS parts use a different per-step delay than other types. */
	switch (vreg->logical_type) {
	case SPMI_REGULATOR_LOGICAL_TYPE_FTSMPS:
		step_delay = SPMI_FTSMPS_STEP_DELAY;
		break;
	default:
		step_delay = SPMI_DEFAULT_STEP_DELAY;
		break;
	}

	step = reg & SPMI_FTSMPS_STEP_CTRL_STEP_MASK;
	step >>= SPMI_FTSMPS_STEP_CTRL_STEP_SHIFT;

	delay = reg & SPMI_FTSMPS_STEP_CTRL_DELAY_MASK;
	delay >>= SPMI_FTSMPS_STEP_CTRL_DELAY_SHIFT;

	/* slew_rate has units of uV/us */
	slew_rate = SPMI_FTSMPS_CLOCK_RATE * range->step_uV * (1 << step);
	slew_rate /= 1000 * (step_delay << delay);
	slew_rate *= SPMI_FTSMPS_STEP_MARGIN_NUM;
	slew_rate /= SPMI_FTSMPS_STEP_MARGIN_DEN;

	/* Ensure that the slew rate is greater than 0 */
	vreg->slew_rate = max(slew_rate, 1);

	return ret;
}
/*
 * Derive the slew rate (uV/us) for FTSMPS426/HFS430 style regulators from
 * the step control register, using the caller-supplied @clock_rate, and
 * cache it in vreg->slew_rate.
 *
 * Returns 0 on success or a negative error on register read failure.
 */
static int spmi_regulator_init_slew_rate_ftsmps426(struct spmi_regulator *vreg,
						   int clock_rate)
{
	int ret;
	u8 reg = 0;
	int delay, slew_rate;
	const struct spmi_voltage_range *range = &vreg->set_points->range[0];

	/* Note: fixed mojibake — "&reg" had been corrupted to a '®' glyph. */
	ret = spmi_vreg_read(vreg, SPMI_COMMON_REG_STEP_CTRL, &reg, 1);
	if (ret) {
		dev_err(vreg->dev, "spmi read failed, ret=%d\n", ret);
		return ret;
	}

	delay = reg & SPMI_FTSMPS426_STEP_CTRL_DELAY_MASK;
	delay >>= SPMI_FTSMPS426_STEP_CTRL_DELAY_SHIFT;

	/* slew_rate has units of uV/us */
	slew_rate = clock_rate * range->step_uV;
	slew_rate /= 1000 * (SPMI_FTSMPS426_STEP_DELAY << delay);
	slew_rate *= SPMI_FTSMPS426_STEP_MARGIN_NUM;
	slew_rate /= SPMI_FTSMPS426_STEP_MARGIN_DEN;

	/* Ensure that the slew rate is greater than 0 */
	vreg->slew_rate = max(slew_rate, 1);

	return ret;
}
/*
 * Derive the slew rate for HFSMPS regulators: the hardware delay field
 * directly scales down a fixed 38.4 base rate by powers of two.
 *
 * Returns 0 on success or a negative error on register read failure.
 */
static int spmi_regulator_init_slew_rate_hfsmps(struct spmi_regulator *vreg)
{
	int ret;
	u8 reg = 0;
	int delay;

	/* Note: fixed mojibake — "&reg" had been corrupted to a '®' glyph. */
	ret = spmi_vreg_read(vreg, SPMI_HFSMPS_REG_STEP_CTRL, &reg, 1);
	if (ret) {
		dev_err(vreg->dev, "spmi read failed, ret=%d\n", ret);
		return ret;
	}

	delay = reg & SPMI_FTSMPS426_STEP_CTRL_DELAY_MASK;
	delay >>= SPMI_FTSMPS426_STEP_CTRL_DELAY_SHIFT;

	vreg->slew_rate = SPMI_HFSMPS_SLEW_RATE_38p4 >> delay;

	return ret;
}
/*
 * Apply one-time control register configuration from the platform init data:
 * enable pin control, mode (HPM) pin control, and for VS regulators the
 * soft-start strength.  Fields set to their *_HW_DEFAULT sentinel are left
 * untouched in hardware.
 *
 * Returns 0 on success or a negative error from the register accesses.
 */
static int spmi_regulator_init_registers(struct spmi_regulator *vreg,
				const struct spmi_regulator_init_data *data)
{
	int ret;
	enum spmi_regulator_logical_type type;
	u8 ctrl_reg[8], reg, mask;

	type = vreg->logical_type;

	/* Read the 8-byte control register window starting at VOLTAGE_RANGE. */
	ret = spmi_vreg_read(vreg, SPMI_COMMON_REG_VOLTAGE_RANGE, ctrl_reg, 8);
	if (ret)
		return ret;

	/* Set up enable pin control. */
	if (!(data->pin_ctrl_enable & SPMI_REGULATOR_PIN_CTRL_ENABLE_HW_DEFAULT)) {
		switch (type) {
		case SPMI_REGULATOR_LOGICAL_TYPE_SMPS:
		case SPMI_REGULATOR_LOGICAL_TYPE_LDO:
		case SPMI_REGULATOR_LOGICAL_TYPE_VS:
			ctrl_reg[SPMI_COMMON_IDX_ENABLE] &=
				~SPMI_COMMON_ENABLE_FOLLOW_ALL_MASK;
			ctrl_reg[SPMI_COMMON_IDX_ENABLE] |=
				data->pin_ctrl_enable & SPMI_COMMON_ENABLE_FOLLOW_ALL_MASK;
			break;
		default:
			/* Other types have no enable pin control bits. */
			break;
		}
	}

	/* Set up mode pin control. */
	if (!(data->pin_ctrl_hpm & SPMI_REGULATOR_PIN_CTRL_HPM_HW_DEFAULT)) {
		switch (type) {
		case SPMI_REGULATOR_LOGICAL_TYPE_SMPS:
		case SPMI_REGULATOR_LOGICAL_TYPE_LDO:
			ctrl_reg[SPMI_COMMON_IDX_MODE] &=
				~SPMI_COMMON_MODE_FOLLOW_ALL_MASK;
			ctrl_reg[SPMI_COMMON_IDX_MODE] |=
				data->pin_ctrl_hpm & SPMI_COMMON_MODE_FOLLOW_ALL_MASK;
			break;
		case SPMI_REGULATOR_LOGICAL_TYPE_VS:
		case SPMI_REGULATOR_LOGICAL_TYPE_ULT_LO_SMPS:
		case SPMI_REGULATOR_LOGICAL_TYPE_ULT_HO_SMPS:
		case SPMI_REGULATOR_LOGICAL_TYPE_ULT_LDO:
			/* These types only support the "follow awake" bit. */
			ctrl_reg[SPMI_COMMON_IDX_MODE] &=
				~SPMI_COMMON_MODE_FOLLOW_AWAKE_MASK;
			ctrl_reg[SPMI_COMMON_IDX_MODE] |=
				data->pin_ctrl_hpm & SPMI_COMMON_MODE_FOLLOW_AWAKE_MASK;
			break;
		default:
			break;
		}
	}

	/* Write back any control register values that were modified. */
	ret = spmi_vreg_write(vreg, SPMI_COMMON_REG_VOLTAGE_RANGE, ctrl_reg, 8);
	if (ret)
		return ret;

	/* Set soft start strength and over current protection for VS. */
	if (type == SPMI_REGULATOR_LOGICAL_TYPE_VS) {
		if (data->vs_soft_start_strength
				!= SPMI_VS_SOFT_START_STR_HW_DEFAULT) {
			reg = data->vs_soft_start_strength
				& SPMI_VS_SOFT_START_SEL_MASK;
			mask = SPMI_VS_SOFT_START_SEL_MASK;
			return spmi_vreg_update_bits(vreg,
						     SPMI_VS_REG_SOFT_START,
						     reg, mask);
		}
	}

	return 0;
}
/*
 * Populate @data (and the OCP retry fields of @vreg) from optional device
 * tree properties of @node, falling back to hardware-default sentinels for
 * any property that is absent.
 */
static void spmi_regulator_get_dt_config(struct spmi_regulator *vreg,
		struct device_node *node, struct spmi_regulator_init_data *data)
{
	/*
	 * Initialize configuration parameters to use hardware default in case
	 * no value is specified via device tree.
	 */
	data->pin_ctrl_enable	    = SPMI_REGULATOR_PIN_CTRL_ENABLE_HW_DEFAULT;
	data->pin_ctrl_hpm	    = SPMI_REGULATOR_PIN_CTRL_HPM_HW_DEFAULT;
	data->vs_soft_start_strength	= SPMI_VS_SOFT_START_STR_HW_DEFAULT;

	/* These bindings are optional, so it is okay if they aren't found. */
	of_property_read_u32(node, "qcom,ocp-max-retries",
		&vreg->ocp_max_retries);
	of_property_read_u32(node, "qcom,ocp-retry-delay",
		&vreg->ocp_retry_delay_ms);
	of_property_read_u32(node, "qcom,pin-ctrl-enable",
		&data->pin_ctrl_enable);
	of_property_read_u32(node, "qcom,pin-ctrl-hpm", &data->pin_ctrl_hpm);
	of_property_read_u32(node, "qcom,vs-soft-start-strength",
		&data->vs_soft_start_strength);
}
/*
 * Translate the device-tree mode value into a regulator framework mode:
 * 1 -> NORMAL, 2 -> FAST, anything else -> IDLE.
 */
static unsigned int spmi_regulator_of_map_mode(unsigned int mode)
{
	switch (mode) {
	case 1:
		return REGULATOR_MODE_NORMAL;
	case 2:
		return REGULATOR_MODE_FAST;
	default:
		return REGULATOR_MODE_IDLE;
	}
}
/*
 * of_parse_cb for the regulator framework: reads optional DT configuration,
 * programs the common control registers, initializes the slew rate for the
 * regulator's logical type, and (for VS regulators only) wires up the OCP
 * interrupt and its recovery work.
 *
 * Returns 0 on success or a negative error code.
 */
static int spmi_regulator_of_parse(struct device_node *node,
				   const struct regulator_desc *desc,
				   struct regulator_config *config)
{
	struct spmi_regulator_init_data data = { };
	struct spmi_regulator *vreg = config->driver_data;
	struct device *dev = config->dev;
	int ret;

	spmi_regulator_get_dt_config(vreg, node, &data);

	/* Fall back to driver defaults when DT did not set OCP parameters. */
	if (!vreg->ocp_max_retries)
		vreg->ocp_max_retries = SPMI_VS_OCP_DEFAULT_MAX_RETRIES;
	if (!vreg->ocp_retry_delay_ms)
		vreg->ocp_retry_delay_ms = SPMI_VS_OCP_DEFAULT_RETRY_DELAY_MS;

	ret = spmi_regulator_init_registers(vreg, &data);
	if (ret) {
		dev_err(dev, "common initialization failed, ret=%d\n", ret);
		return ret;
	}

	/* Pick the slew-rate derivation that matches the hardware type. */
	switch (vreg->logical_type) {
	case SPMI_REGULATOR_LOGICAL_TYPE_FTSMPS:
	case SPMI_REGULATOR_LOGICAL_TYPE_ULT_LO_SMPS:
	case SPMI_REGULATOR_LOGICAL_TYPE_ULT_HO_SMPS:
	case SPMI_REGULATOR_LOGICAL_TYPE_SMPS:
		ret = spmi_regulator_init_slew_rate(vreg);
		if (ret)
			return ret;
		break;
	case SPMI_REGULATOR_LOGICAL_TYPE_FTSMPS426:
		ret = spmi_regulator_init_slew_rate_ftsmps426(vreg,
						SPMI_FTSMPS426_CLOCK_RATE);
		if (ret)
			return ret;
		break;
	case SPMI_REGULATOR_LOGICAL_TYPE_HFS430:
		ret = spmi_regulator_init_slew_rate_ftsmps426(vreg,
							SPMI_HFS430_CLOCK_RATE);
		if (ret)
			return ret;
		break;
	case SPMI_REGULATOR_LOGICAL_TYPE_HFSMPS:
	case SPMI_REGULATOR_LOGICAL_TYPE_FTSMPS3:
		ret = spmi_regulator_init_slew_rate_hfsmps(vreg);
		if (ret)
			return ret;
		break;
	default:
		break;
	}

	/* Only VS regulators support over-current protection interrupts. */
	if (vreg->logical_type != SPMI_REGULATOR_LOGICAL_TYPE_VS)
		vreg->ocp_irq = 0;

	if (vreg->ocp_irq) {
		ret = devm_request_irq(dev, vreg->ocp_irq,
			spmi_regulator_vs_ocp_isr, IRQF_TRIGGER_RISING, "ocp",
			vreg);
		if (ret < 0) {
			dev_err(dev, "failed to request irq %d, ret=%d\n",
				vreg->ocp_irq, ret);
			return ret;
		}

		ret = devm_delayed_work_autocancel(dev, &vreg->ocp_work,
						   spmi_regulator_vs_ocp_work);
		if (ret)
			return ret;
	}

	return 0;
}
/*
 * PM6125 PMIC regulators: name, SPMI peripheral base address, supply name.
 * The probe loop walks this table until it hits an entry with a NULL name,
 * so the empty sentinel entry at the end is mandatory — it was missing,
 * which made probe read past the end of the array.
 */
static const struct spmi_regulator_data pm6125_regulators[] = {
	{ "s1", 0x1400, "vdd_s1" },
	{ "s2", 0x1700, "vdd_s2" },
	{ "s3", 0x1a00, "vdd_s3" },
	{ "s4", 0x1d00, "vdd_s4" },
	{ "s5", 0x2000, "vdd_s5" },
	{ "s6", 0x2300, "vdd_s6" },
	{ "s7", 0x2600, "vdd_s7" },
	{ "s8", 0x2900, "vdd_s8" },
	{ "l1", 0x4000, "vdd_l1_l7_l17_l18" },
	{ "l2", 0x4100, "vdd_l2_l3_l4" },
	{ "l3", 0x4200, "vdd_l2_l3_l4" },
	{ "l4", 0x4300, "vdd_l2_l3_l4" },
	{ "l5", 0x4400, "vdd_l5_l15_l19_l20_l21_l22" },
	{ "l6", 0x4500, "vdd_l6_l8" },
	{ "l7", 0x4600, "vdd_l1_l7_l17_l18" },
	{ "l8", 0x4700, "vdd_l6_l8" },
	{ "l9", 0x4800, "vdd_l9_l11" },
	{ "l10", 0x4900, "vdd_l10_l13_l14" },
	{ "l11", 0x4a00, "vdd_l9_l11" },
	{ "l12", 0x4b00, "vdd_l12_l16" },
	{ "l13", 0x4c00, "vdd_l10_l13_l14" },
	{ "l14", 0x4d00, "vdd_l10_l13_l14" },
	{ "l15", 0x4e00, "vdd_l5_l15_l19_l20_l21_l22" },
	{ "l16", 0x4f00, "vdd_l12_l16" },
	{ "l17", 0x5000, "vdd_l1_l7_l17_l18" },
	{ "l18", 0x5100, "vdd_l1_l7_l17_l18" },
	{ "l19", 0x5200, "vdd_l5_l15_l19_l20_l21_l22" },
	{ "l20", 0x5300, "vdd_l5_l15_l19_l20_l21_l22" },
	{ "l21", 0x5400, "vdd_l5_l15_l19_l20_l21_l22" },
	{ "l22", 0x5500, "vdd_l5_l15_l19_l20_l21_l22" },
	{ "l23", 0x5600, "vdd_l23_l24" },
	{ "l24", 0x5700, "vdd_l23_l24" },
	{ }
};
/* PM660 PMIC regulators: name, SPMI peripheral base address, supply name. */
static const struct spmi_regulator_data pm660_regulators[] = {
	{ "s1", 0x1400, "vdd_s1", },
	{ "s2", 0x1700, "vdd_s2", },
	{ "s3", 0x1a00, "vdd_s3", },
	{ "s4", 0x1d00, "vdd_s3", },
	{ "s5", 0x2000, "vdd_s5", },
	{ "s6", 0x2300, "vdd_s6", },
	{ "l1", 0x4000, "vdd_l1_l6_l7", },
	{ "l2", 0x4100, "vdd_l2_l3", },
	{ "l3", 0x4200, "vdd_l2_l3", },
	/* l4 is inaccessible on PM660 */
	{ "l5", 0x4400, "vdd_l5", },
	{ "l6", 0x4500, "vdd_l1_l6_l7", },
	{ "l7", 0x4600, "vdd_l1_l6_l7", },
	{ "l8", 0x4700, "vdd_l8_l9_l10_l11_l12_l13_l14", },
	{ "l9", 0x4800, "vdd_l8_l9_l10_l11_l12_l13_l14", },
	{ "l10", 0x4900, "vdd_l8_l9_l10_l11_l12_l13_l14", },
	{ "l11", 0x4a00, "vdd_l8_l9_l10_l11_l12_l13_l14", },
	{ "l12", 0x4b00, "vdd_l8_l9_l10_l11_l12_l13_l14", },
	{ "l13", 0x4c00, "vdd_l8_l9_l10_l11_l12_l13_l14", },
	{ "l14", 0x4d00, "vdd_l8_l9_l10_l11_l12_l13_l14", },
	{ "l15", 0x4e00, "vdd_l15_l16_l17_l18_l19", },
	{ "l16", 0x4f00, "vdd_l15_l16_l17_l18_l19", },
	{ "l17", 0x5000, "vdd_l15_l16_l17_l18_l19", },
	{ "l18", 0x5100, "vdd_l15_l16_l17_l18_l19", },
	{ "l19", 0x5200, "vdd_l15_l16_l17_l18_l19", },
	{ }
};
/* PM660L PMIC regulators: name, SPMI peripheral base address, supply name. */
static const struct spmi_regulator_data pm660l_regulators[] = {
	{ "s1", 0x1400, "vdd_s1", },
	{ "s2", 0x1700, "vdd_s2", },
	{ "s3", 0x1a00, "vdd_s3", },
	{ "s4", 0x1d00, "vdd_s4", },
	{ "s5", 0x2000, "vdd_s5", },
	{ "l1", 0x4000, "vdd_l1_l9_l10", },
	{ "l2", 0x4100, "vdd_l2", },
	{ "l3", 0x4200, "vdd_l3_l5_l7_l8", },
	{ "l4", 0x4300, "vdd_l4_l6", },
	{ "l5", 0x4400, "vdd_l3_l5_l7_l8", },
	{ "l6", 0x4500, "vdd_l4_l6", },
	{ "l7", 0x4600, "vdd_l3_l5_l7_l8", },
	{ "l8", 0x4700, "vdd_l3_l5_l7_l8", },
	{ "l9", 0x4800, "vdd_l1_l9_l10", },
	{ "l10", 0x4900, "vdd_l1_l9_l10", },
	{ }
};
/* PM8004 PMIC regulators: name, SPMI peripheral base address, supply name. */
static const struct spmi_regulator_data pm8004_regulators[] = {
	{ "s2", 0x1700, "vdd_s2", },
	{ "s5", 0x2000, "vdd_s5", },
	{ }
};
/* PM8005 PMIC regulators: name, SPMI peripheral base address, supply name. */
static const struct spmi_regulator_data pm8005_regulators[] = {
	{ "s1", 0x1400, "vdd_s1", },
	{ "s2", 0x1700, "vdd_s2", },
	{ "s3", 0x1a00, "vdd_s3", },
	{ "s4", 0x1d00, "vdd_s4", },
	{ }
};
/* PM8226 PMIC regulators: name, SPMI peripheral base address, supply name. */
static const struct spmi_regulator_data pm8226_regulators[] = {
	{ "s1", 0x1400, "vdd_s1", },
	{ "s2", 0x1700, "vdd_s2", },
	{ "s3", 0x1a00, "vdd_s3", },
	{ "s4", 0x1d00, "vdd_s4", },
	{ "s5", 0x2000, "vdd_s5", },
	{ "l1", 0x4000, "vdd_l1_l2_l4_l5", },
	{ "l2", 0x4100, "vdd_l1_l2_l4_l5", },
	{ "l3", 0x4200, "vdd_l3_l24_l26", },
	{ "l4", 0x4300, "vdd_l1_l2_l4_l5", },
	{ "l5", 0x4400, "vdd_l1_l2_l4_l5", },
	{ "l6", 0x4500, "vdd_l6_l7_l8_l9_l27", },
	{ "l7", 0x4600, "vdd_l6_l7_l8_l9_l27", },
	{ "l8", 0x4700, "vdd_l6_l7_l8_l9_l27", },
	{ "l9", 0x4800, "vdd_l6_l7_l8_l9_l27", },
	{ "l10", 0x4900, "vdd_l10_l11_l13", },
	{ "l11", 0x4a00, "vdd_l10_l11_l13", },
	{ "l12", 0x4b00, "vdd_l12_l14", },
	{ "l13", 0x4c00, "vdd_l10_l11_l13", },
	{ "l14", 0x4d00, "vdd_l12_l14", },
	{ "l15", 0x4e00, "vdd_l15_l16_l17_l18", },
	{ "l16", 0x4f00, "vdd_l15_l16_l17_l18", },
	{ "l17", 0x5000, "vdd_l15_l16_l17_l18", },
	{ "l18", 0x5100, "vdd_l15_l16_l17_l18", },
	{ "l19", 0x5200, "vdd_l19_l20_l21_l22_l23_l28", },
	{ "l20", 0x5300, "vdd_l19_l20_l21_l22_l23_l28", },
	{ "l21", 0x5400, "vdd_l19_l20_l21_l22_l23_l28", },
	{ "l22", 0x5500, "vdd_l19_l20_l21_l22_l23_l28", },
	{ "l23", 0x5600, "vdd_l19_l20_l21_l22_l23_l28", },
	{ "l24", 0x5700, "vdd_l3_l24_l26", },
	{ "l25", 0x5800, "vdd_l25", },
	{ "l26", 0x5900, "vdd_l3_l24_l26", },
	{ "l27", 0x5a00, "vdd_l6_l7_l8_l9_l27", },
	{ "l28", 0x5b00, "vdd_l19_l20_l21_l22_l23_l28", },
	{ "lvs1", 0x8000, "vdd_lvs1", },
	{ }
};
/*
 * PM8841 PMIC regulators: name, base address, supply, OCP irq name (unused
 * here, NULL) and a force_type override for entries whose version registers
 * cannot be relied on.
 */
static const struct spmi_regulator_data pm8841_regulators[] = {
	{ "s1", 0x1400, "vdd_s1", },
	{ "s2", 0x1700, "vdd_s2", NULL, 0x1c08 },
	{ "s3", 0x1a00, "vdd_s3", },
	{ "s4", 0x1d00, "vdd_s4", NULL, 0x1c08 },
	{ "s5", 0x2000, "vdd_s5", NULL, 0x1c08 },
	{ "s6", 0x2300, "vdd_s6", NULL, 0x1c08 },
	{ "s7", 0x2600, "vdd_s7", NULL, 0x1c08 },
	{ "s8", 0x2900, "vdd_s8", NULL, 0x1c08 },
	{ }
};
/* PM8916 PMIC regulators: name, SPMI peripheral base address, supply name. */
static const struct spmi_regulator_data pm8916_regulators[] = {
	{ "s1", 0x1400, "vdd_s1", },
	{ "s2", 0x1700, "vdd_s2", },
	{ "s3", 0x1a00, "vdd_s3", },
	{ "s4", 0x1d00, "vdd_s4", },
	{ "l1", 0x4000, "vdd_l1_l3", },
	{ "l2", 0x4100, "vdd_l2", },
	{ "l3", 0x4200, "vdd_l1_l3", },
	{ "l4", 0x4300, "vdd_l4_l5_l6", },
	{ "l5", 0x4400, "vdd_l4_l5_l6", },
	{ "l6", 0x4500, "vdd_l4_l5_l6", },
	{ "l7", 0x4600, "vdd_l7", },
	{ "l8", 0x4700, "vdd_l8_l11_l14_l15_l16", },
	{ "l9", 0x4800, "vdd_l9_l10_l12_l13_l17_l18", },
	{ "l10", 0x4900, "vdd_l9_l10_l12_l13_l17_l18", },
	{ "l11", 0x4a00, "vdd_l8_l11_l14_l15_l16", },
	{ "l12", 0x4b00, "vdd_l9_l10_l12_l13_l17_l18", },
	{ "l13", 0x4c00, "vdd_l9_l10_l12_l13_l17_l18", },
	{ "l14", 0x4d00, "vdd_l8_l11_l14_l15_l16", },
	{ "l15", 0x4e00, "vdd_l8_l11_l14_l15_l16", },
	{ "l16", 0x4f00, "vdd_l8_l11_l14_l15_l16", },
	{ "l17", 0x5000, "vdd_l9_l10_l12_l13_l17_l18", },
	{ "l18", 0x5100, "vdd_l9_l10_l12_l13_l17_l18", },
	{ }
};
/*
 * PM8941 PMIC regulators: name, base address, supply, and for the 5vs
 * switches the OCP interrupt name; some LDOs carry a force_type override.
 */
static const struct spmi_regulator_data pm8941_regulators[] = {
	{ "s1", 0x1400, "vdd_s1", },
	{ "s2", 0x1700, "vdd_s2", },
	{ "s3", 0x1a00, "vdd_s3", },
	{ "s4", 0xa000, },
	{ "l1", 0x4000, "vdd_l1_l3", },
	{ "l2", 0x4100, "vdd_l2_lvs_1_2_3", },
	{ "l3", 0x4200, "vdd_l1_l3", },
	{ "l4", 0x4300, "vdd_l4_l11", },
	{ "l5", 0x4400, "vdd_l5_l7", NULL, 0x0410 },
	{ "l6", 0x4500, "vdd_l6_l12_l14_l15", },
	{ "l7", 0x4600, "vdd_l5_l7", NULL, 0x0410 },
	{ "l8", 0x4700, "vdd_l8_l16_l18_19", },
	{ "l9", 0x4800, "vdd_l9_l10_l17_l22", },
	{ "l10", 0x4900, "vdd_l9_l10_l17_l22", },
	{ "l11", 0x4a00, "vdd_l4_l11", },
	{ "l12", 0x4b00, "vdd_l6_l12_l14_l15", },
	{ "l13", 0x4c00, "vdd_l13_l20_l23_l24", },
	{ "l14", 0x4d00, "vdd_l6_l12_l14_l15", },
	{ "l15", 0x4e00, "vdd_l6_l12_l14_l15", },
	{ "l16", 0x4f00, "vdd_l8_l16_l18_19", },
	{ "l17", 0x5000, "vdd_l9_l10_l17_l22", },
	{ "l18", 0x5100, "vdd_l8_l16_l18_19", },
	{ "l19", 0x5200, "vdd_l8_l16_l18_19", },
	{ "l20", 0x5300, "vdd_l13_l20_l23_l24", },
	{ "l21", 0x5400, "vdd_l21", },
	{ "l22", 0x5500, "vdd_l9_l10_l17_l22", },
	{ "l23", 0x5600, "vdd_l13_l20_l23_l24", },
	{ "l24", 0x5700, "vdd_l13_l20_l23_l24", },
	{ "lvs1", 0x8000, "vdd_l2_lvs_1_2_3", },
	{ "lvs2", 0x8100, "vdd_l2_lvs_1_2_3", },
	{ "lvs3", 0x8200, "vdd_l2_lvs_1_2_3", },
	{ "5vs1", 0x8300, "vin_5vs", "ocp-5vs1", },
	{ "5vs2", 0x8400, "vin_5vs", "ocp-5vs2", },
	{ }
};
/* PM8950 PMIC regulators: name, SPMI peripheral base address, supply name. */
static const struct spmi_regulator_data pm8950_regulators[] = {
	{ "s1", 0x1400, "vdd_s1", },
	{ "s2", 0x1700, "vdd_s2", },
	{ "s3", 0x1a00, "vdd_s3", },
	{ "s4", 0x1d00, "vdd_s4", },
	{ "s5", 0x2000, "vdd_s5", },
	{ "s6", 0x2300, "vdd_s6", },
	{ "l1", 0x4000, "vdd_l1_l19", },
	{ "l2", 0x4100, "vdd_l2_l23", },
	{ "l3", 0x4200, "vdd_l3", },
	{ "l4", 0x4300, "vdd_l4_l5_l6_l7_l16", },
	{ "l5", 0x4400, "vdd_l4_l5_l6_l7_l16", },
	{ "l6", 0x4500, "vdd_l4_l5_l6_l7_l16", },
	{ "l7", 0x4600, "vdd_l4_l5_l6_l7_l16", },
	{ "l8", 0x4700, "vdd_l8_l11_l12_l17_l22", },
	{ "l9", 0x4800, "vdd_l9_l10_l13_l14_l15_l18", },
	{ "l10", 0x4900, "vdd_l9_l10_l13_l14_l15_l18", },
	{ "l11", 0x4a00, "vdd_l8_l11_l12_l17_l22", },
	{ "l12", 0x4b00, "vdd_l8_l11_l12_l17_l22", },
	{ "l13", 0x4c00, "vdd_l9_l10_l13_l14_l15_l18", },
	{ "l14", 0x4d00, "vdd_l9_l10_l13_l14_l15_l18", },
	{ "l15", 0x4e00, "vdd_l9_l10_l13_l14_l15_l18", },
	{ "l16", 0x4f00, "vdd_l4_l5_l6_l7_l16", },
	{ "l17", 0x5000, "vdd_l8_l11_l12_l17_l22", },
	{ "l18", 0x5100, "vdd_l9_l10_l13_l14_l15_l18", },
	{ "l19", 0x5200, "vdd_l1_l19", },
	{ "l20", 0x5300, "vdd_l20", },
	{ "l21", 0x5400, "vdd_l21", },
	{ "l22", 0x5500, "vdd_l8_l11_l12_l17_l22", },
	{ "l23", 0x5600, "vdd_l2_l23", },
	{ }
};
/* PM8994 PMIC regulators: name, SPMI peripheral base address, supply name. */
static const struct spmi_regulator_data pm8994_regulators[] = {
	{ "s1", 0x1400, "vdd_s1", },
	{ "s2", 0x1700, "vdd_s2", },
	{ "s3", 0x1a00, "vdd_s3", },
	{ "s4", 0x1d00, "vdd_s4", },
	{ "s5", 0x2000, "vdd_s5", },
	{ "s6", 0x2300, "vdd_s6", },
	{ "s7", 0x2600, "vdd_s7", },
	{ "s8", 0x2900, "vdd_s8", },
	{ "s9", 0x2c00, "vdd_s9", },
	{ "s10", 0x2f00, "vdd_s10", },
	{ "s11", 0x3200, "vdd_s11", },
	{ "s12", 0x3500, "vdd_s12", },
	{ "l1", 0x4000, "vdd_l1", },
	{ "l2", 0x4100, "vdd_l2_l26_l28", },
	{ "l3", 0x4200, "vdd_l3_l11", },
	{ "l4", 0x4300, "vdd_l4_l27_l31", },
	{ "l5", 0x4400, "vdd_l5_l7", },
	{ "l6", 0x4500, "vdd_l6_l12_l32", },
	{ "l7", 0x4600, "vdd_l5_l7", },
	{ "l8", 0x4700, "vdd_l8_l16_l30", },
	{ "l9", 0x4800, "vdd_l9_l10_l18_l22", },
	{ "l10", 0x4900, "vdd_l9_l10_l18_l22", },
	{ "l11", 0x4a00, "vdd_l3_l11", },
	{ "l12", 0x4b00, "vdd_l6_l12_l32", },
	{ "l13", 0x4c00, "vdd_l13_l19_l23_l24", },
	{ "l14", 0x4d00, "vdd_l14_l15", },
	{ "l15", 0x4e00, "vdd_l14_l15", },
	{ "l16", 0x4f00, "vdd_l8_l16_l30", },
	{ "l17", 0x5000, "vdd_l17_l29", },
	{ "l18", 0x5100, "vdd_l9_l10_l18_l22", },
	{ "l19", 0x5200, "vdd_l13_l19_l23_l24", },
	{ "l20", 0x5300, "vdd_l20_l21", },
	{ "l21", 0x5400, "vdd_l20_l21", },
	{ "l22", 0x5500, "vdd_l9_l10_l18_l22", },
	{ "l23", 0x5600, "vdd_l13_l19_l23_l24", },
	{ "l24", 0x5700, "vdd_l13_l19_l23_l24", },
	{ "l25", 0x5800, "vdd_l25", },
	{ "l26", 0x5900, "vdd_l2_l26_l28", },
	{ "l27", 0x5a00, "vdd_l4_l27_l31", },
	{ "l28", 0x5b00, "vdd_l2_l26_l28", },
	{ "l29", 0x5c00, "vdd_l17_l29", },
	{ "l30", 0x5d00, "vdd_l8_l16_l30", },
	{ "l31", 0x5e00, "vdd_l4_l27_l31", },
	{ "l32", 0x5f00, "vdd_l6_l12_l32", },
	{ "lvs1", 0x8000, "vdd_lvs_1_2", },
	{ "lvs2", 0x8100, "vdd_lvs_1_2", },
	{ }
};
/* PMI8994 PMIC regulators: name, SPMI peripheral base address, supply name. */
static const struct spmi_regulator_data pmi8994_regulators[] = {
	{ "s1", 0x1400, "vdd_s1", },
	{ "s2", 0x1700, "vdd_s2", },
	{ "s3", 0x1a00, "vdd_s3", },
	{ "l1", 0x4000, "vdd_l1", },
	{ }
};
/* PMP8074 PMIC regulators: name, SPMI peripheral base address, supply name. */
static const struct spmi_regulator_data pmp8074_regulators[] = {
	{ "s1", 0x1400, "vdd_s1"},
	{ "s2", 0x1700, "vdd_s2"},
	{ "s3", 0x1a00, "vdd_s3"},
	{ "s4", 0x1d00, "vdd_s4"},
	{ "s5", 0x2000, "vdd_s5"},
	{ "l1", 0x4000, "vdd_l1_l2"},
	{ "l2", 0x4100, "vdd_l1_l2"},
	{ "l3", 0x4200, "vdd_l3_l8"},
	{ "l4", 0x4300, "vdd_l4"},
	{ "l5", 0x4400, "vdd_l5_l6_l15"},
	{ "l6", 0x4500, "vdd_l5_l6_l15"},
	{ "l7", 0x4600, "vdd_l7"},
	{ "l8", 0x4700, "vdd_l3_l8"},
	{ "l9", 0x4800, "vdd_l9"},
	/* l10 is currently unsupported HT_P50 */
	{ "l11", 0x4a00, "vdd_l10_l11_l12_l13"},
	{ "l12", 0x4b00, "vdd_l10_l11_l12_l13"},
	{ "l13", 0x4c00, "vdd_l10_l11_l12_l13"},
	{ }
};
/* PMS405 PMIC regulators: name, SPMI peripheral base address, supply name. */
static const struct spmi_regulator_data pms405_regulators[] = {
	{ "s3", 0x1a00, "vdd_s3"},
	{ }
};
/* Device-tree match table binding each PMIC compatible to its regulator list. */
static const struct of_device_id qcom_spmi_regulator_match[] = {
	{ .compatible = "qcom,pm6125-regulators", .data = &pm6125_regulators },
	{ .compatible = "qcom,pm660-regulators", .data = &pm660_regulators },
	{ .compatible = "qcom,pm660l-regulators", .data = &pm660l_regulators },
	{ .compatible = "qcom,pm8004-regulators", .data = &pm8004_regulators },
	{ .compatible = "qcom,pm8005-regulators", .data = &pm8005_regulators },
	{ .compatible = "qcom,pm8226-regulators", .data = &pm8226_regulators },
	{ .compatible = "qcom,pm8841-regulators", .data = &pm8841_regulators },
	{ .compatible = "qcom,pm8916-regulators", .data = &pm8916_regulators },
	{ .compatible = "qcom,pm8941-regulators", .data = &pm8941_regulators },
	{ .compatible = "qcom,pm8950-regulators", .data = &pm8950_regulators },
	{ .compatible = "qcom,pm8994-regulators", .data = &pm8994_regulators },
	{ .compatible = "qcom,pmi8994-regulators", .data = &pmi8994_regulators },
	{ .compatible = "qcom,pmp8074-regulators", .data = &pmp8074_regulators },
	{ .compatible = "qcom,pms405-regulators", .data = &pms405_regulators },
	{ }
};
MODULE_DEVICE_TABLE(of, qcom_spmi_regulator_match);
/*
 * Probe: register every regulator listed in the matched PMIC variant table.
 *
 * Each registered regulator is also linked into a devm-allocated list that
 * is stored as platform drvdata (presumably consumed by other routines in
 * this file outside this view — TODO confirm).
 */
static int qcom_spmi_regulator_probe(struct platform_device *pdev)
{
	const struct spmi_regulator_data *reg;
	const struct spmi_voltage_range *range;
	const struct of_device_id *match;
	struct regulator_config config = { };
	struct regulator_dev *rdev;
	struct spmi_regulator *vreg;
	struct regmap *regmap;
	const char *name;
	struct device *dev = &pdev->dev;
	struct device_node *node = pdev->dev.of_node;
	struct device_node *syscon, *reg_node;
	struct property *reg_prop;
	int ret, lenp;
	struct list_head *vreg_list;

	vreg_list = devm_kzalloc(dev, sizeof(*vreg_list), GFP_KERNEL);
	if (!vreg_list)
		return -ENOMEM;
	INIT_LIST_HEAD(vreg_list);
	platform_set_drvdata(pdev, vreg_list);

	/* Register access goes through the parent SPMI PMIC's regmap */
	regmap = dev_get_regmap(dev->parent, NULL);
	if (!regmap)
		return -ENODEV;

	match = of_match_device(qcom_spmi_regulator_match, &pdev->dev);
	if (!match)
		return -ENODEV;

	/*
	 * Optional SAW (power controller) handle; failure to map it is
	 * reported but treated as non-fatal.
	 * NOTE(review): on failure saw_regmap (file-scope, defined outside
	 * this view) is left holding an ERR_PTR, which later truthiness
	 * checks treat as "present" — confirm intended.
	 */
	if (of_find_property(node, "qcom,saw-reg", &lenp)) {
		syscon = of_parse_phandle(node, "qcom,saw-reg", 0);
		saw_regmap = syscon_node_to_regmap(syscon);
		of_node_put(syscon);
		if (IS_ERR(saw_regmap))
			dev_err(dev, "ERROR reading SAW regmap\n");
	}

	for (reg = match->data; reg->name; reg++) {
		/* SAW slaves are driven by their SAW leader — skip them */
		if (saw_regmap) {
			reg_node = of_get_child_by_name(node, reg->name);
			reg_prop = of_find_property(reg_node, "qcom,saw-slave",
						    &lenp);
			of_node_put(reg_node);
			if (reg_prop)
				continue;
		}

		vreg = devm_kzalloc(dev, sizeof(*vreg), GFP_KERNEL);
		if (!vreg)
			return -ENOMEM;

		vreg->dev = dev;
		vreg->base = reg->base;
		vreg->regmap = regmap;

		/* Over-current IRQ is optional per regulator entry */
		if (reg->ocp) {
			vreg->ocp_irq = platform_get_irq_byname(pdev, reg->ocp);
			if (vreg->ocp_irq < 0)
				return vreg->ocp_irq;
		}

		vreg->desc.id = -1;
		vreg->desc.owner = THIS_MODULE;
		vreg->desc.type = REGULATOR_VOLTAGE;
		vreg->desc.enable_reg = reg->base + SPMI_COMMON_REG_ENABLE;
		vreg->desc.enable_mask = SPMI_COMMON_ENABLE_MASK;
		vreg->desc.enable_val = SPMI_COMMON_ENABLE;
		vreg->desc.name = name = reg->name;
		vreg->desc.supply_name = reg->supply;
		vreg->desc.of_match = reg->name;
		vreg->desc.of_parse_cb = spmi_regulator_of_parse;
		vreg->desc.of_map_mode = spmi_regulator_of_map_mode;

		/* Unsupported/unknown subtype: silently skip this entry */
		ret = spmi_regulator_match(vreg, reg->force_type);
		if (ret)
			continue;

		/* A SAW leader routes voltage writes through the SAW block */
		if (saw_regmap) {
			reg_node = of_get_child_by_name(node, reg->name);
			reg_prop = of_find_property(reg_node, "qcom,saw-leader",
						    &lenp);
			of_node_put(reg_node);
			if (reg_prop) {
				spmi_saw_ops = *(vreg->desc.ops);
				spmi_saw_ops.set_voltage_sel =
					spmi_regulator_saw_set_voltage;
				vreg->desc.ops = &spmi_saw_ops;
			}
		}

		if (vreg->set_points && vreg->set_points->count == 1) {
			/* since there is only one range */
			range = vreg->set_points->range;
			vreg->desc.uV_step = range->step_uV;
		}

		config.dev = dev;
		config.driver_data = vreg;
		config.regmap = regmap;
		rdev = devm_regulator_register(dev, &vreg->desc, &config);
		if (IS_ERR(rdev)) {
			dev_err(dev, "failed to register %s\n", name);
			return PTR_ERR(rdev);
		}

		INIT_LIST_HEAD(&vreg->node);
		list_add(&vreg->node, vreg_list);
	}

	return 0;
}
static struct platform_driver qcom_spmi_regulator_driver = {
.driver = {
.name = "qcom-spmi-regulator",
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
.of_match_table = qcom_spmi_regulator_match,
},
.probe = qcom_spmi_regulator_probe,
};
module_platform_driver(qcom_spmi_regulator_driver);
MODULE_DESCRIPTION("Qualcomm SPMI PMIC regulator driver");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:qcom-spmi-regulator");
| linux-master | drivers/regulator/qcom_spmi-regulator.c |
// SPDX-License-Identifier: GPL-2.0
//
// Regulator driver for TPS68470 PMIC
//
// Copyright (c) 2021 Red Hat Inc.
// Copyright (C) 2018 Intel Corporation
//
// Authors:
// Hans de Goede <[email protected]>
// Zaikuo Wang <[email protected]>
// Tianshu Qiu <[email protected]>
// Jian Xu Zheng <[email protected]>
// Yuning Pu <[email protected]>
// Rajmohan Mani <[email protected]>
#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/mfd/tps68470.h>
#include <linux/module.h>
#include <linux/platform_data/tps68470.h>
#include <linux/platform_device.h>
#include <linux/regulator/driver.h>
#include <linux/regulator/machine.h>
/* Driver-private data shared by all regulators of one TPS68470 instance */
struct tps68470_regulator_data {
	struct clk *clk;	/* PMIC PLL clock, gated around the Core buck */
};
#define TPS68470_REGULATOR(_name, _id, _ops, _n, \
_vr, _vm, _er, _em, _lr, _nlr) \
[TPS68470_ ## _name] = { \
.name = # _name, \
.id = _id, \
.ops = &_ops, \
.n_voltages = _n, \
.type = REGULATOR_VOLTAGE, \
.owner = THIS_MODULE, \
.vsel_reg = _vr, \
.vsel_mask = _vm, \
.enable_reg = _er, \
.enable_mask = _em, \
.linear_ranges = _lr, \
.n_linear_ranges = _nlr, \
}
static const struct linear_range tps68470_ldo_ranges[] = {
REGULATOR_LINEAR_RANGE(875000, 0, 125, 17800),
};
static const struct linear_range tps68470_core_ranges[] = {
REGULATOR_LINEAR_RANGE(900000, 0, 42, 25000),
};
/*
 * Enable a TPS68470 regulator. The Core buck additionally requires the
 * PMIC's PLL clock to be running, so it is prepared/enabled first.
 */
static int tps68470_regulator_enable(struct regulator_dev *rdev)
{
	struct tps68470_regulator_data *data = rdev->reg_data;

	if (rdev->desc->id == TPS68470_CORE) {
		int err = clk_prepare_enable(data->clk);

		if (err) {
			dev_err(&rdev->dev, "Error enabling TPS68470 clock\n");
			return err;
		}
	}

	return regulator_enable_regmap(rdev);
}
static int tps68470_regulator_disable(struct regulator_dev *rdev)
{
struct tps68470_regulator_data *data = rdev->reg_data;
if (rdev->desc->id == TPS68470_CORE)
clk_disable_unprepare(data->clk);
return regulator_disable_regmap(rdev);
}
/* Operations permitted on DCDCx, LDO2, LDO3 and LDO4 */
static const struct regulator_ops tps68470_regulator_ops = {
	.is_enabled = regulator_is_enabled_regmap,
	.enable = tps68470_regulator_enable,
	.disable = tps68470_regulator_disable,
	.get_voltage_sel = regulator_get_voltage_sel_regmap,
	.set_voltage_sel = regulator_set_voltage_sel_regmap,
	.list_voltage = regulator_list_voltage_linear_range,
	.map_voltage = regulator_map_voltage_linear_range,
};

/* Always-on rails (VIO): only voltage get/set, no enable/disable ops */
static const struct regulator_ops tps68470_always_on_reg_ops = {
	.get_voltage_sel = regulator_get_voltage_sel_regmap,
	.set_voltage_sel = regulator_set_voltage_sel_regmap,
	.list_voltage = regulator_list_voltage_linear_range,
	.map_voltage = regulator_map_voltage_linear_range,
};
static const struct regulator_desc regulators[] = {
TPS68470_REGULATOR(CORE, TPS68470_CORE, tps68470_regulator_ops, 43,
TPS68470_REG_VDVAL, TPS68470_VDVAL_DVOLT_MASK,
TPS68470_REG_VDCTL, TPS68470_VDCTL_EN_MASK,
tps68470_core_ranges, ARRAY_SIZE(tps68470_core_ranges)),
TPS68470_REGULATOR(ANA, TPS68470_ANA, tps68470_regulator_ops, 126,
TPS68470_REG_VAVAL, TPS68470_VAVAL_AVOLT_MASK,
TPS68470_REG_VACTL, TPS68470_VACTL_EN_MASK,
tps68470_ldo_ranges, ARRAY_SIZE(tps68470_ldo_ranges)),
TPS68470_REGULATOR(VCM, TPS68470_VCM, tps68470_regulator_ops, 126,
TPS68470_REG_VCMVAL, TPS68470_VCMVAL_VCVOLT_MASK,
TPS68470_REG_VCMCTL, TPS68470_VCMCTL_EN_MASK,
tps68470_ldo_ranges, ARRAY_SIZE(tps68470_ldo_ranges)),
TPS68470_REGULATOR(VIO, TPS68470_VIO, tps68470_always_on_reg_ops, 126,
TPS68470_REG_VIOVAL, TPS68470_VIOVAL_IOVOLT_MASK,
0, 0,
tps68470_ldo_ranges, ARRAY_SIZE(tps68470_ldo_ranges)),
/*
* (1) This regulator must have the same voltage as VIO if S_IO LDO is used to
* power a sensor/VCM which I2C is daisy chained behind the PMIC.
* (2) If there is no I2C daisy chain it can be set freely.
*/
TPS68470_REGULATOR(VSIO, TPS68470_VSIO, tps68470_regulator_ops, 126,
TPS68470_REG_VSIOVAL, TPS68470_VSIOVAL_IOVOLT_MASK,
TPS68470_REG_S_I2C_CTL, TPS68470_S_I2C_CTL_EN_MASK,
tps68470_ldo_ranges, ARRAY_SIZE(tps68470_ldo_ranges)),
TPS68470_REGULATOR(AUX1, TPS68470_AUX1, tps68470_regulator_ops, 126,
TPS68470_REG_VAUX1VAL, TPS68470_VAUX1VAL_AUX1VOLT_MASK,
TPS68470_REG_VAUX1CTL, TPS68470_VAUX1CTL_EN_MASK,
tps68470_ldo_ranges, ARRAY_SIZE(tps68470_ldo_ranges)),
TPS68470_REGULATOR(AUX2, TPS68470_AUX2, tps68470_regulator_ops, 126,
TPS68470_REG_VAUX2VAL, TPS68470_VAUX2VAL_AUX2VOLT_MASK,
TPS68470_REG_VAUX2CTL, TPS68470_VAUX2CTL_EN_MASK,
tps68470_ldo_ranges, ARRAY_SIZE(tps68470_ldo_ranges)),
};
/*
 * Probe: register all TPS68470 regulators.
 *
 * The regmap is owned by the MFD parent (stored as the parent's drvdata),
 * so config.dev points at the parent as well. Board-supplied init_data is
 * optional.
 *
 * Fix vs. previous revision: the call to devm_regulator_register() had its
 * "&regulators[i]" argument corrupted into "®ulators[i]" (mojibake of the
 * HTML entity for '&reg'), which does not compile.
 */
static int tps68470_regulator_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct tps68470_regulator_platform_data *pdata = dev_get_platdata(dev);
	struct tps68470_regulator_data *data;
	struct regulator_config config = { };
	struct regulator_dev *rdev;
	int i;

	data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	data->clk = devm_clk_get(dev, "tps68470-clk");
	if (IS_ERR(data->clk))
		return dev_err_probe(dev, PTR_ERR(data->clk), "getting tps68470-clk\n");

	config.dev = dev->parent;
	config.regmap = dev_get_drvdata(dev->parent);
	config.driver_data = data;

	for (i = 0; i < TPS68470_NUM_REGULATORS; i++) {
		/* init_data is optional; boards may provide none */
		config.init_data = pdata ? pdata->reg_init_data[i] : NULL;

		rdev = devm_regulator_register(dev, &regulators[i], &config);
		if (IS_ERR(rdev))
			return dev_err_probe(dev, PTR_ERR(rdev),
					     "registering %s regulator\n",
					     regulators[i].name);
	}

	return 0;
}
static struct platform_driver tps68470_regulator_driver = {
.driver = {
.name = "tps68470-regulator",
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
},
.probe = tps68470_regulator_probe,
};
/*
* The ACPI tps68470 probe-ordering depends on the clk/gpio/regulator drivers
* registering before the drivers for the camera-sensors which use them bind.
* subsys_initcall() ensures this when the drivers are builtin.
*/
/* Register the driver early; see the ordering rationale in the comment above. */
static int __init tps68470_regulator_init(void)
{
	return platform_driver_register(&tps68470_regulator_driver);
}
subsys_initcall(tps68470_regulator_init);

/* Module unload: unregister the platform driver. */
static void __exit tps68470_regulator_exit(void)
{
	platform_driver_unregister(&tps68470_regulator_driver);
}
module_exit(tps68470_regulator_exit);
MODULE_ALIAS("platform:tps68470-regulator");
MODULE_DESCRIPTION("TPS68470 voltage regulator driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/regulator/tps68470-regulator.c |
// SPDX-License-Identifier: GPL-2.0+
#include <linux/bits.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/regulator/driver.h>
#include <linux/regulator/machine.h>
#include <linux/regulator/of_regulator.h>
#define RT5120_REG_PGSTAT 0x03
#define RT5120_REG_CH1VID 0x06
#define RT5120_REG_CH1SLPVID 0x07
#define RT5120_REG_ENABLE 0x08
#define RT5120_REG_MODECTL 0x09
#define RT5120_REG_UVOVPROT 0x0A
#define RT5120_REG_SLPCTL 0x0C
#define RT5120_REG_INTSTAT 0x1E
#define RT5120_REG_DISCHG 0x1F
#define RT5120_OUTPG_MASK(rid) BIT(rid + 1)
#define RT5120_OUTUV_MASK(rid) BIT(rid + 9)
#define RT5120_OUTOV_MASK(rid) BIT(rid + 16)
#define RT5120_CH1VID_MASK GENMASK(6, 0)
#define RT5120_RIDEN_MASK(rid) BIT(rid + 1)
#define RT5120_RADEN_MASK(rid) BIT(rid)
#define RT5120_FPWM_MASK(rid) BIT(rid + 1)
#define RT5120_UVHICCUP_MASK BIT(1)
#define RT5120_OVHICCUP_MASK BIT(0)
#define RT5120_HOTDIE_MASK BIT(1)
#define RT5120_BUCK1_MINUV 600000
#define RT5120_BUCK1_MAXUV 1393750
#define RT5120_BUCK1_STEPUV 6250
#define RT5120_BUCK1_NUM_VOLT 0x80
#define RT5120_AUTO_MODE 0
#define RT5120_FPWM_MODE 1
enum {
RT5120_REGULATOR_BUCK1 = 0,
RT5120_REGULATOR_BUCK2,
RT5120_REGULATOR_BUCK3,
RT5120_REGULATOR_BUCK4,
RT5120_REGULATOR_LDO,
RT5120_REGULATOR_EXTEN,
RT5120_MAX_REGULATOR
};
struct rt5120_priv {
struct device *dev;
struct regmap *regmap;
struct regulator_desc rdesc[RT5120_MAX_REGULATOR];
};
/*
 * Set a buck's operating mode: NORMAL clears the forced-PWM bit (auto
 * mode), FAST sets it. Any other mode is rejected.
 */
static int rt5120_buck_set_mode(struct regulator_dev *rdev, unsigned int mode)
{
	struct regmap *regmap = rdev_get_regmap(rdev);
	int rid = rdev_get_id(rdev);
	unsigned int fpwm_bit = RT5120_FPWM_MASK(rid);
	unsigned int newval;

	if (mode == REGULATOR_MODE_NORMAL)
		newval = 0;
	else if (mode == REGULATOR_MODE_FAST)
		newval = fpwm_bit;
	else
		return -EINVAL;

	return regmap_update_bits(regmap, RT5120_REG_MODECTL, fpwm_bit, newval);
}
/*
 * Report a buck's operating mode from the forced-PWM bit in MODECTL;
 * REGULATOR_MODE_INVALID is returned if the register read fails.
 */
static unsigned int rt5120_buck_get_mode(struct regulator_dev *rdev)
{
	struct regmap *regmap = rdev_get_regmap(rdev);
	int rid = rdev_get_id(rdev);
	unsigned int regval;
	int err;

	err = regmap_read(regmap, RT5120_REG_MODECTL, &regval);
	if (err)
		return REGULATOR_MODE_INVALID;

	return (regval & RT5120_FPWM_MASK(rid)) ? REGULATOR_MODE_FAST :
						  REGULATOR_MODE_NORMAL;
}
/*
 * Collect error flags for one regulator from the PG/UV/OV status block
 * and the hot-die interrupt status.
 *
 * Fix: the 3-byte regmap_raw_read() fills only three of stat's four
 * bytes, so stat is now zero-initialized to keep the untouched byte
 * well defined before the bit tests below.
 */
static int rt5120_regulator_get_error_flags(struct regulator_dev *rdev,
					    unsigned int *flags)
{
	struct regmap *regmap = rdev_get_regmap(rdev);
	unsigned int stat = 0, hd_stat, cur_flags = 0;
	int rid = rdev_get_id(rdev), ret;

	/*
	 * reg 0x03/0x04/0x05 to indicate PG/UV/OV
	 * use block read to descrease I/O xfer time
	 */
	ret = regmap_raw_read(regmap, RT5120_REG_PGSTAT, &stat, 3);
	if (ret)
		return ret;

	ret = regmap_read(regmap, RT5120_REG_INTSTAT, &hd_stat);
	if (ret)
		return ret;

	/* UV/OV bits are only meaningful while power-good is not asserted */
	if (!(stat & RT5120_OUTPG_MASK(rid))) {
		if (stat & RT5120_OUTUV_MASK(rid))
			cur_flags |= REGULATOR_ERROR_UNDER_VOLTAGE;

		if (stat & RT5120_OUTOV_MASK(rid))
			cur_flags |= REGULATOR_ERROR_REGULATION_OUT;
	}

	if (hd_stat & RT5120_HOTDIE_MASK)
		cur_flags |= REGULATOR_ERROR_OVER_TEMP;

	*flags = cur_flags;
	return 0;
}
/*
 * Program buck1's sleep-mode voltage selector. Voltages outside the
 * buck1 linear range are rejected.
 */
static int rt5120_buck1_set_suspend_voltage(struct regulator_dev *rdev, int uV)
{
	struct regmap *regmap = rdev_get_regmap(rdev);

	if (uV < RT5120_BUCK1_MINUV || uV > RT5120_BUCK1_MAXUV)
		return -EINVAL;

	return regmap_write(regmap, RT5120_REG_CH1SLPVID,
			    (uV - RT5120_BUCK1_MINUV) / RT5120_BUCK1_STEPUV);
}
/* Keep this regulator enabled while the system is in suspend. */
static int rt5120_regulator_set_suspend_enable(struct regulator_dev *rdev)
{
	unsigned int enbit = RT5120_RIDEN_MASK(rdev_get_id(rdev));

	return regmap_update_bits(rdev_get_regmap(rdev), RT5120_REG_SLPCTL,
				  enbit, enbit);
}
/* Turn this regulator off while the system is in suspend. */
static int rt5120_regulator_set_suspend_disable(struct regulator_dev *rdev)
{
	unsigned int enbit = RT5120_RIDEN_MASK(rdev_get_id(rdev));

	return regmap_update_bits(rdev_get_regmap(rdev), RT5120_REG_SLPCTL,
				  enbit, 0);
}
static const struct regulator_ops rt5120_buck1_ops = {
.enable = regulator_enable_regmap,
.disable = regulator_disable_regmap,
.is_enabled = regulator_is_enabled_regmap,
.list_voltage = regulator_list_voltage_linear,
.set_voltage_sel = regulator_set_voltage_sel_regmap,
.get_voltage_sel = regulator_get_voltage_sel_regmap,
.set_active_discharge = regulator_set_active_discharge_regmap,
.set_mode = rt5120_buck_set_mode,
.get_mode = rt5120_buck_get_mode,
.get_error_flags = rt5120_regulator_get_error_flags,
.set_suspend_voltage = rt5120_buck1_set_suspend_voltage,
.set_suspend_enable = rt5120_regulator_set_suspend_enable,
.set_suspend_disable = rt5120_regulator_set_suspend_disable,
};
static const struct regulator_ops rt5120_buck234_ops = {
.enable = regulator_enable_regmap,
.disable = regulator_disable_regmap,
.is_enabled = regulator_is_enabled_regmap,
.set_active_discharge = regulator_set_active_discharge_regmap,
.set_mode = rt5120_buck_set_mode,
.get_mode = rt5120_buck_get_mode,
.get_error_flags = rt5120_regulator_get_error_flags,
.set_suspend_enable = rt5120_regulator_set_suspend_enable,
.set_suspend_disable = rt5120_regulator_set_suspend_disable,
};
static const struct regulator_ops rt5120_ldo_ops = {
.enable = regulator_enable_regmap,
.disable = regulator_disable_regmap,
.is_enabled = regulator_is_enabled_regmap,
.set_active_discharge = regulator_set_active_discharge_regmap,
.get_error_flags = rt5120_regulator_get_error_flags,
.set_suspend_enable = rt5120_regulator_set_suspend_enable,
.set_suspend_disable = rt5120_regulator_set_suspend_disable,
};
static const struct regulator_ops rt5120_exten_ops = {
.enable = regulator_enable_regmap,
.disable = regulator_disable_regmap,
.is_enabled = regulator_is_enabled_regmap,
.set_suspend_enable = rt5120_regulator_set_suspend_enable,
.set_suspend_disable = rt5120_regulator_set_suspend_disable,
};
/* Translate the DT-encoded buck mode into the regulator framework's mode. */
static unsigned int rt5120_buck_of_map_mode(unsigned int mode)
{
	if (mode == RT5120_AUTO_MODE)
		return REGULATOR_MODE_NORMAL;

	if (mode == RT5120_FPWM_MODE)
		return REGULATOR_MODE_FAST;

	return REGULATOR_MODE_INVALID;
}
/*
 * Populate a regulator_desc for regulator @rid: common properties first,
 * then the per-regulator ops and (for buck1) the voltage range.
 *
 * Fix: the buck1 vsel_reg/vsel_mask assignments were terminated with
 * commas instead of semicolons, silently chaining them into one
 * comma-operator statement; use proper semicolons.
 */
static void rt5120_fillin_regulator_desc(struct regulator_desc *desc, int rid)
{
	static const char * const name[] = {
		"buck1", "buck2", "buck3", "buck4", "ldo", "exten" };
	static const char * const sname[] = {
		"vin1", "vin2", "vin3", "vin4", "vinldo", NULL };

	/* Common regulator property */
	desc->name = name[rid];
	desc->supply_name = sname[rid];
	desc->owner = THIS_MODULE;
	desc->type = REGULATOR_VOLTAGE;
	desc->id = rid;
	desc->enable_reg = RT5120_REG_ENABLE;
	desc->enable_mask = RT5120_RIDEN_MASK(rid);
	desc->active_discharge_reg = RT5120_REG_DISCHG;
	desc->active_discharge_mask = RT5120_RADEN_MASK(rid);
	desc->active_discharge_on = RT5120_RADEN_MASK(rid);
	/* Config n_voltages to 1 for all */
	desc->n_voltages = 1;

	/* Only buck support mode change */
	if (rid >= RT5120_REGULATOR_BUCK1 && rid <= RT5120_REGULATOR_BUCK4)
		desc->of_map_mode = rt5120_buck_of_map_mode;

	/* RID specific property init */
	switch (rid) {
	case RT5120_REGULATOR_BUCK1:
		/* Only buck1 support voltage change by I2C */
		desc->n_voltages = RT5120_BUCK1_NUM_VOLT;
		desc->min_uV = RT5120_BUCK1_MINUV;
		desc->uV_step = RT5120_BUCK1_STEPUV;
		desc->vsel_reg = RT5120_REG_CH1VID;
		desc->vsel_mask = RT5120_CH1VID_MASK;
		desc->ops = &rt5120_buck1_ops;
		break;
	case RT5120_REGULATOR_BUCK2 ... RT5120_REGULATOR_BUCK4:
		desc->ops = &rt5120_buck234_ops;
		break;
	case RT5120_REGULATOR_LDO:
		desc->ops = &rt5120_ldo_ops;
		break;
	default:
		desc->ops = &rt5120_exten_ops;
	}
}
/*
 * Post-process one regulator's DT init_data: all regulators except buck1
 * are fixed-voltage, so derive fixed_uV from the constraints and reject
 * a variable (min != max) voltage request for them.
 */
static int rt5120_of_parse_cb(struct rt5120_priv *priv, int rid,
			      struct of_regulator_match *match)
{
	struct regulator_desc *desc = priv->rdesc + rid;
	struct regulator_init_data *init_data = match->init_data;

	/* No DT constraints, or buck1 (truly variable): nothing to derive */
	if (!init_data || rid == RT5120_REGULATOR_BUCK1)
		return 0;

	if (init_data->constraints.min_uV != init_data->constraints.max_uV) {
		dev_err(priv->dev, "Variable voltage for fixed regulator\n");
		return -EINVAL;
	}

	desc->fixed_uV = init_data->constraints.min_uV;
	return 0;
}
static struct of_regulator_match rt5120_regu_match[RT5120_MAX_REGULATOR] = {
[RT5120_REGULATOR_BUCK1] = { .name = "buck1", },
[RT5120_REGULATOR_BUCK2] = { .name = "buck2", },
[RT5120_REGULATOR_BUCK3] = { .name = "buck3", },
[RT5120_REGULATOR_BUCK4] = { .name = "buck4", },
[RT5120_REGULATOR_LDO] = { .name = "ldo", },
[RT5120_REGULATOR_EXTEN] = { .name = "exten", }
};
/*
 * Build all regulator descriptors and match them against the parent
 * device's "regulators" DT node, then run the per-regulator parse
 * callback to finalize fixed-voltage descriptors.
 *
 * Fix: corrected the "of_passe_cb" typo in the error message.
 */
static int rt5120_parse_regulator_dt_data(struct rt5120_priv *priv)
{
	struct device *dev = priv->dev->parent;
	struct device_node *reg_node;
	int i, ret;

	for (i = 0; i < RT5120_MAX_REGULATOR; i++) {
		rt5120_fillin_regulator_desc(priv->rdesc + i, i);

		rt5120_regu_match[i].desc = priv->rdesc + i;
	}

	reg_node = of_get_child_by_name(dev->of_node, "regulators");
	if (!reg_node) {
		dev_err(priv->dev, "Couldn't find 'regulators' node\n");
		return -ENODEV;
	}

	ret = of_regulator_match(priv->dev, reg_node, rt5120_regu_match,
				 ARRAY_SIZE(rt5120_regu_match));

	of_node_put(reg_node);

	if (ret < 0) {
		dev_err(priv->dev,
			"Error parsing regulator init data (%d)\n", ret);
		return ret;
	}

	for (i = 0; i < RT5120_MAX_REGULATOR; i++) {
		ret = rt5120_of_parse_cb(priv, i, rt5120_regu_match + i);
		if (ret) {
			dev_err(priv->dev, "Failed in [%d] of_parse_cb\n", i);
			return ret;
		}
	}

	return 0;
}
/*
 * Apply the DT-selected UV/OV hardware protection behavior (hiccup vs.
 * default) to the UVOVPROT register.
 */
static int rt5120_device_property_init(struct rt5120_priv *priv)
{
	struct device_node *np = priv->dev->parent->of_node;
	unsigned int prot_enable_val = 0;

	/* Assign UV/OV HW protection behavior */
	if (of_property_read_bool(np, "richtek,enable-undervolt-hiccup"))
		prot_enable_val |= RT5120_UVHICCUP_MASK;

	if (of_property_read_bool(np, "richtek,enable-overvolt-hiccup"))
		prot_enable_val |= RT5120_OVHICCUP_MASK;

	return regmap_update_bits(priv->regmap, RT5120_REG_UVOVPROT,
				  RT5120_UVHICCUP_MASK | RT5120_OVHICCUP_MASK,
				  prot_enable_val);
}
/*
 * Probe: take the MFD parent's regmap, apply DT protection properties,
 * parse per-regulator DT data, then register all six regulators.
 */
static int rt5120_regulator_probe(struct platform_device *pdev)
{
	struct rt5120_priv *priv;
	struct regulator_dev *rdev;
	struct regulator_config config = {};
	int i, ret;

	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->dev = &pdev->dev;

	/* Register access goes through the MFD parent's regmap */
	priv->regmap = dev_get_regmap(pdev->dev.parent, NULL);
	if (!priv->regmap) {
		dev_err(&pdev->dev, "Failed to init regmap\n");
		return -ENODEV;
	}

	ret = rt5120_device_property_init(priv);
	if (ret) {
		dev_err(&pdev->dev, "Failed to do property init\n");
		return ret;
	}

	ret = rt5120_parse_regulator_dt_data(priv);
	if (ret) {
		dev_err(&pdev->dev, "Failed to parse dt data\n");
		return ret;
	}

	config.dev = &pdev->dev;
	config.regmap = priv->regmap;

	for (i = 0; i < RT5120_MAX_REGULATOR; i++) {
		/* of_node/init_data were filled in by of_regulator_match() */
		config.of_node = rt5120_regu_match[i].of_node;
		config.init_data = rt5120_regu_match[i].init_data;

		rdev = devm_regulator_register(&pdev->dev, priv->rdesc + i,
					       &config);
		if (IS_ERR(rdev)) {
			dev_err(&pdev->dev,
				"Failed to register regulator [%d]\n", i);
			return PTR_ERR(rdev);
		}
	}

	return 0;
}
static const struct platform_device_id rt5120_regulator_dev_table[] = {
{ "rt5120-regulator", 0 },
{}
};
MODULE_DEVICE_TABLE(platform, rt5120_regulator_dev_table);
static struct platform_driver rt5120_regulator_driver = {
.driver = {
.name = "rt5120-regulator",
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
},
.id_table = rt5120_regulator_dev_table,
.probe = rt5120_regulator_probe,
};
module_platform_driver(rt5120_regulator_driver);
MODULE_AUTHOR("ChiYuan Huang <[email protected]>");
MODULE_DESCRIPTION("Richtek RT5120 regulator driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/regulator/rt5120-regulator.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Regulator driver for LP873X PMIC
*
* Copyright (C) 2016 Texas Instruments Incorporated - https://www.ti.com/
*/
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/mfd/lp873x.h>
#define LP873X_REGULATOR(_name, _id, _of, _ops, _n, _vr, _vm, _er, _em, \
_delay, _lr, _cr) \
[_id] = { \
.desc = { \
.name = _name, \
.supply_name = _of "-in", \
.id = _id, \
.of_match = of_match_ptr(_of), \
.regulators_node = of_match_ptr("regulators"),\
.ops = &_ops, \
.n_voltages = _n, \
.type = REGULATOR_VOLTAGE, \
.owner = THIS_MODULE, \
.vsel_reg = _vr, \
.vsel_mask = _vm, \
.enable_reg = _er, \
.enable_mask = _em, \
.ramp_delay = _delay, \
.linear_ranges = _lr, \
.n_linear_ranges = ARRAY_SIZE(_lr), \
.curr_table = lp873x_buck_uA, \
.n_current_limits = ARRAY_SIZE(lp873x_buck_uA), \
.csel_reg = (_cr), \
.csel_mask = LP873X_BUCK0_CTRL_2_BUCK0_ILIM,\
}, \
.ctrl2_reg = _cr, \
}
struct lp873x_regulator {
struct regulator_desc desc;
unsigned int ctrl2_reg;
};
static const struct lp873x_regulator regulators[];
static const struct linear_range buck0_buck1_ranges[] = {
REGULATOR_LINEAR_RANGE(0, 0x0, 0x13, 0),
REGULATOR_LINEAR_RANGE(700000, 0x14, 0x17, 10000),
REGULATOR_LINEAR_RANGE(735000, 0x18, 0x9d, 5000),
REGULATOR_LINEAR_RANGE(1420000, 0x9e, 0xff, 20000),
};
static const struct linear_range ldo0_ldo1_ranges[] = {
REGULATOR_LINEAR_RANGE(800000, 0x0, 0x19, 100000),
};
static const unsigned int lp873x_buck_ramp_delay[] = {
30000, 15000, 10000, 7500, 3800, 1900, 940, 470
};
/* LP873X BUCK current limit */
static const unsigned int lp873x_buck_uA[] = {
1500000, 2000000, 2500000, 3000000, 3500000, 4000000,
};
/*
 * Program a buck's slew-rate field from the requested ramp delay
 * (uV/us): pick the smallest supported ramp value that is still >= the
 * request; anything above the table maximum falls back to the slowest
 * setting (index 0). The achieved value is written back into the
 * regulator constraints.
 */
static int lp873x_buck_set_ramp_delay(struct regulator_dev *rdev,
				      int ramp_delay)
{
	struct lp873x *lp873 = rdev_get_drvdata(rdev);
	int id = rdev_get_id(rdev);
	unsigned int reg = 0;
	int i, ret;

	/* Table is sorted descending; scan from the smallest value up */
	for (i = ARRAY_SIZE(lp873x_buck_ramp_delay) - 1; i > 0; i--) {
		if (ramp_delay <= lp873x_buck_ramp_delay[i]) {
			reg = i;
			break;
		}
	}

	ret = regmap_update_bits(lp873->regmap, regulators[id].ctrl2_reg,
				 LP873X_BUCK0_CTRL_2_BUCK0_SLEW_RATE,
				 reg << __ffs(LP873X_BUCK0_CTRL_2_BUCK0_SLEW_RATE));
	if (ret) {
		dev_err(lp873->dev, "SLEW RATE write failed: %d\n", ret);
		return ret;
	}

	rdev->constraints->ramp_delay = lp873x_buck_ramp_delay[reg];

	return 0;
}
/* Operations permitted on BUCK0, BUCK1 */
static const struct regulator_ops lp873x_buck01_ops = {
.is_enabled = regulator_is_enabled_regmap,
.enable = regulator_enable_regmap,
.disable = regulator_disable_regmap,
.get_voltage_sel = regulator_get_voltage_sel_regmap,
.set_voltage_sel = regulator_set_voltage_sel_regmap,
.list_voltage = regulator_list_voltage_linear_range,
.map_voltage = regulator_map_voltage_linear_range,
.set_voltage_time_sel = regulator_set_voltage_time_sel,
.set_ramp_delay = lp873x_buck_set_ramp_delay,
.set_current_limit = regulator_set_current_limit_regmap,
.get_current_limit = regulator_get_current_limit_regmap,
};
/* Operations permitted on LDO0 and LDO1 */
static const struct regulator_ops lp873x_ldo01_ops = {
.is_enabled = regulator_is_enabled_regmap,
.enable = regulator_enable_regmap,
.disable = regulator_disable_regmap,
.get_voltage_sel = regulator_get_voltage_sel_regmap,
.set_voltage_sel = regulator_set_voltage_sel_regmap,
.list_voltage = regulator_list_voltage_linear_range,
.map_voltage = regulator_map_voltage_linear_range,
};
static const struct lp873x_regulator regulators[] = {
LP873X_REGULATOR("BUCK0", LP873X_BUCK_0, "buck0", lp873x_buck01_ops,
256, LP873X_REG_BUCK0_VOUT,
LP873X_BUCK0_VOUT_BUCK0_VSET, LP873X_REG_BUCK0_CTRL_1,
LP873X_BUCK0_CTRL_1_BUCK0_EN, 10000,
buck0_buck1_ranges, LP873X_REG_BUCK0_CTRL_2),
LP873X_REGULATOR("BUCK1", LP873X_BUCK_1, "buck1", lp873x_buck01_ops,
256, LP873X_REG_BUCK1_VOUT,
LP873X_BUCK1_VOUT_BUCK1_VSET, LP873X_REG_BUCK1_CTRL_1,
LP873X_BUCK1_CTRL_1_BUCK1_EN, 10000,
buck0_buck1_ranges, LP873X_REG_BUCK1_CTRL_2),
LP873X_REGULATOR("LDO0", LP873X_LDO_0, "ldo0", lp873x_ldo01_ops, 26,
LP873X_REG_LDO0_VOUT, LP873X_LDO0_VOUT_LDO0_VSET,
LP873X_REG_LDO0_CTRL,
LP873X_LDO0_CTRL_LDO0_EN, 0, ldo0_ldo1_ranges, 0xFF),
LP873X_REGULATOR("LDO1", LP873X_LDO_1, "ldo1", lp873x_ldo01_ops, 26,
LP873X_REG_LDO1_VOUT, LP873X_LDO1_VOUT_LDO1_VSET,
LP873X_REG_LDO1_CTRL,
LP873X_LDO1_CTRL_LDO1_EN, 0, ldo0_ldo1_ranges, 0xFF),
};
/*
 * Probe: register all LP873X regulators using the MFD parent's regmap
 * and DT node.
 *
 * Fixes: (1) the "&regulators[i].desc" argument had been corrupted into
 * "®ulators[i].desc" (mojibake), which does not compile; (2) the
 * registration error message printed pdev->name for every regulator —
 * report the failing regulator's own name instead.
 */
static int lp873x_regulator_probe(struct platform_device *pdev)
{
	struct lp873x *lp873 = dev_get_drvdata(pdev->dev.parent);
	struct regulator_config config = { };
	struct regulator_dev *rdev;
	int i;

	platform_set_drvdata(pdev, lp873);

	config.dev = &pdev->dev;
	config.dev->of_node = lp873->dev->of_node;
	config.driver_data = lp873;
	config.regmap = lp873->regmap;

	for (i = 0; i < ARRAY_SIZE(regulators); i++) {
		rdev = devm_regulator_register(&pdev->dev, &regulators[i].desc,
					       &config);
		if (IS_ERR(rdev)) {
			dev_err(lp873->dev, "failed to register %s regulator\n",
				regulators[i].desc.name);
			return PTR_ERR(rdev);
		}
	}

	return 0;
}
static const struct platform_device_id lp873x_regulator_id_table[] = {
{ "lp873x-regulator", },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(platform, lp873x_regulator_id_table);
static struct platform_driver lp873x_regulator_driver = {
.driver = {
.name = "lp873x-pmic",
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
},
.probe = lp873x_regulator_probe,
.id_table = lp873x_regulator_id_table,
};
module_platform_driver(lp873x_regulator_driver);
MODULE_AUTHOR("J Keerthy <[email protected]>");
MODULE_DESCRIPTION("LP873X voltage regulator driver");
MODULE_ALIAS("platform:lp873x-pmic");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/regulator/lp873x-regulator.c |
// SPDX-License-Identifier: GPL-2.0
//
// Device driver for regulators in Hi6421V530 IC
//
// Copyright (c) <2017> HiSilicon Technologies Co., Ltd.
// http://www.hisilicon.com
// Copyright (c) <2017> Linaro Ltd.
// https://www.linaro.org
//
// Author: Wang Xiaoyin <[email protected]>
// Guodong Xu <[email protected]>
#include <linux/mfd/hi6421-pmic.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/regulator/driver.h>
/*
* struct hi6421v530_regulator_info - hi6421v530 regulator information
* @desc: regulator description
* @mode_mask: ECO mode bitmask of LDOs; for BUCKs, this masks sleep
* @eco_microamp: eco mode load upper limit (in uA), valid for LDOs only
*/
struct hi6421v530_regulator_info {
struct regulator_desc rdesc;
u8 mode_mask;
u32 eco_microamp;
};
/* HI6421v530 regulators */
enum hi6421v530_regulator_id {
HI6421V530_LDO3,
HI6421V530_LDO9,
HI6421V530_LDO11,
HI6421V530_LDO15,
HI6421V530_LDO16,
};
static const unsigned int ldo_3_voltages[] = {
1800000, 1825000, 1850000, 1875000,
1900000, 1925000, 1950000, 1975000,
2000000, 2025000, 2050000, 2075000,
2100000, 2125000, 2150000, 2200000,
};
static const unsigned int ldo_9_11_voltages[] = {
1750000, 1800000, 1825000, 2800000,
2850000, 2950000, 3000000, 3300000,
};
static const unsigned int ldo_15_16_voltages[] = {
1750000, 1800000, 2400000, 2600000,
2700000, 2850000, 2950000, 3000000,
};
static const struct regulator_ops hi6421v530_ldo_ops;
#define HI6421V530_LDO_ENABLE_TIME (350)
/*
* _id - LDO id name string
* v_table - voltage table
* vreg - voltage select register
* vmask - voltage select mask
* ereg - enable register
* emask - enable mask
* odelay - off/on delay time in uS
* ecomask - eco mode mask
* ecoamp - eco mode load uppler limit in uA
*/
#define HI6421V530_LDO(_ID, v_table, vreg, vmask, ereg, emask, \
odelay, ecomask, ecoamp) { \
.rdesc = { \
.name = #_ID, \
.of_match = of_match_ptr(#_ID), \
.regulators_node = of_match_ptr("regulators"), \
.ops = &hi6421v530_ldo_ops, \
.type = REGULATOR_VOLTAGE, \
.id = HI6421V530_##_ID, \
.owner = THIS_MODULE, \
.n_voltages = ARRAY_SIZE(v_table), \
.volt_table = v_table, \
.vsel_reg = HI6421_REG_TO_BUS_ADDR(vreg), \
.vsel_mask = vmask, \
.enable_reg = HI6421_REG_TO_BUS_ADDR(ereg), \
.enable_mask = emask, \
.enable_time = HI6421V530_LDO_ENABLE_TIME, \
.off_on_delay = odelay, \
}, \
.mode_mask = ecomask, \
.eco_microamp = ecoamp, \
}
/* HI6421V530 regulator information */
static struct hi6421v530_regulator_info hi6421v530_regulator_info[] = {
HI6421V530_LDO(LDO3, ldo_3_voltages, 0x061, 0xf, 0x060, 0x2,
20000, 0x6, 8000),
HI6421V530_LDO(LDO9, ldo_9_11_voltages, 0x06b, 0x7, 0x06a, 0x2,
40000, 0x6, 8000),
HI6421V530_LDO(LDO11, ldo_9_11_voltages, 0x06f, 0x7, 0x06e, 0x2,
40000, 0x6, 8000),
HI6421V530_LDO(LDO15, ldo_15_16_voltages, 0x077, 0x7, 0x076, 0x2,
40000, 0x6, 8000),
HI6421V530_LDO(LDO16, ldo_15_16_voltages, 0x079, 0x7, 0x078, 0x2,
40000, 0x6, 8000),
};
static unsigned int hi6421v530_regulator_ldo_get_mode(
struct regulator_dev *rdev)
{
struct hi6421v530_regulator_info *info;
unsigned int reg_val;
info = rdev_get_drvdata(rdev);
regmap_read(rdev->regmap, rdev->desc->enable_reg, ®_val);
if (reg_val & (info->mode_mask))
return REGULATOR_MODE_IDLE;
return REGULATOR_MODE_NORMAL;
}
/*
 * Switch an LDO between NORMAL (ECO bit clear) and IDLE/ECO (ECO bit
 * set) modes; other modes are rejected.
 *
 * Fix: propagate the regmap_update_bits() result instead of discarding
 * it and unconditionally returning 0, so callers see write failures.
 */
static int hi6421v530_regulator_ldo_set_mode(struct regulator_dev *rdev,
						unsigned int mode)
{
	struct hi6421v530_regulator_info *info;
	unsigned int new_mode;

	info = rdev_get_drvdata(rdev);
	switch (mode) {
	case REGULATOR_MODE_NORMAL:
		new_mode = 0;
		break;
	case REGULATOR_MODE_IDLE:
		new_mode = info->mode_mask;
		break;
	default:
		return -EINVAL;
	}

	return regmap_update_bits(rdev->regmap, rdev->desc->enable_reg,
				  info->mode_mask, new_mode);
}
static const struct regulator_ops hi6421v530_ldo_ops = {
.is_enabled = regulator_is_enabled_regmap,
.enable = regulator_enable_regmap,
.disable = regulator_disable_regmap,
.list_voltage = regulator_list_voltage_table,
.map_voltage = regulator_map_voltage_ascend,
.get_voltage_sel = regulator_get_voltage_sel_regmap,
.set_voltage_sel = regulator_set_voltage_sel_regmap,
.get_mode = hi6421v530_regulator_ldo_get_mode,
.set_mode = hi6421v530_regulator_ldo_set_mode,
};
/*
 * Probe: register every LDO described in hi6421v530_regulator_info,
 * using the parent PMIC's regmap for register access.
 */
static int hi6421v530_regulator_probe(struct platform_device *pdev)
{
	struct hi6421_pmic *pmic;
	struct regulator_dev *rdev;
	struct regulator_config config = { };
	unsigned int i;

	/* The MFD parent (hi6421-pmic) owns the regmap */
	pmic = dev_get_drvdata(pdev->dev.parent);
	if (!pmic) {
		dev_err(&pdev->dev, "no pmic in the regulator parent node\n");
		return -ENODEV;
	}

	for (i = 0; i < ARRAY_SIZE(hi6421v530_regulator_info); i++) {
		/* config.dev is the parent so of_match resolves in its DT node */
		config.dev = pdev->dev.parent;
		config.regmap = pmic->regmap;
		config.driver_data = &hi6421v530_regulator_info[i];

		rdev = devm_regulator_register(&pdev->dev,
					       &hi6421v530_regulator_info[i].rdesc,
					       &config);
		if (IS_ERR(rdev)) {
			dev_err(&pdev->dev, "failed to register regulator %s\n",
				hi6421v530_regulator_info[i].rdesc.name);
			return PTR_ERR(rdev);
		}
	}
	return 0;
}
static const struct platform_device_id hi6421v530_regulator_table[] = {
{ .name = "hi6421v530-regulator" },
{},
};
MODULE_DEVICE_TABLE(platform, hi6421v530_regulator_table);
static struct platform_driver hi6421v530_regulator_driver = {
.id_table = hi6421v530_regulator_table,
.driver = {
.name = "hi6421v530-regulator",
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
},
.probe = hi6421v530_regulator_probe,
};
module_platform_driver(hi6421v530_regulator_driver);
MODULE_AUTHOR("Wang Xiaoyin <[email protected]>");
MODULE_DESCRIPTION("Hi6421v530 regulator driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/regulator/hi6421v530-regulator.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* max8973-regulator.c -- Maxim max8973A
*
* Regulator driver for MAXIM 8973A DC-DC step-down switching regulator.
*
* Copyright (c) 2012, NVIDIA Corporation.
*
* Author: Laxman Dewangan <[email protected]>
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/err.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/regulator/driver.h>
#include <linux/regulator/machine.h>
#include <linux/regulator/max8973-regulator.h>
#include <linux/regulator/of_regulator.h>
#include <linux/gpio.h>
#include <linux/gpio/consumer.h>
#include <linux/of_gpio.h>
#include <linux/i2c.h>
#include <linux/slab.h>
#include <linux/regmap.h>
#include <linux/thermal.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
/* Register definitions */
#define MAX8973_VOUT 0x0
#define MAX8973_VOUT_DVS 0x1
#define MAX8973_CONTROL1 0x2
#define MAX8973_CONTROL2 0x3
#define MAX8973_CHIPID1 0x4
#define MAX8973_CHIPID2 0x5
#define MAX8973_MAX_VOUT_REG 2
/* MAX8973_VOUT */
#define MAX8973_VOUT_ENABLE BIT(7)
#define MAX8973_VOUT_MASK 0x7F
/* MAX8973_VOUT_DVS */
#define MAX8973_DVS_VOUT_MASK 0x7F
/* MAX8973_CONTROL1 */
#define MAX8973_SNS_ENABLE BIT(7)
#define MAX8973_FPWM_EN_M BIT(6)
#define MAX8973_NFSR_ENABLE BIT(5)
#define MAX8973_AD_ENABLE BIT(4)
#define MAX8973_BIAS_ENABLE BIT(3)
#define MAX8973_FREQSHIFT_9PER BIT(2)
#define MAX8973_RAMP_12mV_PER_US 0x0
#define MAX8973_RAMP_25mV_PER_US 0x1
#define MAX8973_RAMP_50mV_PER_US 0x2
#define MAX8973_RAMP_200mV_PER_US 0x3
#define MAX8973_RAMP_MASK 0x3
/* MAX8973_CONTROL2 */
#define MAX8973_WDTMR_ENABLE BIT(6)
#define MAX8973_DISCH_ENBABLE BIT(5)
#define MAX8973_FT_ENABLE BIT(4)
#define MAX77621_T_JUNCTION_120 BIT(7)
#define MAX8973_CKKADV_TRIP_MASK 0xC
#define MAX8973_CKKADV_TRIP_DISABLE 0xC
#define MAX8973_CKKADV_TRIP_75mV_PER_US 0x0
#define MAX8973_CKKADV_TRIP_150mV_PER_US 0x4
#define MAX8973_CKKADV_TRIP_75mV_PER_US_HIST_DIS 0x8
#define MAX8973_CONTROL_CLKADV_TRIP_MASK 0x00030000
#define MAX8973_INDUCTOR_MIN_30_PER 0x0
#define MAX8973_INDUCTOR_NOMINAL 0x1
#define MAX8973_INDUCTOR_PLUS_30_PER 0x2
#define MAX8973_INDUCTOR_PLUS_60_PER 0x3
#define MAX8973_CONTROL_INDUCTOR_VALUE_MASK 0x00300000
#define MAX8973_MIN_VOLATGE 606250
#define MAX8973_MAX_VOLATGE 1400000
#define MAX8973_VOLATGE_STEP 6250
#define MAX8973_BUCK_N_VOLTAGE 0x80
#define MAX77621_CHIPID_TJINT_S BIT(0)
#define MAX77621_NORMAL_OPERATING_TEMP 100000
#define MAX77621_TJINT_WARNING_TEMP_120 120000
#define MAX77621_TJINT_WARNING_TEMP_140 140000
enum device_id {
MAX8973,
MAX77621
};
/* Maxim 8973 chip information */
struct max8973_chip {
	struct device *dev;		/* underlying I2C client device */
	struct regulator_desc desc;	/* per-instance descriptor, patched in probe */
	struct regmap *regmap;
	bool enable_external_control;	/* EN pin driven externally, not via I2C */
	int dvs_gpio;			/* GPIO selecting between VOUT registers */
	int lru_index[MAX8973_MAX_VOUT_REG];	/* VOUT slot ids, most recently used first */
	int curr_vout_val[MAX8973_MAX_VOUT_REG];	/* last vsel written to each VOUT slot */
	int curr_vout_reg;		/* VOUT register currently selected */
	int curr_gpio_val;		/* current level of the DVS gpio */
	struct regulator_ops ops;	/* per-instance copy of max8973_dcdc_ops */
	enum device_id id;		/* MAX8973 or MAX77621 */
	int junction_temp_warning;	/* TJ warning threshold, millicelsius */
	int irq;			/* thermal alert interrupt (MAX77621 only) */
	struct thermal_zone_device *tz_device;
};
/*
 * find_voltage_set_register - pick a VOUT register for @req_vsel using LRU.
 *
 * Each VOUT register can hold a different voltage configuration. If one of
 * them already contains the requested selector, that slot is promoted to
 * most-recently-used and the caller only needs to flip the DVS gpio.
 * Otherwise the least-recently-used slot is recycled and the caller must
 * program it before selecting it via the gpio.
 *
 * Returns true if a slot with the requested voltage was already present.
 * *vout_reg receives the register address, *gpio_val the gpio level.
 */
static bool find_voltage_set_register(struct max8973_chip *tps,
		int req_vsel, int *vout_reg, int *gpio_val)
{
	int hit = -1;
	int slot, reg_id, j;

	/* Search for a slot already programmed with the requested value. */
	for (slot = 0; slot < MAX8973_MAX_VOUT_REG; ++slot) {
		if (tps->curr_vout_val[tps->lru_index[slot]] == req_vsel) {
			hit = slot;
			break;
		}
	}

	/* On a miss, recycle the least recently used slot (the tail). */
	slot = (hit >= 0) ? hit : MAX8973_MAX_VOUT_REG - 1;
	reg_id = tps->lru_index[slot];

	/* Promote the chosen slot to the front of the LRU order. */
	for (j = slot; j > 0; j--)
		tps->lru_index[j] = tps->lru_index[j - 1];
	tps->lru_index[0] = reg_id;

	*gpio_val = reg_id;
	*vout_reg = MAX8973_VOUT + reg_id;

	return hit >= 0;
}
/* Read back the voltage selector from the currently active VOUT register. */
static int max8973_dcdc_get_voltage_sel(struct regulator_dev *rdev)
{
	struct max8973_chip *max = rdev_get_drvdata(rdev);
	unsigned int raw;
	int err;

	err = regmap_read(max->regmap, max->curr_vout_reg, &raw);
	if (err < 0) {
		dev_err(max->dev, "register %d read failed, err = %d\n",
			max->curr_vout_reg, err);
		return err;
	}

	return raw & MAX8973_VOUT_MASK;
}
/*
 * Set a new voltage selector. When a DVS gpio is present the VOUT
 * registers act as an LRU cache, so a change can often be applied by just
 * toggling the gpio; otherwise the selected VOUT register is rewritten.
 */
static int max8973_dcdc_set_voltage_sel(struct regulator_dev *rdev,
	     unsigned vsel)
{
	struct max8973_chip *max = rdev_get_drvdata(rdev);
	int ret;
	bool found = false;
	int vout_reg = max->curr_vout_reg;
	int gpio_val = max->curr_gpio_val;
	/*
	 * If gpios are available to select the VOUT register then least
	 * recently used register for new configuration.
	 */
	if (gpio_is_valid(max->dvs_gpio))
		found = find_voltage_set_register(max, vsel,
					&vout_reg, &gpio_val);
	if (!found) {
		/* Slot does not already hold vsel: program it. */
		ret = regmap_update_bits(max->regmap, vout_reg,
					MAX8973_VOUT_MASK, vsel);
		if (ret < 0) {
			dev_err(max->dev, "register %d update failed, err %d\n",
				vout_reg, ret);
			return ret;
		}
		max->curr_vout_reg = vout_reg;
		max->curr_vout_val[gpio_val] = vsel;
	}
	/* Select proper VOUT register vio gpios */
	if (gpio_is_valid(max->dvs_gpio)) {
		gpio_set_value_cansleep(max->dvs_gpio, gpio_val & 0x1);
		max->curr_gpio_val = gpio_val;
	}
	return 0;
}
/*
 * Map the regulator framework mode to the forced-PWM control bit:
 * FAST enables forced PWM, NORMAL disables it; other modes are rejected.
 */
static int max8973_dcdc_set_mode(struct regulator_dev *rdev, unsigned int mode)
{
	struct max8973_chip *max = rdev_get_drvdata(rdev);
	int fpwm;
	int err;

	if (mode == REGULATOR_MODE_FAST)
		fpwm = MAX8973_FPWM_EN_M;
	else if (mode == REGULATOR_MODE_NORMAL)
		fpwm = 0;
	else
		return -EINVAL;

	err = regmap_update_bits(max->regmap, MAX8973_CONTROL1,
				 MAX8973_FPWM_EN_M, fpwm);
	if (err < 0)
		dev_err(max->dev, "register %d update failed, err %d\n",
			MAX8973_CONTROL1, err);
	return err;
}
/* Report FAST when forced PWM is enabled in CONTROL1, NORMAL otherwise. */
static unsigned int max8973_dcdc_get_mode(struct regulator_dev *rdev)
{
	struct max8973_chip *max = rdev_get_drvdata(rdev);
	unsigned int ctrl1;
	int err;

	err = regmap_read(max->regmap, MAX8973_CONTROL1, &ctrl1);
	if (err < 0) {
		dev_err(max->dev, "register %d read failed, err %d\n",
			MAX8973_CONTROL1, err);
		return err;
	}

	if (ctrl1 & MAX8973_FPWM_EN_M)
		return REGULATOR_MODE_FAST;
	return REGULATOR_MODE_NORMAL;
}
/*
 * Select the clock-advance trip threshold that corresponds to the
 * requested ceiling: <=9A -> 75mV/us, <=12A -> 150mV/us, else disabled.
 */
static int max8973_set_current_limit(struct regulator_dev *rdev,
				     int min_ua, int max_ua)
{
	struct max8973_chip *max = rdev_get_drvdata(rdev);
	unsigned int trip = MAX8973_CKKADV_TRIP_DISABLE;
	int err;

	if (max_ua <= 9000000)
		trip = MAX8973_CKKADV_TRIP_75mV_PER_US;
	else if (max_ua <= 12000000)
		trip = MAX8973_CKKADV_TRIP_150mV_PER_US;

	err = regmap_update_bits(max->regmap, MAX8973_CONTROL2,
				 MAX8973_CKKADV_TRIP_MASK, trip);
	if (err < 0) {
		dev_err(max->dev, "register %d update failed: %d\n",
			MAX8973_CONTROL2, err);
		return err;
	}
	return 0;
}
/* Translate the CONTROL2 clock-advance trip field back into a uA limit. */
static int max8973_get_current_limit(struct regulator_dev *rdev)
{
	struct max8973_chip *max = rdev_get_drvdata(rdev);
	unsigned int ctrl2;
	unsigned int trip;
	int err;

	err = regmap_read(max->regmap, MAX8973_CONTROL2, &ctrl2);
	if (err < 0) {
		dev_err(max->dev, "register %d read failed: %d\n",
			MAX8973_CONTROL2, err);
		return err;
	}

	trip = ctrl2 & MAX8973_CKKADV_TRIP_MASK;
	if (trip == MAX8973_CKKADV_TRIP_DISABLE)
		return 15000000;
	if (trip == MAX8973_CKKADV_TRIP_150mV_PER_US)
		return 12000000;
	/* 75mV/us encodings (and anything else) map to the 9A limit. */
	return 9000000;
}
/* Ramp rates in uV/us for CONTROL1 RAMP field encodings 0..3. */
static const unsigned int max8973_buck_ramp_table[] = {
	12000, 25000, 50000, 200000
};
/*
 * Baseline DC-DC operations. Probe copies this into max->ops and then
 * patches enable/disable and voltage-selector ops per instance, depending
 * on DVS gpio availability and external-enable configuration.
 */
static const struct regulator_ops max8973_dcdc_ops = {
	.get_voltage_sel = max8973_dcdc_get_voltage_sel,
	.set_voltage_sel = max8973_dcdc_set_voltage_sel,
	.list_voltage = regulator_list_voltage_linear,
	.set_mode = max8973_dcdc_set_mode,
	.get_mode = max8973_dcdc_get_mode,
	.set_voltage_time_sel = regulator_set_voltage_time_sel,
	.set_ramp_delay = regulator_set_ramp_delay_regmap,
};
/*
 * max8973_init_dcdc - apply platform-data driven CONTROL1/CONTROL2 setup.
 *
 * Reads the OTP-programmed ramp rate to seed desc.ramp_delay, builds both
 * control registers from pdata->control_flags, writes them, and finally
 * clears the EN bit when external control is used on a MAX8973.
 * Returns 0 or a negative regmap error code.
 */
static int max8973_init_dcdc(struct max8973_chip *max,
			     struct max8973_regulator_platform_data *pdata)
{
	int ret;
	uint8_t control1 = 0;
	uint8_t control2 = 0;
	unsigned int data;
	ret = regmap_read(max->regmap, MAX8973_CONTROL1, &data);
	if (ret < 0) {
		dev_err(max->dev, "register %d read failed, err = %d",
			MAX8973_CONTROL1, ret);
		return ret;
	}
	/* Preserve the factory ramp setting and mirror it into the desc. */
	control1 = data & MAX8973_RAMP_MASK;
	switch (control1) {
	case MAX8973_RAMP_12mV_PER_US:
		max->desc.ramp_delay = 12000;
		break;
	case MAX8973_RAMP_25mV_PER_US:
		max->desc.ramp_delay = 25000;
		break;
	case MAX8973_RAMP_50mV_PER_US:
		max->desc.ramp_delay = 50000;
		break;
	case MAX8973_RAMP_200mV_PER_US:
		max->desc.ramp_delay = 200000;
		break;
	}
	if (pdata->control_flags & MAX8973_CONTROL_REMOTE_SENSE_ENABLE)
		control1 |= MAX8973_SNS_ENABLE;
	if (!(pdata->control_flags & MAX8973_CONTROL_FALLING_SLEW_RATE_ENABLE))
		control1 |= MAX8973_NFSR_ENABLE;
	if (pdata->control_flags & MAX8973_CONTROL_OUTPUT_ACTIVE_DISCH_ENABLE)
		control1 |= MAX8973_AD_ENABLE;
	/* Bias enable shortens the startup time (20us vs 240us). */
	if (pdata->control_flags & MAX8973_CONTROL_BIAS_ENABLE) {
		control1 |= MAX8973_BIAS_ENABLE;
		max->desc.enable_time = 20;
	} else {
		max->desc.enable_time = 240;
	}
	if (pdata->control_flags & MAX8973_CONTROL_FREQ_SHIFT_9PER_ENABLE)
		control1 |= MAX8973_FREQSHIFT_9PER;
	/* The 120C junction warning select only exists on MAX77621. */
	if ((pdata->junction_temp_warning == MAX77621_TJINT_WARNING_TEMP_120) &&
	    (max->id == MAX77621))
		control2 |= MAX77621_T_JUNCTION_120;
	if (!(pdata->control_flags & MAX8973_CONTROL_PULL_DOWN_ENABLE))
		control2 |= MAX8973_DISCH_ENBABLE;
	/* Clock advance trip configuration */
	switch (pdata->control_flags & MAX8973_CONTROL_CLKADV_TRIP_MASK) {
	case MAX8973_CONTROL_CLKADV_TRIP_DISABLED:
		control2 |= MAX8973_CKKADV_TRIP_DISABLE;
		break;
	case MAX8973_CONTROL_CLKADV_TRIP_75mV_PER_US:
		control2 |= MAX8973_CKKADV_TRIP_75mV_PER_US;
		break;
	case MAX8973_CONTROL_CLKADV_TRIP_150mV_PER_US:
		control2 |= MAX8973_CKKADV_TRIP_150mV_PER_US;
		break;
	case MAX8973_CONTROL_CLKADV_TRIP_75mV_PER_US_HIST_DIS:
		control2 |= MAX8973_CKKADV_TRIP_75mV_PER_US_HIST_DIS;
		break;
	}
	/* Configure inductor value */
	switch (pdata->control_flags & MAX8973_CONTROL_INDUCTOR_VALUE_MASK) {
	case MAX8973_CONTROL_INDUCTOR_VALUE_NOMINAL:
		control2 |= MAX8973_INDUCTOR_NOMINAL;
		break;
	case MAX8973_CONTROL_INDUCTOR_VALUE_MINUS_30_PER:
		control2 |= MAX8973_INDUCTOR_MIN_30_PER;
		break;
	case MAX8973_CONTROL_INDUCTOR_VALUE_PLUS_30_PER:
		control2 |= MAX8973_INDUCTOR_PLUS_30_PER;
		break;
	case MAX8973_CONTROL_INDUCTOR_VALUE_PLUS_60_PER:
		control2 |= MAX8973_INDUCTOR_PLUS_60_PER;
		break;
	}
	ret = regmap_write(max->regmap, MAX8973_CONTROL1, control1);
	if (ret < 0) {
		dev_err(max->dev, "register %d write failed, err = %d",
			MAX8973_CONTROL1, ret);
		return ret;
	}
	ret = regmap_write(max->regmap, MAX8973_CONTROL2, control2);
	if (ret < 0) {
		dev_err(max->dev, "register %d write failed, err = %d",
			MAX8973_CONTROL2, ret);
		return ret;
	}
	/* If external control is enabled then disable EN bit */
	if (max->enable_external_control && (max->id == MAX8973)) {
		ret = regmap_update_bits(max->regmap, MAX8973_VOUT,
						MAX8973_VOUT_ENABLE, 0);
		if (ret < 0)
			dev_err(max->dev, "register %d update failed, err = %d",
				MAX8973_VOUT, ret);
	}
	return ret;
}
/*
 * Thermal zone get_temp callback. The chip only reports a junction alert
 * flag (TJINT_S), not a temperature, so synthesize a value just above the
 * warning threshold when the flag is set and a nominal value otherwise.
 */
static int max8973_thermal_read_temp(struct thermal_zone_device *tz, int *temp)
{
	struct max8973_chip *mchip = thermal_zone_device_priv(tz);
	unsigned int chipid;
	int err;

	err = regmap_read(mchip->regmap, MAX8973_CHIPID1, &chipid);
	if (err < 0) {
		dev_err(mchip->dev, "Failed to read register CHIPID1, %d", err);
		return err;
	}

	if (chipid & MAX77621_CHIPID_TJINT_S) {
		/* +1 degC to trigger cool device */
		*temp = mchip->junction_temp_warning + 1000;
	} else {
		*temp = MAX77621_NORMAL_OPERATING_TEMP;
	}

	return 0;
}
/*
 * Threaded handler for the MAX77621 thermal alert interrupt: kick the
 * thermal zone so the junction status flag is re-read.
 */
static irqreturn_t max8973_thermal_irq(int irq, void *data)
{
	struct max8973_chip *mchip = data;
	thermal_zone_device_update(mchip->tz_device,
				   THERMAL_EVENT_UNSPECIFIED);
	return IRQ_HANDLED;
}
/* Only get_temp is supported; there are no trip-point controls. */
static const struct thermal_zone_device_ops max77621_tz_ops = {
	.get_temp = max8973_thermal_read_temp,
};
/*
 * max8973_thermal_init - register the MAX77621 thermal zone and its alert
 * interrupt. A no-op for plain MAX8973 parts and when no IRQ is wired.
 * Returns 0 on success or a negative error code.
 */
static int max8973_thermal_init(struct max8973_chip *mchip)
{
	struct thermal_zone_device *tzd;
	struct irq_data *irq_data;
	unsigned long irq_flags = 0;
	int ret;
	if (mchip->id != MAX77621)
		return 0;
	tzd = devm_thermal_of_zone_register(mchip->dev, 0, mchip,
					    &max77621_tz_ops);
	if (IS_ERR(tzd)) {
		ret = PTR_ERR(tzd);
		dev_err(mchip->dev, "Failed to register thermal sensor: %d\n",
			ret);
		return ret;
	}
	/* Without an interrupt, rely on thermal core polling alone. */
	if (mchip->irq <= 0)
		return 0;
	/* Reuse whatever trigger type firmware configured for the line. */
	irq_data = irq_get_irq_data(mchip->irq);
	if (irq_data)
		irq_flags = irqd_get_trigger_type(irq_data);
	ret = devm_request_threaded_irq(mchip->dev, mchip->irq, NULL,
					max8973_thermal_irq,
					IRQF_ONESHOT | IRQF_SHARED | irq_flags,
					dev_name(mchip->dev), mchip);
	if (ret < 0) {
		dev_err(mchip->dev, "Failed to request irq %d, %d\n",
			mchip->irq, ret);
		return ret;
	}
	return 0;
}
/* 8-bit register map covering VOUT..CHIPID2, cached to reduce I2C traffic. */
static const struct regmap_config max8973_regmap_config = {
	.reg_bits = 8,
	.val_bits = 8,
	.max_register = MAX8973_CHIPID2,
	.cache_type = REGCACHE_RBTREE,
};
/*
 * max8973_parse_dt - build platform data from the device tree node.
 * Translates the "maxim,*" properties into control_flags and selects the
 * junction temperature warning level. Returns NULL on allocation failure.
 */
static struct max8973_regulator_platform_data *max8973_parse_dt(
		struct device *dev)
{
	struct max8973_regulator_platform_data *pdata;
	struct device_node *np = dev->of_node;
	int ret;
	u32 pval;
	bool etr_enable;
	bool etr_sensitivity_high;
	pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
	if (!pdata)
		return NULL;
	pdata->enable_ext_control = of_property_read_bool(np,
					"maxim,externally-enable");
	pdata->dvs_gpio = of_get_named_gpio(np, "maxim,dvs-gpio", 0);
	ret = of_property_read_u32(np, "maxim,dvs-default-state", &pval);
	if (!ret)
		pdata->dvs_def_state = pval;
	if (of_property_read_bool(np, "maxim,enable-remote-sense"))
		pdata->control_flags |= MAX8973_CONTROL_REMOTE_SENSE_ENABLE;
	if (of_property_read_bool(np, "maxim,enable-falling-slew-rate"))
		pdata->control_flags |=
				MAX8973_CONTROL_FALLING_SLEW_RATE_ENABLE;
	if (of_property_read_bool(np, "maxim,enable-active-discharge"))
		pdata->control_flags |=
				MAX8973_CONTROL_OUTPUT_ACTIVE_DISCH_ENABLE;
	if (of_property_read_bool(np, "maxim,enable-frequency-shift"))
		pdata->control_flags |= MAX8973_CONTROL_FREQ_SHIFT_9PER_ENABLE;
	if (of_property_read_bool(np, "maxim,enable-bias-control"))
		pdata->control_flags |= MAX8973_CONTROL_BIAS_ENABLE;
	/* High ETR sensitivity implies ETR is enabled. */
	etr_enable = of_property_read_bool(np, "maxim,enable-etr");
	etr_sensitivity_high = of_property_read_bool(np,
				"maxim,enable-high-etr-sensitivity");
	if (etr_sensitivity_high)
		etr_enable = true;
	if (etr_enable) {
		if (etr_sensitivity_high)
			pdata->control_flags |=
				MAX8973_CONTROL_CLKADV_TRIP_75mV_PER_US;
		else
			pdata->control_flags |=
				MAX8973_CONTROL_CLKADV_TRIP_150mV_PER_US;
	} else {
		pdata->control_flags |= MAX8973_CONTROL_CLKADV_TRIP_DISABLED;
	}
	/* Default to the higher (140C) warning unless DT asks for <=120C. */
	pdata->junction_temp_warning = MAX77621_TJINT_WARNING_TEMP_140;
	ret = of_property_read_u32(np, "junction-warn-millicelsius", &pval);
	if (!ret && (pval <= MAX77621_TJINT_WARNING_TEMP_120))
		pdata->junction_temp_warning = MAX77621_TJINT_WARNING_TEMP_120;
	return pdata;
}
/* DT compatibles; .data carries the device_id enum for variant selection. */
static const struct of_device_id of_max8973_match_tbl[] = {
	{ .compatible = "maxim,max8973", .data = (void *)MAX8973, },
	{ .compatible = "maxim,max77621", .data = (void *)MAX77621, },
	{ },
};
MODULE_DEVICE_TABLE(of, of_max8973_match_tbl);
/*
 * max8973_probe - I2C probe: build platform data (from DT if needed),
 * identify the variant, configure the regulator descriptor and per-instance
 * ops (DVS gpio vs fixed VOUT register, internal vs external enable),
 * initialize the control registers and register the regulator. The thermal
 * zone (MAX77621) is registered last and its failure is non-fatal.
 */
static int max8973_probe(struct i2c_client *client)
{
	const struct i2c_device_id *id = i2c_client_get_device_id(client);
	struct max8973_regulator_platform_data *pdata;
	struct regulator_init_data *ridata;
	struct regulator_config config = { };
	struct regulator_dev *rdev;
	struct max8973_chip *max;
	bool pdata_from_dt = false;
	unsigned int chip_id;
	struct gpio_desc *gpiod;
	enum gpiod_flags gflags;
	int ret;
	pdata = dev_get_platdata(&client->dev);
	if (!pdata && client->dev.of_node) {
		pdata = max8973_parse_dt(&client->dev);
		pdata_from_dt = true;
	}
	if (!pdata) {
		dev_err(&client->dev, "No Platform data");
		return -EIO;
	}
	/* The DVS gpio may not be ready yet; retry the whole probe later. */
	if (pdata->dvs_gpio == -EPROBE_DEFER)
		return -EPROBE_DEFER;
	max = devm_kzalloc(&client->dev, sizeof(*max), GFP_KERNEL);
	if (!max)
		return -ENOMEM;
	max->regmap = devm_regmap_init_i2c(client, &max8973_regmap_config);
	if (IS_ERR(max->regmap)) {
		ret = PTR_ERR(max->regmap);
		dev_err(&client->dev, "regmap init failed, err %d\n", ret);
		return ret;
	}
	/* Variant id comes from OF match data or the legacy I2C id table. */
	if (client->dev.of_node) {
		const struct of_device_id *match;
		match = of_match_device(of_match_ptr(of_max8973_match_tbl),
				&client->dev);
		if (!match)
			return -ENODATA;
		max->id = (u32)((uintptr_t)match->data);
	} else {
		max->id = id->driver_data;
	}
	ret = regmap_read(max->regmap, MAX8973_CHIPID1, &chip_id);
	if (ret < 0) {
		dev_err(&client->dev, "register CHIPID1 read failed, %d", ret);
		return ret;
	}
	dev_info(&client->dev, "CHIP-ID OTP: 0x%02x ID_M: 0x%02x\n",
			(chip_id >> 4) & 0xF, (chip_id >> 1) & 0x7);
	i2c_set_clientdata(client, max);
	/* Per-instance copy of the ops so they can be patched below. */
	max->ops = max8973_dcdc_ops;
	max->dev = &client->dev;
	max->desc.name = id->name;
	max->desc.id = 0;
	max->desc.ops = &max->ops;
	max->desc.type = REGULATOR_VOLTAGE;
	max->desc.owner = THIS_MODULE;
	max->desc.min_uV = MAX8973_MIN_VOLATGE;
	max->desc.uV_step = MAX8973_VOLATGE_STEP;
	max->desc.n_voltages = MAX8973_BUCK_N_VOLTAGE;
	max->desc.ramp_reg = MAX8973_CONTROL1;
	max->desc.ramp_mask = MAX8973_RAMP_MASK;
	max->desc.ramp_delay_table = max8973_buck_ramp_table;
	max->desc.n_ramp_values = ARRAY_SIZE(max8973_buck_ramp_table);
	/* Legacy pdata uses 0 to mean "no DVS gpio". */
	max->dvs_gpio = (pdata->dvs_gpio) ? pdata->dvs_gpio : -EINVAL;
	max->enable_external_control = pdata->enable_ext_control;
	max->curr_gpio_val = pdata->dvs_def_state;
	max->curr_vout_reg = MAX8973_VOUT + pdata->dvs_def_state;
	max->junction_temp_warning = pdata->junction_temp_warning;
	max->lru_index[0] = max->curr_vout_reg;
	if (gpio_is_valid(max->dvs_gpio)) {
		int gpio_flags;
		int i;
		gpio_flags = (pdata->dvs_def_state) ?
				GPIOF_OUT_INIT_HIGH : GPIOF_OUT_INIT_LOW;
		ret = devm_gpio_request_one(&client->dev, max->dvs_gpio,
				gpio_flags, "max8973-dvs");
		if (ret) {
			dev_err(&client->dev,
				"gpio_request for gpio %d failed, err = %d\n",
				max->dvs_gpio, ret);
			return ret;
		}
		/*
		 * Initialize the lru index with vout_reg id
		 * The index 0 will be most recently used and
		 * set with the max->curr_vout_reg */
		for (i = 0; i < MAX8973_MAX_VOUT_REG; ++i)
			max->lru_index[i] = i;
		max->lru_index[0] = max->curr_vout_reg;
		max->lru_index[max->curr_vout_reg] = 0;
	} else {
		/*
		 * If there is no DVS GPIO, the VOUT register
		 * address is fixed.
		 */
		max->ops.set_voltage_sel = regulator_set_voltage_sel_regmap;
		max->ops.get_voltage_sel = regulator_get_voltage_sel_regmap;
		max->desc.vsel_reg = max->curr_vout_reg;
		max->desc.vsel_mask = MAX8973_VOUT_MASK;
	}
	if (pdata_from_dt)
		pdata->reg_init_data = of_get_regulator_init_data(&client->dev,
					client->dev.of_node, &max->desc);
	ridata = pdata->reg_init_data;
	/* Wire up enable control depending on variant and EN pin usage. */
	switch (max->id) {
	case MAX8973:
		if (!pdata->enable_ext_control) {
			max->desc.enable_reg = MAX8973_VOUT;
			max->desc.enable_mask = MAX8973_VOUT_ENABLE;
			max->ops.enable = regulator_enable_regmap;
			max->ops.disable = regulator_disable_regmap;
			max->ops.is_enabled = regulator_is_enabled_regmap;
			break;
		}
		if (ridata && (ridata->constraints.always_on ||
			       ridata->constraints.boot_on))
			gflags = GPIOD_OUT_HIGH;
		else
			gflags = GPIOD_OUT_LOW;
		gflags |= GPIOD_FLAGS_BIT_NONEXCLUSIVE;
		gpiod = devm_gpiod_get_optional(&client->dev,
						"maxim,enable",
						gflags);
		if (IS_ERR(gpiod))
			return PTR_ERR(gpiod);
		if (gpiod) {
			config.ena_gpiod = gpiod;
			max->enable_external_control = true;
		}
		break;
	case MAX77621:
		/*
		 * We do not let the core switch this regulator on/off,
		 * we just leave it on.
		 */
		gpiod = devm_gpiod_get_optional(&client->dev,
						"maxim,enable",
						GPIOD_OUT_HIGH);
		if (IS_ERR(gpiod))
			return PTR_ERR(gpiod);
		if (gpiod)
			max->enable_external_control = true;
		max->desc.enable_reg = MAX8973_VOUT;
		max->desc.enable_mask = MAX8973_VOUT_ENABLE;
		max->ops.enable = regulator_enable_regmap;
		max->ops.disable = regulator_disable_regmap;
		max->ops.is_enabled = regulator_is_enabled_regmap;
		max->ops.set_current_limit = max8973_set_current_limit;
		max->ops.get_current_limit = max8973_get_current_limit;
		break;
	default:
		break;
	}
	ret = max8973_init_dcdc(max, pdata);
	if (ret < 0) {
		dev_err(max->dev, "Max8973 Init failed, err = %d\n", ret);
		return ret;
	}
	config.dev = &client->dev;
	config.init_data = pdata->reg_init_data;
	config.driver_data = max;
	config.of_node = client->dev.of_node;
	config.regmap = max->regmap;
	/*
	 * Register the regulators
	 * Turn the GPIO descriptor over to the regulator core for
	 * lifecycle management if we pass an ena_gpiod.
	 */
	if (config.ena_gpiod)
		devm_gpiod_unhinge(&client->dev, config.ena_gpiod);
	rdev = devm_regulator_register(&client->dev, &max->desc, &config);
	if (IS_ERR(rdev)) {
		ret = PTR_ERR(rdev);
		dev_err(max->dev, "regulator register failed, err %d\n", ret);
		return ret;
	}
	/* Thermal zone registration failure is deliberately not fatal. */
	max8973_thermal_init(max);
	return 0;
}
/* Legacy I2C ids; driver_data carries the device_id enum. */
static const struct i2c_device_id max8973_id[] = {
	{.name = "max8973", .driver_data = MAX8973},
	{.name = "max77621", .driver_data = MAX77621},
	{},
};
MODULE_DEVICE_TABLE(i2c, max8973_id);
static struct i2c_driver max8973_i2c_driver = {
	.driver = {
		.name = "max8973",
		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
		.of_match_table = of_max8973_match_tbl,
	},
	.probe = max8973_probe,
	.id_table = max8973_id,
};
/* subsys_initcall: register early so dependent consumers find the supply. */
static int __init max8973_init(void)
{
	return i2c_add_driver(&max8973_i2c_driver);
}
subsys_initcall(max8973_init);
static void __exit max8973_cleanup(void)
{
	i2c_del_driver(&max8973_i2c_driver);
}
module_exit(max8973_cleanup);
MODULE_AUTHOR("Laxman Dewangan <[email protected]>");
MODULE_DESCRIPTION("MAX8973 voltage regulator driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/regulator/max8973-regulator.c |
// SPDX-License-Identifier: GPL-2.0-only
//
// DA9121 Single-channel dual-phase 10A buck converter
//
// Copyright (C) 2020 Axis Communications AB
//
// DA9130 Single-channel dual-phase 10A buck converter (Automotive)
// DA9217 Single-channel dual-phase 6A buck converter
// DA9122 Dual-channel single-phase 5A buck converter
// DA9131 Dual-channel single-phase 5A buck converter (Automotive)
// DA9220 Dual-channel single-phase 3A buck converter
// DA9132 Dual-channel single-phase 3A buck converter (Automotive)
//
// Copyright (C) 2020 Dialog Semiconductor
#include <linux/of_device.h>
#include <linux/of_gpio.h>
#include <linux/gpio/consumer.h>
#include <linux/regulator/of_regulator.h>
#include <linux/regulator/machine.h>
#include <linux/regulator/driver.h>
#include <linux/module.h>
#include <linux/regmap.h>
#include <linux/err.h>
#include <linux/i2c.h>
#include <linux/regulator/da9121.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include "da9121-regulator.h"
/* Chip data */
struct da9121 {
	struct device *dev;
	struct delayed_work work;	/* NOTE(review): presumably polls sticky status while IRQ masked — confirm against handler (not in view) */
	struct da9121_pdata *pdata;	/* accumulated while parsing regulators in of_parse_cb */
	struct regmap *regmap;
	struct regulator_dev *rdev[DA9121_IDX_MAX];
	unsigned int persistent[2];	/* NOTE(review): looks like latched status per register bank — confirm */
	unsigned int passive_delay;	/* NOTE(review): likely the polling period for the work above — confirm */
	int chip_irq;
	int variant_id;			/* index into variant_parameters[] */
	int subvariant_id;
};
/* Define ranges for different variants, enabling translation to/from
 * registers. Maximums give scope to allow for transients.
 */
struct da9121_range {
	int val_min;	/* current in uA at encoding reg_min */
	int val_max;	/* current in uA at encoding reg_max */
	int val_stp;	/* uA per register step */
	int reg_min;	/* lowest valid register encoding */
	int reg_max;	/* highest valid register encoding */
};
static struct da9121_range da9121_10A_2phase_current = {
	.val_min = 7000000,
	.val_max = 20000000,
	.val_stp = 1000000,
	.reg_min = 1,
	.reg_max = 14,
};
static struct da9121_range da9121_6A_2phase_current = {
	.val_min = 7000000,
	.val_max = 12000000,
	.val_stp = 1000000,
	.reg_min = 1,
	.reg_max = 6,
};
static struct da9121_range da9121_5A_1phase_current = {
	.val_min = 3500000,
	.val_max = 10000000,
	.val_stp = 500000,
	.reg_min = 1,
	.reg_max = 14,
};
static struct da9121_range da9121_3A_1phase_current = {
	.val_min = 3500000,
	.val_max = 6000000,
	.val_stp = 500000,
	.reg_min = 1,
	.reg_max = 6,
};
static struct da9121_range da914x_40A_4phase_current = {
	.val_min = 26000000,
	.val_max = 78000000,
	.val_stp = 4000000,
	.reg_min = 1,
	.reg_max = 14,
};
static struct da9121_range da914x_20A_2phase_current = {
	.val_min = 13000000,
	.val_max = 39000000,
	.val_stp = 2000000,
	.reg_min = 1,
	.reg_max = 14,
};
/* Per-variant buck/phase counts and the matching current-limit range. */
struct da9121_variant_info {
	int num_bucks;
	int num_phases;
	struct da9121_range *current_range;
};
/* Indexed by the DA9121_TYPE_* variant id. */
static const struct da9121_variant_info variant_parameters[] = {
	{ 1, 2, &da9121_10A_2phase_current }, //DA9121_TYPE_DA9121_DA9130
	{ 2, 1, &da9121_3A_1phase_current }, //DA9121_TYPE_DA9220_DA9132
	{ 2, 1, &da9121_5A_1phase_current }, //DA9121_TYPE_DA9122_DA9131
	{ 1, 2, &da9121_6A_2phase_current }, //DA9121_TYPE_DA9217
	{ 1, 4, &da914x_40A_4phase_current }, //DA9121_TYPE_DA9141
	{ 1, 2, &da914x_20A_2phase_current }, //DA9121_TYPE_DA9142
};
/* Register/mask pair, indexed by buck id below. */
struct da9121_field {
	unsigned int reg;
	unsigned int msk;
};
/* Current-limit field per buck. */
static const struct da9121_field da9121_current_field[2] = {
	{ DA9121_REG_BUCK_BUCK1_2, DA9121_MASK_BUCK_BUCKx_2_CHx_ILIM },
	{ DA9xxx_REG_BUCK_BUCK2_2, DA9121_MASK_BUCK_BUCKx_2_CHx_ILIM },
};
/* Operating-mode field per buck. */
static const struct da9121_field da9121_mode_field[2] = {
	{ DA9121_REG_BUCK_BUCK1_4, DA9121_MASK_BUCK_BUCKx_4_CHx_A_MODE },
	{ DA9xxx_REG_BUCK_BUCK2_4, DA9121_MASK_BUCK_BUCKx_4_CHx_A_MODE },
};
/* One row per serviceable status signal: where it lives and how to react. */
struct status_event_data {
	int buck_id; /* 0=core, 1/2-buck */
	int reg_index; /* index for status/event/mask register selection */
	int status_bit; /* bit masks... */
	int event_bit;
	int mask_bit;
	unsigned long notification; /* Notification for status inception */
	char *warn; /* if NULL, notify - otherwise dev_warn this string */
};
/* Expand the common status/event/mask bit triplet for a given bank/name. */
#define DA9121_STATUS(id, bank, name, notification, warning) \
	{ id, bank, \
	DA9121_MASK_SYS_STATUS_##bank##_##name, \
	DA9121_MASK_SYS_EVENT_##bank##_E_##name, \
	DA9121_MASK_SYS_MASK_##bank##_M_##name, \
	notification, warning }
/* For second buck related event bits that are specific to DA9122, DA9220 variants */
#define DA9xxx_STATUS(id, bank, name, notification, warning) \
	{ id, bank, \
	DA9xxx_MASK_SYS_STATUS_##bank##_##name, \
	DA9xxx_MASK_SYS_EVENT_##bank##_E_##name, \
	DA9xxx_MASK_SYS_MASK_##bank##_M_##name, \
	notification, warning }
/* The status signals that may need servicing, depending on device variant.
 * After assertion, they persist; so event is notified, the IRQ disabled,
 * and status polled until clear again and IRQ is reenabled.
 *
 * SG/PG1/PG2 should be set when device first powers up and should never
 * re-occur. When this driver starts, it is expected that these will have
 * self-cleared for when the IRQs are enabled, so these should never be seen.
 * If seen, the implication is that the device has reset.
 *
 * GPIO0/1/2 are not configured for use by default, so should not be seen.
 */
static const struct status_event_data status_event_handling[] = {
	DA9xxx_STATUS(0, 0, SG, 0, "Handled E_SG\n"),
	DA9121_STATUS(0, 0, TEMP_CRIT, (REGULATOR_EVENT_OVER_TEMP|REGULATOR_EVENT_DISABLE), NULL),
	DA9121_STATUS(0, 0, TEMP_WARN, REGULATOR_EVENT_OVER_TEMP, NULL),
	DA9121_STATUS(1, 1, PG1, 0, "Handled E_PG1\n"),
	DA9121_STATUS(1, 1, OV1, REGULATOR_EVENT_REGULATION_OUT, NULL),
	DA9121_STATUS(1, 1, UV1, REGULATOR_EVENT_UNDER_VOLTAGE, NULL),
	DA9121_STATUS(1, 1, OC1, REGULATOR_EVENT_OVER_CURRENT, NULL),
	DA9xxx_STATUS(2, 1, PG2, 0, "Handled E_PG2\n"),
	DA9xxx_STATUS(2, 1, OV2, REGULATOR_EVENT_REGULATION_OUT, NULL),
	DA9xxx_STATUS(2, 1, UV2, REGULATOR_EVENT_UNDER_VOLTAGE, NULL),
	DA9xxx_STATUS(2, 1, OC2, REGULATOR_EVENT_OVER_CURRENT, NULL),
	DA9121_STATUS(0, 2, GPIO0, 0, "Handled E_GPIO0\n"),
	DA9121_STATUS(0, 2, GPIO1, 0, "Handled E_GPIO1\n"),
	DA9121_STATUS(0, 2, GPIO2, 0, "Handled E_GPIO2\n"),
};
/*
 * Read the buck's ILIM field and translate the encoding into microamps
 * using the variant's current range. Out-of-range encodings return
 * -EACCES (below minimum) or -EINVAL (above maximum).
 */
static int da9121_get_current_limit(struct regulator_dev *rdev)
{
	struct da9121 *chip = rdev_get_drvdata(rdev);
	int id = rdev_get_id(rdev);
	struct da9121_range *range =
		variant_parameters[chip->variant_id].current_range;
	unsigned int enc = 0;
	int ret;

	ret = regmap_read(chip->regmap, da9121_current_field[id].reg, &enc);
	if (ret < 0) {
		dev_err(chip->dev, "Cannot read BUCK register: %d\n", ret);
		return ret;
	}

	if (enc < range->reg_min)
		return -EACCES;
	if (enc > range->reg_max)
		return -EINVAL;

	return range->val_min + (range->val_stp * (enc - range->reg_min));
}
/*
 * Find the highest current-limit encoding whose value does not exceed
 * @max, and verify it is still at least @min. On success the encoding is
 * stored in *selector and 0 is returned; otherwise -EINVAL.
 */
static int da9121_ceiling_selector(struct regulator_dev *rdev,
		int min, int max,
		unsigned int *selector)
{
	struct da9121 *chip = rdev_get_drvdata(rdev);
	struct da9121_range *range =
		variant_parameters[chip->variant_id].current_range;
	unsigned int level;
	unsigned int enc;
	unsigned int best = 0;

	if (range->val_min > max || range->val_max < min) {
		dev_err(chip->dev,
			"Requested current out of regulator capability\n");
		return -EINVAL;
	}

	/* Walk down from the top encoding until the level fits under max. */
	level = range->val_max;
	for (enc = range->reg_max; enc >= range->reg_min; enc--) {
		if (level <= max) {
			best = enc;
			break;
		}
		level -= range->val_stp;
	}

	if (level < min) {
		dev_err(chip->dev,
			"Best match falls below minimum requested current\n");
		return -EINVAL;
	}

	*selector = best;
	return 0;
}
/*
 * Program the buck's current limit. The request must fall inside the
 * variant's supported window and the buck must be disabled, since the
 * limit cannot be changed while the output is live.
 */
static int da9121_set_current_limit(struct regulator_dev *rdev,
		int min_ua, int max_ua)
{
	struct da9121 *chip = rdev_get_drvdata(rdev);
	int id = rdev_get_id(rdev);
	struct da9121_range *range =
		variant_parameters[chip->variant_id].current_range;
	unsigned int enc = 0;
	int ret;

	if (min_ua < range->val_min || max_ua > range->val_max)
		return -EINVAL;

	/* Reprogramming is only allowed while the buck is off. */
	if (rdev->desc->ops->is_enabled(rdev))
		return -EBUSY;

	ret = da9121_ceiling_selector(rdev, min_ua, max_ua, &enc);
	if (ret < 0)
		return ret;

	ret = regmap_update_bits(chip->regmap,
				 da9121_current_field[id].reg,
				 da9121_current_field[id].msk,
				 enc);
	if (ret < 0)
		dev_err(chip->dev, "Cannot update BUCK current limit, err: %d\n", ret);

	return ret;
}
/* Translate a DA9121 hardware mode value into a framework mode constant. */
static unsigned int da9121_map_mode(unsigned int mode)
{
	if (mode == DA9121_BUCK_MODE_FORCE_PWM)
		return REGULATOR_MODE_FAST;
	if (mode == DA9121_BUCK_MODE_FORCE_PWM_SHEDDING)
		return REGULATOR_MODE_NORMAL;
	if (mode == DA9121_BUCK_MODE_AUTO)
		return REGULATOR_MODE_IDLE;
	if (mode == DA9121_BUCK_MODE_FORCE_PFM)
		return REGULATOR_MODE_STANDBY;
	return REGULATOR_MODE_INVALID;
}
/* Translate the framework mode to the hardware encoding and write it. */
static int da9121_buck_set_mode(struct regulator_dev *rdev, unsigned int mode)
{
	struct da9121 *chip = rdev_get_drvdata(rdev);
	int id = rdev_get_id(rdev);
	unsigned int hw_mode;

	if (mode == REGULATOR_MODE_FAST)
		hw_mode = DA9121_BUCK_MODE_FORCE_PWM;
	else if (mode == REGULATOR_MODE_NORMAL)
		hw_mode = DA9121_BUCK_MODE_FORCE_PWM_SHEDDING;
	else if (mode == REGULATOR_MODE_IDLE)
		hw_mode = DA9121_BUCK_MODE_AUTO;
	else if (mode == REGULATOR_MODE_STANDBY)
		hw_mode = DA9121_BUCK_MODE_FORCE_PFM;
	else
		return -EINVAL;

	return regmap_update_bits(chip->regmap,
				  da9121_mode_field[id].reg,
				  da9121_mode_field[id].msk,
				  hw_mode);
}
/* Read the buck's mode field and map it to a framework mode constant. */
static unsigned int da9121_buck_get_mode(struct regulator_dev *rdev)
{
	struct da9121 *chip = rdev_get_drvdata(rdev);
	int id = rdev_get_id(rdev);
	unsigned int raw;
	unsigned int mapped;
	int err;

	err = regmap_read(chip->regmap, da9121_mode_field[id].reg, &raw);
	if (err < 0) {
		dev_err(chip->dev, "Cannot read BUCK register: %d\n", err);
		return -EINVAL;
	}

	mapped = da9121_map_mode(raw & da9121_mode_field[id].msk);
	return (mapped == REGULATOR_MODE_INVALID) ? -EINVAL : mapped;
}
/* Buck operations shared by every DA9121-family regulator descriptor. */
static const struct regulator_ops da9121_buck_ops = {
	.enable = regulator_enable_regmap,
	.disable = regulator_disable_regmap,
	.is_enabled = regulator_is_enabled_regmap,
	.set_voltage_sel = regulator_set_voltage_sel_regmap,
	.get_voltage_sel = regulator_get_voltage_sel_regmap,
	.list_voltage = regulator_list_voltage_linear,
	.get_current_limit = da9121_get_current_limit,
	.set_current_limit = da9121_set_current_limit,
	.set_mode = da9121_buck_set_mode,
	.get_mode = da9121_buck_get_mode,
};
/* DT sub-node names, filled with of_node pointers during registration. */
static struct of_regulator_match da9121_matches[] = {
	[DA9121_IDX_BUCK1] = { .name = "buck1" },
	[DA9121_IDX_BUCK2] = { .name = "buck2" },
};
/*
 * Per-regulator DT parse callback: counts the bucks declared for this
 * device, optionally takes a (shared) "enable" GPIO, and applies the
 * "dlg,ripple-cancel" setting on dual-buck variants.
 *
 * Returns 0 on success, -ENOMEM on allocation failure, or -ENODEV when
 * more regulators are declared than the detected variant supports.
 */
static int da9121_of_parse_cb(struct device_node *np,
			      const struct regulator_desc *desc,
			      struct regulator_config *config)
{
	struct da9121 *chip = config->driver_data;
	struct da9121_pdata *pdata;
	struct gpio_desc *ena_gpiod;

	if (chip->pdata == NULL) {
		pdata = devm_kzalloc(chip->dev, sizeof(*pdata), GFP_KERNEL);
		if (!pdata)
			return -ENOMEM;
		/*
		 * Persist the allocation; otherwise every buck's callback
		 * would allocate a fresh pdata and num_buck could never
		 * accumulate across calls, defeating the check below.
		 */
		chip->pdata = pdata;
	} else {
		pdata = chip->pdata;
	}

	pdata->num_buck++;
	if (pdata->num_buck > variant_parameters[chip->variant_id].num_bucks) {
		dev_err(chip->dev, "Error: excessive regulators for device\n");
		return -ENODEV;
	}

	/* Enable GPIO is optional and may be shared between both bucks. */
	ena_gpiod = fwnode_gpiod_get_index(of_fwnode_handle(np), "enable", 0,
					   GPIOD_OUT_HIGH |
					   GPIOD_FLAGS_BIT_NONEXCLUSIVE,
					   "da9121-enable");
	if (!IS_ERR(ena_gpiod))
		config->ena_gpiod = ena_gpiod;

	if (variant_parameters[chip->variant_id].num_bucks == 2) {
		uint32_t ripple_cancel;
		uint32_t ripple_reg;
		int ret;

		/*
		 * of_property_read_u32() returns 0 on success, so the
		 * property value must only be written when the read
		 * succeeds; the previous un-negated test applied an
		 * uninitialized value when the property was absent.
		 */
		if (!of_property_read_u32(da9121_matches[pdata->num_buck-1].of_node,
				"dlg,ripple-cancel", &ripple_cancel)) {
			if (pdata->num_buck > 1)
				ripple_reg = DA9xxx_REG_BUCK_BUCK2_7;
			else
				ripple_reg = DA9121_REG_BUCK_BUCK1_7;
			ret = regmap_update_bits(chip->regmap, ripple_reg,
					DA9xxx_MASK_BUCK_BUCKx_7_CHx_RIPPLE_CANCEL,
					ripple_cancel);
			if (ret < 0)
				dev_err(chip->dev, "Cannot set ripple mode, err: %d\n", ret);
		}
	}

	return 0;
}
/* DA9121/DA9130 voltage range: 300..1900 mV in 10 mV steps. */
#define DA9121_MIN_MV		300
#define DA9121_MAX_MV		1900
#define DA9121_STEP_MV		10
#define DA9121_MIN_SEL		(DA9121_MIN_MV / DA9121_STEP_MV)
#define DA9121_N_VOLTAGES	(((DA9121_MAX_MV - DA9121_MIN_MV) / DA9121_STEP_MV) \
				 + 1 + DA9121_MIN_SEL)
/* Single-buck descriptor for the DA9121/DA9130 variant. */
static const struct regulator_desc da9121_reg = {
	.id = DA9121_IDX_BUCK1,
	.name = "da9121",
	.of_match = "buck1",
	.of_parse_cb = da9121_of_parse_cb,
	.owner = THIS_MODULE,
	.regulators_node = "regulators",
	.of_map_mode = da9121_map_mode,
	.ops = &da9121_buck_ops,
	.type = REGULATOR_VOLTAGE,
	.n_voltages = DA9121_N_VOLTAGES,
	.min_uV = DA9121_MIN_MV * 1000,
	.uV_step = DA9121_STEP_MV * 1000,
	.linear_min_sel = DA9121_MIN_SEL,
	.vsel_reg = DA9121_REG_BUCK_BUCK1_5,
	.vsel_mask = DA9121_MASK_BUCK_BUCKx_5_CHx_A_VOUT,
	.enable_reg = DA9121_REG_BUCK_BUCK1_0,
	.enable_mask = DA9121_MASK_BUCK_BUCKx_0_CHx_EN,
	/* Default value of BUCK_BUCK1_0.CH1_SRC_DVC_UP */
	.ramp_delay = 20000,
	/* tBUCK_EN */
	.enable_time = 20,
};
/* Dual-buck descriptors for the DA9220/DA9132 variant. */
static const struct regulator_desc da9220_reg[2] = {
	{
		.id = DA9121_IDX_BUCK1,
		.name = "DA9220/DA9132 BUCK1",
		.of_match = "buck1",
		.of_parse_cb = da9121_of_parse_cb,
		.owner = THIS_MODULE,
		.regulators_node = "regulators",
		.of_map_mode = da9121_map_mode,
		.ops = &da9121_buck_ops,
		.type = REGULATOR_VOLTAGE,
		.n_voltages = DA9121_N_VOLTAGES,
		.min_uV = DA9121_MIN_MV * 1000,
		.uV_step = DA9121_STEP_MV * 1000,
		.linear_min_sel = DA9121_MIN_SEL,
		.enable_reg = DA9121_REG_BUCK_BUCK1_0,
		.enable_mask = DA9121_MASK_BUCK_BUCKx_0_CHx_EN,
		.vsel_reg = DA9121_REG_BUCK_BUCK1_5,
		.vsel_mask = DA9121_MASK_BUCK_BUCKx_5_CHx_A_VOUT,
	},
	{
		.id = DA9121_IDX_BUCK2,
		.name = "DA9220/DA9132 BUCK2",
		.of_match = "buck2",
		.of_parse_cb = da9121_of_parse_cb,
		.owner = THIS_MODULE,
		.regulators_node = "regulators",
		.of_map_mode = da9121_map_mode,
		.ops = &da9121_buck_ops,
		.type = REGULATOR_VOLTAGE,
		.n_voltages = DA9121_N_VOLTAGES,
		.min_uV = DA9121_MIN_MV * 1000,
		.uV_step = DA9121_STEP_MV * 1000,
		.linear_min_sel = DA9121_MIN_SEL,
		/* Buck 2 lives at the DA9xxx channel-2 register block */
		.enable_reg = DA9xxx_REG_BUCK_BUCK2_0,
		.enable_mask = DA9121_MASK_BUCK_BUCKx_0_CHx_EN,
		.vsel_reg = DA9xxx_REG_BUCK_BUCK2_5,
		.vsel_mask = DA9121_MASK_BUCK_BUCKx_5_CHx_A_VOUT,
	}
};
/* Dual-buck descriptors for the DA9122/DA9131 variant. */
static const struct regulator_desc da9122_reg[2] = {
	{
		.id = DA9121_IDX_BUCK1,
		.name = "DA9122/DA9131 BUCK1",
		.of_match = "buck1",
		.of_parse_cb = da9121_of_parse_cb,
		.owner = THIS_MODULE,
		.regulators_node = "regulators",
		.of_map_mode = da9121_map_mode,
		.ops = &da9121_buck_ops,
		.type = REGULATOR_VOLTAGE,
		.n_voltages = DA9121_N_VOLTAGES,
		.min_uV = DA9121_MIN_MV * 1000,
		.uV_step = DA9121_STEP_MV * 1000,
		.linear_min_sel = DA9121_MIN_SEL,
		.enable_reg = DA9121_REG_BUCK_BUCK1_0,
		.enable_mask = DA9121_MASK_BUCK_BUCKx_0_CHx_EN,
		.vsel_reg = DA9121_REG_BUCK_BUCK1_5,
		.vsel_mask = DA9121_MASK_BUCK_BUCKx_5_CHx_A_VOUT,
	},
	{
		.id = DA9121_IDX_BUCK2,
		.name = "DA9122/DA9131 BUCK2",
		.of_match = "buck2",
		.of_parse_cb = da9121_of_parse_cb,
		.owner = THIS_MODULE,
		.regulators_node = "regulators",
		.of_map_mode = da9121_map_mode,
		.ops = &da9121_buck_ops,
		.type = REGULATOR_VOLTAGE,
		.n_voltages = DA9121_N_VOLTAGES,
		.min_uV = DA9121_MIN_MV * 1000,
		.uV_step = DA9121_STEP_MV * 1000,
		.linear_min_sel = DA9121_MIN_SEL,
		/* Buck 2 lives at the DA9xxx channel-2 register block */
		.enable_reg = DA9xxx_REG_BUCK_BUCK2_0,
		.enable_mask = DA9121_MASK_BUCK_BUCKx_0_CHx_EN,
		.vsel_reg = DA9xxx_REG_BUCK_BUCK2_5,
		.vsel_mask = DA9121_MASK_BUCK_BUCKx_5_CHx_A_VOUT,
	}
};
/* Single-buck descriptor for the DA9217 variant (same range as DA9121). */
static const struct regulator_desc da9217_reg = {
	.id = DA9121_IDX_BUCK1,
	.name = "DA9217 BUCK1",
	.of_match = "buck1",
	.of_parse_cb = da9121_of_parse_cb,
	.owner = THIS_MODULE,
	.regulators_node = "regulators",
	.of_map_mode = da9121_map_mode,
	.ops = &da9121_buck_ops,
	.type = REGULATOR_VOLTAGE,
	.n_voltages = DA9121_N_VOLTAGES,
	.min_uV = DA9121_MIN_MV * 1000,
	.uV_step = DA9121_STEP_MV * 1000,
	.linear_min_sel = DA9121_MIN_SEL,
	.enable_reg = DA9121_REG_BUCK_BUCK1_0,
	.enable_mask = DA9121_MASK_BUCK_BUCKx_0_CHx_EN,
	.vsel_reg = DA9121_REG_BUCK_BUCK1_5,
	.vsel_mask = DA9121_MASK_BUCK_BUCKx_5_CHx_A_VOUT,
};
/* DA914x voltage range: 500..1300 mV in 10 mV steps. */
#define DA914X_MIN_MV		500
#define DA914X_MAX_MV		1300
#define DA914X_STEP_MV		10
#define DA914X_MIN_SEL		(DA914X_MIN_MV / DA914X_STEP_MV)
#define DA914X_N_VOLTAGES	(((DA914X_MAX_MV - DA914X_MIN_MV) / DA914X_STEP_MV) \
				 + 1 + DA914X_MIN_SEL)
/* Single-buck descriptor for the DA9141 variant. */
static const struct regulator_desc da9141_reg = {
	.id = DA9121_IDX_BUCK1,
	.name = "DA9141",
	.of_match = "buck1",
	.of_parse_cb = da9121_of_parse_cb,
	.owner = THIS_MODULE,
	.regulators_node = "regulators",
	.of_map_mode = da9121_map_mode,
	.ops = &da9121_buck_ops,
	.type = REGULATOR_VOLTAGE,
	.n_voltages = DA914X_N_VOLTAGES,
	.min_uV = DA914X_MIN_MV * 1000,
	.uV_step = DA914X_STEP_MV * 1000,
	.linear_min_sel = DA914X_MIN_SEL,
	.vsel_reg = DA9121_REG_BUCK_BUCK1_5,
	.vsel_mask = DA9121_MASK_BUCK_BUCKx_5_CHx_A_VOUT,
	.enable_reg = DA9121_REG_BUCK_BUCK1_0,
	.enable_mask = DA9121_MASK_BUCK_BUCKx_0_CHx_EN,
};
/* Single-buck descriptor for the DA9142 variant (DA914x voltage range). */
static const struct regulator_desc da9142_reg = {
	.id = DA9121_IDX_BUCK1,
	.name = "DA9142 BUCK1",
	.of_match = "buck1",
	.of_parse_cb = da9121_of_parse_cb,
	.owner = THIS_MODULE,
	.regulators_node = "regulators",
	.of_map_mode = da9121_map_mode,
	.ops = &da9121_buck_ops,
	.type = REGULATOR_VOLTAGE,
	.n_voltages = DA914X_N_VOLTAGES,
	.min_uV = DA914X_MIN_MV * 1000,
	.uV_step = DA914X_STEP_MV * 1000,
	.linear_min_sel = DA914X_MIN_SEL,
	.enable_reg = DA9121_REG_BUCK_BUCK1_0,
	.enable_mask = DA9121_MASK_BUCK_BUCKx_0_CHx_EN,
	.vsel_reg = DA9121_REG_BUCK_BUCK1_5,
	.vsel_mask = DA9121_MASK_BUCK_BUCKx_5_CHx_A_VOUT,
};
/*
 * Descriptor lookup indexed by [variant][buck index]; single-buck
 * variants leave the second slot NULL.
 */
static const struct regulator_desc *local_da9121_regulators[][DA9121_IDX_MAX] = {
	[DA9121_TYPE_DA9121_DA9130] = { &da9121_reg, NULL },
	[DA9121_TYPE_DA9220_DA9132] = { &da9220_reg[0], &da9220_reg[1] },
	[DA9121_TYPE_DA9122_DA9131] = { &da9122_reg[0], &da9122_reg[1] },
	[DA9121_TYPE_DA9217] = { &da9217_reg, NULL },
	[DA9121_TYPE_DA9141] = { &da9141_reg, NULL },
	[DA9121_TYPE_DA9142] = { &da9142_reg, NULL },
};
/*
 * Delayed-work poller: checks whether persistent fault conditions
 * (masked by the IRQ handler) have cleared, and if so unmasks their
 * interrupts again.  Re-queues itself while any condition persists.
 */
static void da9121_status_poll_on(struct work_struct *work)
{
	struct da9121 *chip = container_of(work, struct da9121, work.work);
	/* NOTE(review): arrays sized 3 but only banks 0-1 are read/used here */
	int status[3] = {0};
	int clear[3] = {0};
	unsigned long delay;
	int i;
	int ret;
	ret = regmap_bulk_read(chip->regmap, DA9121_REG_SYS_STATUS_0, status, 2);
	if (ret < 0) {
		dev_err(chip->dev,
			"Failed to read STATUS registers: %d\n", ret);
		goto error;
	}
	/* Possible events are tested to be within range for the variant, potentially
	 * masked by the IRQ handler (not just warned about), as having been masked,
	 * and the respective state cleared - then flagged to unmask for next IRQ.
	 */
	for (i = 0; i < ARRAY_SIZE(status_event_handling); i++) {
		const struct status_event_data *item = &status_event_handling[i];
		int reg_idx = item->reg_index;
		bool relevant = (item->buck_id <= variant_parameters[chip->variant_id].num_bucks);
		bool supported = (item->warn == NULL);
		bool persisting = (chip->persistent[reg_idx] & item->event_bit);
		bool now_cleared = !(status[reg_idx] & item->status_bit);
		if (relevant && supported && persisting && now_cleared) {
			clear[reg_idx] |= item->mask_bit;
			chip->persistent[reg_idx] &= ~item->event_bit;
		}
	}
	/* Unmask (write 0 to the mask bit) any events that have cleared */
	for (i = 0; i < 2; i++) {
		if (clear[i]) {
			unsigned int reg = DA9121_REG_SYS_MASK_0 + i;
			unsigned int mbit = clear[i];
			ret = regmap_update_bits(chip->regmap, reg, mbit, 0);
			if (ret < 0) {
				dev_err(chip->dev,
					"Failed to unmask 0x%02x %d\n",
					reg, ret);
				goto error;
			}
		}
	}
	/* Keep polling while any fault condition is still outstanding */
	if (chip->persistent[0] | chip->persistent[1]) {
		delay = msecs_to_jiffies(chip->passive_delay);
		queue_delayed_work(system_freezable_wq, &chip->work, delay);
	}
error:
	return;
}
/*
 * Threaded IRQ handler: reads the event/mask banks, notifies the
 * regulator framework of supported events, masks persistent faults
 * (the poller re-enables them once cleared), acknowledges handled
 * events and kicks off the status poll work.
 */
static irqreturn_t da9121_irq_handler(int irq, void *data)
{
	struct da9121 *chip = data;
	struct regulator_dev *rdev;
	int event[3] = {0};
	int handled[3] = {0};
	int mask[3] = {0};
	int ret = IRQ_NONE;
	int i;
	int err;
	err = regmap_bulk_read(chip->regmap, DA9121_REG_SYS_EVENT_0, event, 3);
	if (err < 0) {
		dev_err(chip->dev, "Failed to read EVENT registers %d\n", err);
		ret = IRQ_NONE;
		goto error;
	}
	err = regmap_bulk_read(chip->regmap, DA9121_REG_SYS_MASK_0, mask, 3);
	if (err < 0) {
		dev_err(chip->dev,
			"Failed to read MASK registers: %d\n", ret);
		ret = IRQ_NONE;
		goto error;
	}
	rdev = chip->rdev[DA9121_IDX_BUCK1];
	/* Possible events are tested to be within range for the variant, currently
	 * enabled, and having triggered this IRQ. The event may then be notified,
	 * or a warning given for unexpected events - those from device POR, and
	 * currently unsupported GPIO configurations.
	 */
	for (i = 0; i < ARRAY_SIZE(status_event_handling); i++) {
		const struct status_event_data *item = &status_event_handling[i];
		int reg_idx = item->reg_index;
		bool relevant = (item->buck_id <= variant_parameters[chip->variant_id].num_bucks);
		bool enabled = !(mask[reg_idx] & item->mask_bit);
		bool active = (event[reg_idx] & item->event_bit);
		bool notify = (item->warn == NULL);
		if (relevant && enabled && active) {
			if (notify) {
				chip->persistent[reg_idx] |= item->event_bit;
				regulator_notifier_call_chain(rdev, item->notification, NULL);
			} else {
				/*
				 * item->warn is data, not a format string:
				 * pass it through "%s" to avoid a
				 * format-string vulnerability/-Wformat-security.
				 */
				dev_warn(chip->dev, "%s\n", item->warn);
				handled[reg_idx] |= item->event_bit;
				ret = IRQ_HANDLED;
			}
		}
	}
	for (i = 0; i < 3; i++) {
		if (event[i] != handled[i]) {
			dev_warn(chip->dev,
				 "Unhandled event(s) in bank%d 0x%02x\n", i,
				 event[i] ^ handled[i]);
		}
	}
	/* Mask the interrupts for persistent events OV, OC, UV, WARN, CRIT */
	for (i = 0; i < 2; i++) {
		if (handled[i]) {
			unsigned int reg = DA9121_REG_SYS_MASK_0 + i;
			unsigned int mbit = handled[i];
			err = regmap_update_bits(chip->regmap, reg, mbit, mbit);
			if (err < 0) {
				dev_err(chip->dev,
					"Failed to mask 0x%02x interrupt %d\n",
					reg, err);
				ret = IRQ_NONE;
				goto error;
			}
		}
	}
	/* clear the events */
	if (handled[0] | handled[1] | handled[2]) {
		err = regmap_bulk_write(chip->regmap, DA9121_REG_SYS_EVENT_0, handled, 3);
		if (err < 0) {
			dev_err(chip->dev, "Fail to write EVENTs %d\n", err);
			ret = IRQ_NONE;
			goto error;
		}
	}
	queue_delayed_work(system_freezable_wq, &chip->work, 0);
error:
	return ret;
}
/*
 * Register one regulator per buck supported by the detected variant,
 * using the descriptor table selected at probe time.
 * Returns 0 on success or the error from devm_regulator_register().
 */
static int da9121_set_regulator_config(struct da9121 *chip)
{
	struct regulator_config config = { };
	unsigned int max_matches = variant_parameters[chip->variant_id].num_bucks;
	int ret = 0;
	int i;

	/* Common configuration shared by every registered buck */
	config.dev = chip->dev;
	config.driver_data = chip;
	config.regmap = chip->regmap;

	for (i = 0; i < max_matches; i++) {
		const struct regulator_desc *desc =
			local_da9121_regulators[chip->variant_id][i];

		chip->rdev[i] = devm_regulator_register(chip->dev, desc, &config);
		if (IS_ERR(chip->rdev[i])) {
			dev_err(chip->dev, "Failed to register regulator %s, %d/%d\n",
				desc->name, (i+1), max_matches);
			ret = PTR_ERR(chip->rdev[i]);
			goto error;
		}
	}
error:
	return ret;
}
/* DA9121 chip register model */
/* Registers readable on single-channel variants */
static const struct regmap_range da9121_1ch_readable_ranges[] = {
	regmap_reg_range(DA9121_REG_SYS_STATUS_0, DA9121_REG_SYS_MASK_3),
	regmap_reg_range(DA9121_REG_SYS_CONFIG_2, DA9121_REG_SYS_CONFIG_3),
	regmap_reg_range(DA9121_REG_SYS_GPIO0_0, DA9121_REG_SYS_GPIO2_1),
	regmap_reg_range(DA9121_REG_BUCK_BUCK1_0, DA9121_REG_BUCK_BUCK1_6),
	regmap_reg_range(DA9121_REG_OTP_DEVICE_ID, DA9121_REG_OTP_CONFIG_ID),
};
static const struct regmap_access_table da9121_1ch_readable_table = {
	.yes_ranges = da9121_1ch_readable_ranges,
	.n_yes_ranges = ARRAY_SIZE(da9121_1ch_readable_ranges),
};
/* Registers readable on dual-channel variants (adds the BUCK2 block) */
static const struct regmap_range da9121_2ch_readable_ranges[] = {
	regmap_reg_range(DA9121_REG_SYS_STATUS_0, DA9121_REG_SYS_MASK_3),
	regmap_reg_range(DA9121_REG_SYS_CONFIG_2, DA9121_REG_SYS_CONFIG_3),
	regmap_reg_range(DA9121_REG_SYS_GPIO0_0, DA9121_REG_SYS_GPIO2_1),
	regmap_reg_range(DA9121_REG_BUCK_BUCK1_0, DA9121_REG_BUCK_BUCK1_7),
	regmap_reg_range(DA9xxx_REG_BUCK_BUCK2_0, DA9xxx_REG_BUCK_BUCK2_7),
	regmap_reg_range(DA9121_REG_OTP_DEVICE_ID, DA9121_REG_OTP_CONFIG_ID),
};
static const struct regmap_access_table da9121_2ch_readable_table = {
	.yes_ranges = da9121_2ch_readable_ranges,
	.n_yes_ranges = ARRAY_SIZE(da9121_2ch_readable_ranges),
};
/* Registers writeable on single-channel variants */
static const struct regmap_range da9121_1ch_writeable_ranges[] = {
	regmap_reg_range(DA9121_REG_SYS_EVENT_0, DA9121_REG_SYS_MASK_3),
	regmap_reg_range(DA9121_REG_SYS_CONFIG_2, DA9121_REG_SYS_CONFIG_3),
	regmap_reg_range(DA9121_REG_SYS_GPIO0_0, DA9121_REG_SYS_GPIO2_1),
	regmap_reg_range(DA9121_REG_BUCK_BUCK1_0, DA9121_REG_BUCK_BUCK1_2),
	regmap_reg_range(DA9121_REG_BUCK_BUCK1_4, DA9121_REG_BUCK_BUCK1_6),
};
static const struct regmap_access_table da9121_1ch_writeable_table = {
	.yes_ranges = da9121_1ch_writeable_ranges,
	.n_yes_ranges = ARRAY_SIZE(da9121_1ch_writeable_ranges),
};
/* Registers writeable on dual-channel variants */
static const struct regmap_range da9121_2ch_writeable_ranges[] = {
	regmap_reg_range(DA9121_REG_SYS_EVENT_0, DA9121_REG_SYS_MASK_3),
	regmap_reg_range(DA9121_REG_SYS_CONFIG_2, DA9121_REG_SYS_CONFIG_3),
	regmap_reg_range(DA9121_REG_SYS_GPIO0_0, DA9121_REG_SYS_GPIO2_1),
	regmap_reg_range(DA9121_REG_BUCK_BUCK1_0, DA9121_REG_BUCK_BUCK1_2),
	regmap_reg_range(DA9121_REG_BUCK_BUCK1_4, DA9121_REG_BUCK_BUCK1_7),
	regmap_reg_range(DA9xxx_REG_BUCK_BUCK2_0, DA9xxx_REG_BUCK_BUCK2_2),
	regmap_reg_range(DA9xxx_REG_BUCK_BUCK2_4, DA9xxx_REG_BUCK_BUCK2_7),
};
static const struct regmap_access_table da9121_2ch_writeable_table = {
	.yes_ranges = da9121_2ch_writeable_ranges,
	.n_yes_ranges = ARRAY_SIZE(da9121_2ch_writeable_ranges),
};
/* Registers excluded from the regmap cache (hardware-updated) */
static const struct regmap_range da9121_volatile_ranges[] = {
	regmap_reg_range(DA9121_REG_SYS_STATUS_0, DA9121_REG_SYS_EVENT_2),
	regmap_reg_range(DA9121_REG_SYS_GPIO0_0, DA9121_REG_SYS_GPIO2_1),
	regmap_reg_range(DA9121_REG_BUCK_BUCK1_0, DA9121_REG_BUCK_BUCK1_6),
};
static const struct regmap_access_table da9121_volatile_table = {
	.yes_ranges = da9121_volatile_ranges,
	.n_yes_ranges = ARRAY_SIZE(da9121_volatile_ranges),
};
/* DA9121 regmap config for 1 channel variants */
static struct regmap_config da9121_1ch_regmap_config = {
	.reg_bits = 8,
	.val_bits = 8,
	.max_register = DA9121_REG_OTP_CONFIG_ID,
	.rd_table = &da9121_1ch_readable_table,
	.wr_table = &da9121_1ch_writeable_table,
	.volatile_table = &da9121_volatile_table,
	.cache_type = REGCACHE_RBTREE,
};
/* DA9121 regmap config for 2 channel variants */
static struct regmap_config da9121_2ch_regmap_config = {
	.reg_bits = 8,
	.val_bits = 8,
	.max_register = DA9121_REG_OTP_CONFIG_ID,
	.rd_table = &da9121_2ch_readable_table,
	.wr_table = &da9121_2ch_writeable_table,
	.volatile_table = &da9121_volatile_table,
	.cache_type = REGCACHE_RBTREE,
};
/*
 * Verify the device/variant IDs read from OTP against the subtype
 * selected by the DT compatible.  Returns 0 when they agree, -ENODEV
 * for an unknown device ID, -EINVAL for a mismatch or an unsupported
 * mask revision code (MRC), or a regmap error.
 */
static int da9121_check_device_type(struct i2c_client *i2c, struct da9121 *chip)
{
	u32 device_id;
	u32 variant_id;
	u8 variant_mrc, variant_vrc;
	char *type;
	bool config_match = false;
	int ret = 0;
	ret = regmap_read(chip->regmap, DA9121_REG_OTP_DEVICE_ID, &device_id);
	if (ret < 0) {
		dev_err(chip->dev, "Cannot read device ID: %d\n", ret);
		goto error;
	}
	ret = regmap_read(chip->regmap, DA9121_REG_OTP_VARIANT_ID, &variant_id);
	if (ret < 0) {
		dev_err(chip->dev, "Cannot read variant ID: %d\n", ret);
		goto error;
	}
	if ((device_id != DA9121_DEVICE_ID) && (device_id != DA914x_DEVICE_ID)) {
		dev_err(chip->dev, "Invalid device ID: 0x%02x\n", device_id);
		ret = -ENODEV;
		goto error;
	}
	/* Variant code (VRC) identifies the specific part within the family */
	variant_vrc = variant_id & DA9121_MASK_OTP_VARIANT_ID_VRC;
	switch (chip->subvariant_id) {
	case DA9121_SUBTYPE_DA9121:
		type = "DA9121";
		config_match = (variant_vrc == DA9121_VARIANT_VRC);
		break;
	case DA9121_SUBTYPE_DA9130:
		type = "DA9130";
		config_match = (variant_vrc == DA9130_VARIANT_VRC);
		break;
	case DA9121_SUBTYPE_DA9220:
		type = "DA9220";
		config_match = (variant_vrc == DA9220_VARIANT_VRC);
		break;
	case DA9121_SUBTYPE_DA9132:
		type = "DA9132";
		config_match = (variant_vrc == DA9132_VARIANT_VRC);
		break;
	case DA9121_SUBTYPE_DA9122:
		type = "DA9122";
		config_match = (variant_vrc == DA9122_VARIANT_VRC);
		break;
	case DA9121_SUBTYPE_DA9131:
		type = "DA9131";
		config_match = (variant_vrc == DA9131_VARIANT_VRC);
		break;
	case DA9121_SUBTYPE_DA9217:
		type = "DA9217";
		config_match = (variant_vrc == DA9217_VARIANT_VRC);
		break;
	default:
		type = "Unknown";
		break;
	}
	/* DA914x parts share a device ID distinct from the DA9121 family */
	if (device_id == DA914x_DEVICE_ID) {
		switch (chip->subvariant_id) {
		case DA9121_SUBTYPE_DA9141:
			type = "DA9141";
			config_match = (variant_vrc == DA9141_VARIANT_VRC);
			break;
		case DA9121_SUBTYPE_DA9142:
			type = "DA9142";
			config_match = (variant_vrc == DA9142_VARIANT_VRC);
			break;
		default:
			type = "Unknown";
			break;
		}
	}
	dev_info(chip->dev,
		 "Device detected (device-ID: 0x%02X, var-ID: 0x%02X, %s)\n",
		 device_id, variant_id, type);
	if (!config_match) {
		dev_err(chip->dev, "Device tree configuration does not match detected device.\n");
		ret = -EINVAL;
		goto error;
	}
	/* Reject mask revisions older than the supported baseline */
	variant_mrc = (variant_id & DA9121_MASK_OTP_VARIANT_ID_MRC)
			>> DA9121_SHIFT_OTP_VARIANT_ID_MRC;
	if (((device_id == DA9121_DEVICE_ID) &&
	     (variant_mrc < DA9121_VARIANT_MRC_BASE)) ||
	    ((device_id == DA914x_DEVICE_ID) &&
	     (variant_mrc != DA914x_VARIANT_MRC_BASE))) {
		dev_err(chip->dev,
			"Cannot support variant MRC: 0x%02X\n", variant_mrc);
		ret = -EINVAL;
	}
error:
	return ret;
}
/*
 * Select the variant-specific descriptor table and regmap config from
 * the DT subtype, create the regmap, and validate the hardware IDs.
 * Returns 0 on success or a negative errno.
 */
static int da9121_assign_chip_model(struct i2c_client *i2c,
			struct da9121 *chip)
{
	struct regmap_config *regmap;
	int ret = 0;
	chip->dev = &i2c->dev;
	/* Use configured subtype to select the regulator descriptor index and
	 * register map, common to both consumer and automotive grade variants
	 */
	switch (chip->subvariant_id) {
	case DA9121_SUBTYPE_DA9121:
	case DA9121_SUBTYPE_DA9130:
		chip->variant_id = DA9121_TYPE_DA9121_DA9130;
		regmap = &da9121_1ch_regmap_config;
		break;
	case DA9121_SUBTYPE_DA9217:
		chip->variant_id = DA9121_TYPE_DA9217;
		regmap = &da9121_1ch_regmap_config;
		break;
	case DA9121_SUBTYPE_DA9122:
	case DA9121_SUBTYPE_DA9131:
		chip->variant_id = DA9121_TYPE_DA9122_DA9131;
		regmap = &da9121_2ch_regmap_config;
		break;
	case DA9121_SUBTYPE_DA9220:
	case DA9121_SUBTYPE_DA9132:
		chip->variant_id = DA9121_TYPE_DA9220_DA9132;
		regmap = &da9121_2ch_regmap_config;
		break;
	case DA9121_SUBTYPE_DA9141:
		chip->variant_id = DA9121_TYPE_DA9141;
		regmap = &da9121_1ch_regmap_config;
		break;
	case DA9121_SUBTYPE_DA9142:
		chip->variant_id = DA9121_TYPE_DA9142;
		regmap = &da9121_2ch_regmap_config;
		break;
	default:
		return -EINVAL;
	}
	/* Set these up for of_regulator_match call which may want .of_map_modes */
	da9121_matches[0].desc = local_da9121_regulators[chip->variant_id][0];
	da9121_matches[1].desc = local_da9121_regulators[chip->variant_id][1];
	chip->regmap = devm_regmap_init_i2c(i2c, regmap);
	if (IS_ERR(chip->regmap)) {
		ret = PTR_ERR(chip->regmap);
		dev_err(chip->dev, "Failed to configure a register map: %d\n",
			ret);
		return ret;
	}
	ret = da9121_check_device_type(i2c, chip);
	return ret;
}
/*
 * If an IRQ line is wired up: read the optional polling period from DT,
 * request the threaded IRQ, unmask event banks 0-1 (banks 2-3 stay
 * masked), and set up the status-poll delayed work.
 * Returns 0 on success (or when no IRQ is configured).
 */
static int da9121_config_irq(struct i2c_client *i2c,
			struct da9121 *chip)
{
	unsigned int p_delay = DA9121_DEFAULT_POLLING_PERIOD_MS;
	/* 0 = unmask banks 0/1, 0xFF = keep banks 2/3 fully masked */
	const int mask_all[4] = { 0, 0, 0xFF, 0xFF };
	int ret = 0;
	chip->chip_irq = i2c->irq;
	if (chip->chip_irq != 0) {
		if (!of_property_read_u32(chip->dev->of_node,
					  "dlg,irq-polling-delay-passive-ms",
					  &p_delay)) {
			if (p_delay < DA9121_MIN_POLLING_PERIOD_MS ||
			    p_delay > DA9121_MAX_POLLING_PERIOD_MS) {
				dev_warn(chip->dev,
					 "Out-of-range polling period %d ms\n",
					 p_delay);
				p_delay = DA9121_DEFAULT_POLLING_PERIOD_MS;
			}
		}
		chip->passive_delay = p_delay;
		ret = request_threaded_irq(chip->chip_irq, NULL,
					da9121_irq_handler,
					IRQF_TRIGGER_LOW|IRQF_ONESHOT,
					"da9121", chip);
		if (ret != 0) {
			/* NOTE(review): message prints the irq number, not ret */
			dev_err(chip->dev, "Failed IRQ request: %d\n",
				chip->chip_irq);
			goto error;
		}
		ret = regmap_bulk_write(chip->regmap, DA9121_REG_SYS_MASK_0, mask_all, 4);
		if (ret != 0) {
			dev_err(chip->dev, "Failed to set IRQ masks: %d\n",
				ret);
			goto regmap_error;
		}
		INIT_DELAYED_WORK(&chip->work, da9121_status_poll_on);
		dev_info(chip->dev, "Interrupt polling period set at %d ms\n",
			 chip->passive_delay);
	}
error:
	return ret;
regmap_error:
	/* Release the IRQ requested above before failing */
	free_irq(chip->chip_irq, chip);
	return ret;
}
/* DT compatibles; .data carries the DA9121_SUBTYPE_* for each part. */
static const struct of_device_id da9121_dt_ids[] = {
	{ .compatible = "dlg,da9121", .data = (void *) DA9121_SUBTYPE_DA9121 },
	{ .compatible = "dlg,da9130", .data = (void *) DA9121_SUBTYPE_DA9130 },
	{ .compatible = "dlg,da9217", .data = (void *) DA9121_SUBTYPE_DA9217 },
	{ .compatible = "dlg,da9122", .data = (void *) DA9121_SUBTYPE_DA9122 },
	{ .compatible = "dlg,da9131", .data = (void *) DA9121_SUBTYPE_DA9131 },
	{ .compatible = "dlg,da9220", .data = (void *) DA9121_SUBTYPE_DA9220 },
	{ .compatible = "dlg,da9132", .data = (void *) DA9121_SUBTYPE_DA9132 },
	{ .compatible = "dlg,da9141", .data = (void *) DA9121_SUBTYPE_DA9141 },
	{ .compatible = "dlg,da9142", .data = (void *) DA9121_SUBTYPE_DA9142 },
	{ }
};
MODULE_DEVICE_TABLE(of, da9121_dt_ids);
static inline int da9121_of_get_id(struct device *dev)
{
const struct of_device_id *id = of_match_device(da9121_dt_ids, dev);
if (!id) {
dev_err(dev, "%s: Failed\n", __func__);
return -EINVAL;
}
return (uintptr_t)id->data;
}
/*
 * Probe: allocate driver state, detect the variant, mask all interrupt
 * banks, register the regulators and configure the IRQ/poller.
 */
static int da9121_i2c_probe(struct i2c_client *i2c)
{
	struct da9121 *chip;
	/* Mask every event bank until the IRQ path is fully set up */
	const int mask_all[4] = { 0xFF, 0xFF, 0xFF, 0xFF };
	int ret = 0;
	chip = devm_kzalloc(&i2c->dev, sizeof(struct da9121), GFP_KERNEL);
	if (!chip) {
		ret = -ENOMEM;
		goto error;
	}
	chip->pdata = i2c->dev.platform_data;
	chip->subvariant_id = da9121_of_get_id(&i2c->dev);
	/*
	 * da9121_i2c_remove() retrieves chip via i2c_get_clientdata();
	 * without this it would dereference NULL on driver removal.
	 */
	i2c_set_clientdata(i2c, chip);
	ret = da9121_assign_chip_model(i2c, chip);
	if (ret < 0)
		goto error;
	ret = regmap_bulk_write(chip->regmap, DA9121_REG_SYS_MASK_0, mask_all, 4);
	if (ret != 0) {
		dev_err(chip->dev, "Failed to set IRQ masks: %d\n", ret);
		goto error;
	}
	ret = da9121_set_regulator_config(chip);
	if (ret < 0)
		goto error;
	ret = da9121_config_irq(i2c, chip);
error:
	return ret;
}
/*
 * Remove: release the IRQ, stop the status poller, and re-mask every
 * event bank so the idle device raises no further interrupts.
 * NOTE(review): relies on clientdata being set in probe — verify
 * i2c_set_clientdata() is called there, otherwise chip is NULL here.
 */
static void da9121_i2c_remove(struct i2c_client *i2c)
{
	struct da9121 *chip = i2c_get_clientdata(i2c);
	const int mask_all[4] = { 0xFF, 0xFF, 0xFF, 0xFF };
	int ret;
	free_irq(chip->chip_irq, chip);
	cancel_delayed_work_sync(&chip->work);
	ret = regmap_bulk_write(chip->regmap, DA9121_REG_SYS_MASK_0, mask_all, 4);
	if (ret != 0)
		dev_err(chip->dev, "Failed to set IRQ masks: %d\n", ret);
}
/* Legacy I2C ID table; values are the DA9121_TYPE_* variant indices. */
static const struct i2c_device_id da9121_i2c_id[] = {
	{"da9121", DA9121_TYPE_DA9121_DA9130},
	{"da9130", DA9121_TYPE_DA9121_DA9130},
	{"da9217", DA9121_TYPE_DA9217},
	{"da9122", DA9121_TYPE_DA9122_DA9131},
	{"da9131", DA9121_TYPE_DA9122_DA9131},
	{"da9220", DA9121_TYPE_DA9220_DA9132},
	{"da9132", DA9121_TYPE_DA9220_DA9132},
	{"da9141", DA9121_TYPE_DA9141},
	{"da9142", DA9121_TYPE_DA9142},
	{},
};
MODULE_DEVICE_TABLE(i2c, da9121_i2c_id);
/* I2C driver glue */
static struct i2c_driver da9121_regulator_driver = {
	.driver = {
		.name = "da9121",
		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
		.of_match_table = da9121_dt_ids,
	},
	.probe = da9121_i2c_probe,
	.remove = da9121_i2c_remove,
	.id_table = da9121_i2c_id,
};
module_i2c_driver(da9121_regulator_driver);
MODULE_LICENSE("GPL v2");
| linux-master | drivers/regulator/da9121-regulator.c |
// SPDX-License-Identifier: GPL-2.0+
//
// wm8350.c -- Voltage and current regulation for the Wolfson WM8350 PMIC
//
// Copyright 2007, 2008 Wolfson Microelectronics PLC.
//
// Author: Liam Girdwood
// [email protected]
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/err.h>
#include <linux/i2c.h>
#include <linux/mfd/wm8350/core.h>
#include <linux/mfd/wm8350/pmic.h>
#include <linux/platform_device.h>
#include <linux/regulator/driver.h>
#include <linux/regulator/machine.h>
/* Maximum value possible for VSEL */
#define WM8350_DCDC_MAX_VSEL 0x66
/*
 * Selectable ISINK currents in microamps; the array index is the
 * register code programmed into the current-sink driver.
 */
static const unsigned int isink_cur[] = {
	4,
	5,
	6,
	7,
	8,
	10,
	11,
	14,
	16,
	19,
	23,
	27,
	32,
	39,
	46,
	54,
	65,
	77,
	92,
	109,
	130,
	154,
	183,
	218,
	259,
	308,
	367,
	436,
	518,
	616,
	733,
	872,
	1037,
	1233,
	1466,
	1744,
	2073,
	2466,
	2933,
	3487,
	4147,
	4932,
	5865,
	6975,
	8294,
	9864,
	11730,
	13949,
	16589,
	19728,
	23460,
	27899,
	33178,
	39455,
	46920,
	55798,
	66355,
	78910,
	93840,
	111596,
	132710,
	157820,
	187681,
	223191
};
/* turn on ISINK followed by DCDC */
/*
 * Enable a current sink: asserts the sink enable and drive bits, then
 * requests the DCDC that was bound to this sink at init time.
 * Returns -EINVAL if the sink has no supported DCDC bound (only
 * DCDC2/DCDC5 are valid sources).
 */
static int wm8350_isink_enable(struct regulator_dev *rdev)
{
	struct wm8350 *wm8350 = rdev_get_drvdata(rdev);
	int isink = rdev_get_id(rdev);
	switch (isink) {
	case WM8350_ISINK_A:
		switch (wm8350->pmic.isink_A_dcdc) {
		case WM8350_DCDC_2:
		case WM8350_DCDC_5:
			wm8350_set_bits(wm8350, WM8350_POWER_MGMT_7,
					WM8350_CS1_ENA);
			wm8350_set_bits(wm8350, WM8350_CSA_FLASH_CONTROL,
					WM8350_CS1_DRIVE);
			wm8350_set_bits(wm8350, WM8350_DCDC_LDO_REQUESTED,
					1 << (wm8350->pmic.isink_A_dcdc -
					      WM8350_DCDC_1));
			break;
		default:
			return -EINVAL;
		}
		break;
	case WM8350_ISINK_B:
		switch (wm8350->pmic.isink_B_dcdc) {
		case WM8350_DCDC_2:
		case WM8350_DCDC_5:
			wm8350_set_bits(wm8350, WM8350_POWER_MGMT_7,
					WM8350_CS2_ENA);
			wm8350_set_bits(wm8350, WM8350_CSB_FLASH_CONTROL,
					WM8350_CS2_DRIVE);
			wm8350_set_bits(wm8350, WM8350_DCDC_LDO_REQUESTED,
					1 << (wm8350->pmic.isink_B_dcdc -
					      WM8350_DCDC_1));
			break;
		default:
			return -EINVAL;
		}
		break;
	default:
		return -EINVAL;
	}
	return 0;
}
/*
 * Disable a current sink: releases the bound DCDC request first, then
 * clears the sink enable bit (reverse order of wm8350_isink_enable).
 */
static int wm8350_isink_disable(struct regulator_dev *rdev)
{
	struct wm8350 *wm8350 = rdev_get_drvdata(rdev);
	int isink = rdev_get_id(rdev);
	switch (isink) {
	case WM8350_ISINK_A:
		switch (wm8350->pmic.isink_A_dcdc) {
		case WM8350_DCDC_2:
		case WM8350_DCDC_5:
			wm8350_clear_bits(wm8350, WM8350_DCDC_LDO_REQUESTED,
					  1 << (wm8350->pmic.isink_A_dcdc -
						WM8350_DCDC_1));
			wm8350_clear_bits(wm8350, WM8350_POWER_MGMT_7,
					  WM8350_CS1_ENA);
			break;
		default:
			return -EINVAL;
		}
		break;
	case WM8350_ISINK_B:
		switch (wm8350->pmic.isink_B_dcdc) {
		case WM8350_DCDC_2:
		case WM8350_DCDC_5:
			wm8350_clear_bits(wm8350, WM8350_DCDC_LDO_REQUESTED,
					  1 << (wm8350->pmic.isink_B_dcdc -
						WM8350_DCDC_1));
			wm8350_clear_bits(wm8350, WM8350_POWER_MGMT_7,
					  WM8350_CS2_ENA);
			break;
		default:
			return -EINVAL;
		}
		break;
	default:
		return -EINVAL;
	}
	return 0;
}
/*
 * Report whether a current sink is enabled: non-zero (bit 15 of the
 * sink driver register) when on, 0 when off, -EINVAL for unknown IDs.
 */
static int wm8350_isink_is_enabled(struct regulator_dev *rdev)
{
	struct wm8350 *wm8350 = rdev_get_drvdata(rdev);
	int reg;

	switch (rdev_get_id(rdev)) {
	case WM8350_ISINK_A:
		reg = WM8350_CURRENT_SINK_DRIVER_A;
		break;
	case WM8350_ISINK_B:
		reg = WM8350_CURRENT_SINK_DRIVER_B;
		break;
	default:
		return -EINVAL;
	}

	return wm8350_reg_read(wm8350, reg) & 0x8000;
}
/*
 * Return the sink's enable ramp time in microseconds, derived from the
 * flash-control on-ramp field.  Flash mode uses millisecond-scale
 * ramps; normal mode uses much longer ramps.
 * NOTE(review): the CS1_* mask/flag constants are applied to both
 * sinks — presumably CS1/CS2 share bit positions; confirm against the
 * WM8350 register map.
 */
static int wm8350_isink_enable_time(struct regulator_dev *rdev)
{
	struct wm8350 *wm8350 = rdev_get_drvdata(rdev);
	int isink = rdev_get_id(rdev);
	int reg;
	switch (isink) {
	case WM8350_ISINK_A:
		reg = wm8350_reg_read(wm8350, WM8350_CSA_FLASH_CONTROL);
		break;
	case WM8350_ISINK_B:
		reg = wm8350_reg_read(wm8350, WM8350_CSB_FLASH_CONTROL);
		break;
	default:
		return -EINVAL;
	}
	if (reg & WM8350_CS1_FLASH_MODE) {
		switch (reg & WM8350_CS1_ON_RAMP_MASK) {
		case 0:
			return 0;
		case 1:
			return 1950;
		case 2:
			return 3910;
		case 3:
			return 7800;
		}
	} else {
		switch (reg & WM8350_CS1_ON_RAMP_MASK) {
		case 0:
			return 0;
		case 1:
			return 250000;
		case 2:
			return 500000;
		case 3:
			return 1000000;
		}
	}
	return -EINVAL;
}
/*
 * Program a sink's flash-control register: mode (flash vs normal),
 * trigger source, duration and ramp fields are OR'd together, so the
 * caller must pass values already shifted to their field positions.
 * Exported for use by board/platform code.
 */
int wm8350_isink_set_flash(struct wm8350 *wm8350, int isink, u16 mode,
			   u16 trigger, u16 duration, u16 on_ramp, u16 off_ramp,
			   u16 drive)
{
	switch (isink) {
	case WM8350_ISINK_A:
		wm8350_reg_write(wm8350, WM8350_CSA_FLASH_CONTROL,
				 (mode ? WM8350_CS1_FLASH_MODE : 0) |
				 (trigger ? WM8350_CS1_TRIGSRC : 0) |
				 duration | on_ramp | off_ramp | drive);
		break;
	case WM8350_ISINK_B:
		wm8350_reg_write(wm8350, WM8350_CSB_FLASH_CONTROL,
				 (mode ? WM8350_CS2_FLASH_MODE : 0) |
				 (trigger ? WM8350_CS2_TRIGSRC : 0) |
				 duration | on_ramp | off_ramp | drive);
		break;
	default:
		return -EINVAL;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(wm8350_isink_set_flash);
/*
 * Set the voltage a DCDC uses in system suspend, written to the
 * converter's LOW_POWER register.  Only DCDC1/3/4/6 have a suspend
 * voltage; DCDC2/5 are unsupported here.
 */
static int wm8350_dcdc_set_suspend_voltage(struct regulator_dev *rdev, int uV)
{
	struct wm8350 *wm8350 = rdev_get_drvdata(rdev);
	int sel, volt_reg, dcdc = rdev_get_id(rdev);
	u16 val;
	dev_dbg(wm8350->dev, "%s %d mV %d\n", __func__, dcdc, uV / 1000);
	switch (dcdc) {
	case WM8350_DCDC_1:
		volt_reg = WM8350_DCDC1_LOW_POWER;
		break;
	case WM8350_DCDC_3:
		volt_reg = WM8350_DCDC3_LOW_POWER;
		break;
	case WM8350_DCDC_4:
		volt_reg = WM8350_DCDC4_LOW_POWER;
		break;
	case WM8350_DCDC_6:
		volt_reg = WM8350_DCDC6_LOW_POWER;
		break;
	case WM8350_DCDC_2:
	case WM8350_DCDC_5:
	default:
		return -EINVAL;
	}
	/* Map the requested microvolt value to a selector (exact match only) */
	sel = regulator_map_voltage_linear(rdev, uV, uV);
	if (sel < 0)
		return sel;
	/* all DCDCs have same mV bits */
	val = wm8350_reg_read(wm8350, volt_reg) & ~WM8350_DC1_VSEL_MASK;
	wm8350_reg_write(wm8350, volt_reg, val | sel);
	return 0;
}
/*
 * Keep a DCDC enabled during suspend by restoring the hibernate mode
 * that was saved by wm8350_dcdc_set_suspend_disable().
 */
static int wm8350_dcdc_set_suspend_enable(struct regulator_dev *rdev)
{
	struct wm8350 *wm8350 = rdev_get_drvdata(rdev);
	int dcdc = rdev_get_id(rdev);
	u16 val;
	switch (dcdc) {
	case WM8350_DCDC_1:
		val = wm8350_reg_read(wm8350, WM8350_DCDC1_LOW_POWER)
			& ~WM8350_DCDC_HIB_MODE_MASK;
		wm8350_reg_write(wm8350, WM8350_DCDC1_LOW_POWER,
				 val | wm8350->pmic.dcdc1_hib_mode);
		break;
	case WM8350_DCDC_3:
		val = wm8350_reg_read(wm8350, WM8350_DCDC3_LOW_POWER)
			& ~WM8350_DCDC_HIB_MODE_MASK;
		wm8350_reg_write(wm8350, WM8350_DCDC3_LOW_POWER,
				 val | wm8350->pmic.dcdc3_hib_mode);
		break;
	case WM8350_DCDC_4:
		val = wm8350_reg_read(wm8350, WM8350_DCDC4_LOW_POWER)
			& ~WM8350_DCDC_HIB_MODE_MASK;
		wm8350_reg_write(wm8350, WM8350_DCDC4_LOW_POWER,
				 val | wm8350->pmic.dcdc4_hib_mode);
		break;
	case WM8350_DCDC_6:
		val = wm8350_reg_read(wm8350, WM8350_DCDC6_LOW_POWER)
			& ~WM8350_DCDC_HIB_MODE_MASK;
		wm8350_reg_write(wm8350, WM8350_DCDC6_LOW_POWER,
				 val | wm8350->pmic.dcdc6_hib_mode);
		break;
	case WM8350_DCDC_2:
	case WM8350_DCDC_5:
	default:
		return -EINVAL;
	}
	return 0;
}
/*
 * Disable a DCDC during suspend: saves the current hibernate mode (so
 * set_suspend_enable can restore it) and programs HIB_MODE_DIS.
 */
static int wm8350_dcdc_set_suspend_disable(struct regulator_dev *rdev)
{
	struct wm8350 *wm8350 = rdev_get_drvdata(rdev);
	int dcdc = rdev_get_id(rdev);
	u16 val;
	switch (dcdc) {
	case WM8350_DCDC_1:
		val = wm8350_reg_read(wm8350, WM8350_DCDC1_LOW_POWER);
		wm8350->pmic.dcdc1_hib_mode = val & WM8350_DCDC_HIB_MODE_MASK;
		wm8350_reg_write(wm8350, WM8350_DCDC1_LOW_POWER,
				 val | WM8350_DCDC_HIB_MODE_DIS);
		break;
	case WM8350_DCDC_3:
		val = wm8350_reg_read(wm8350, WM8350_DCDC3_LOW_POWER);
		wm8350->pmic.dcdc3_hib_mode = val & WM8350_DCDC_HIB_MODE_MASK;
		wm8350_reg_write(wm8350, WM8350_DCDC3_LOW_POWER,
				 val | WM8350_DCDC_HIB_MODE_DIS);
		break;
	case WM8350_DCDC_4:
		val = wm8350_reg_read(wm8350, WM8350_DCDC4_LOW_POWER);
		wm8350->pmic.dcdc4_hib_mode = val & WM8350_DCDC_HIB_MODE_MASK;
		wm8350_reg_write(wm8350, WM8350_DCDC4_LOW_POWER,
				 val | WM8350_DCDC_HIB_MODE_DIS);
		break;
	case WM8350_DCDC_6:
		val = wm8350_reg_read(wm8350, WM8350_DCDC6_LOW_POWER);
		wm8350->pmic.dcdc6_hib_mode = val & WM8350_DCDC_HIB_MODE_MASK;
		wm8350_reg_write(wm8350, WM8350_DCDC6_LOW_POWER,
				 val | WM8350_DCDC_HIB_MODE_DIS);
		break;
	case WM8350_DCDC_2:
	case WM8350_DCDC_5:
	default:
		return -EINVAL;
	}
	return 0;
}
/*
 * DCDC2/5 variant of suspend-enable: these converters use a HIB_MODE
 * field in their CONTROL register rather than a LOW_POWER register.
 */
static int wm8350_dcdc25_set_suspend_enable(struct regulator_dev *rdev)
{
	struct wm8350 *wm8350 = rdev_get_drvdata(rdev);
	int dcdc = rdev_get_id(rdev);
	u16 val;
	switch (dcdc) {
	case WM8350_DCDC_2:
		val = wm8350_reg_read(wm8350, WM8350_DCDC2_CONTROL)
		    & ~WM8350_DC2_HIB_MODE_MASK;
		wm8350_reg_write(wm8350, WM8350_DCDC2_CONTROL, val |
				 (WM8350_DC2_HIB_MODE_ACTIVE << WM8350_DC2_HIB_MODE_SHIFT));
		break;
	case WM8350_DCDC_5:
		val = wm8350_reg_read(wm8350, WM8350_DCDC5_CONTROL)
		    & ~WM8350_DC5_HIB_MODE_MASK;
		wm8350_reg_write(wm8350, WM8350_DCDC5_CONTROL, val |
				 (WM8350_DC5_HIB_MODE_ACTIVE << WM8350_DC5_HIB_MODE_SHIFT));
		break;
	default:
		return -EINVAL;
	}
	return 0;
}
/*
 * DCDC2/5 variant of suspend-disable: programs HIB_MODE_DISABLE into
 * the converter's CONTROL register.
 */
static int wm8350_dcdc25_set_suspend_disable(struct regulator_dev *rdev)
{
	struct wm8350 *wm8350 = rdev_get_drvdata(rdev);
	int dcdc = rdev_get_id(rdev);
	u16 val;
	switch (dcdc) {
	case WM8350_DCDC_2:
		val = wm8350_reg_read(wm8350, WM8350_DCDC2_CONTROL)
		    & ~WM8350_DC2_HIB_MODE_MASK;
		wm8350_reg_write(wm8350, WM8350_DCDC2_CONTROL, val |
				 (WM8350_DC2_HIB_MODE_DISABLE << WM8350_DC2_HIB_MODE_SHIFT));
		break;
	case WM8350_DCDC_5:
		val = wm8350_reg_read(wm8350, WM8350_DCDC5_CONTROL)
		    & ~WM8350_DC5_HIB_MODE_MASK;
		wm8350_reg_write(wm8350, WM8350_DCDC5_CONTROL, val |
				 (WM8350_DC5_HIB_MODE_DISABLE << WM8350_DC5_HIB_MODE_SHIFT));
		break;
	default:
		return -EINVAL;
	}
	return 0;
}
/*
 * Record the hibernate mode a DCDC should use in suspend.  The value
 * is only cached in the pmic state here; it is written to hardware by
 * wm8350_dcdc_set_suspend_enable().  DCDC2/5 are unsupported.
 */
static int wm8350_dcdc_set_suspend_mode(struct regulator_dev *rdev,
	unsigned int mode)
{
	struct wm8350 *wm8350 = rdev_get_drvdata(rdev);
	u16 *slot;
	u16 setting;

	/* Select the per-converter cache slot for the hibernate mode */
	switch (rdev_get_id(rdev)) {
	case WM8350_DCDC_1:
		slot = &wm8350->pmic.dcdc1_hib_mode;
		break;
	case WM8350_DCDC_3:
		slot = &wm8350->pmic.dcdc3_hib_mode;
		break;
	case WM8350_DCDC_4:
		slot = &wm8350->pmic.dcdc4_hib_mode;
		break;
	case WM8350_DCDC_6:
		slot = &wm8350->pmic.dcdc6_hib_mode;
		break;
	case WM8350_DCDC_2:
	case WM8350_DCDC_5:
	default:
		return -EINVAL;
	}

	/* Translate the regulator framework mode to the hibernate setting */
	switch (mode) {
	case REGULATOR_MODE_NORMAL:
		setting = WM8350_DCDC_HIB_MODE_IMAGE;
		break;
	case REGULATOR_MODE_IDLE:
		setting = WM8350_DCDC_HIB_MODE_STANDBY;
		break;
	case REGULATOR_MODE_STANDBY:
		setting = WM8350_DCDC_HIB_MODE_LDO_IM;
		break;
	default:
		return -EINVAL;
	}

	*slot = setting;
	return 0;
}
/*
 * LDO output ranges shared by all four LDOs:
 * selectors 0-15:  900 mV .. 1.65 V in 50 mV steps
 * selectors 16-31: 1.8 V  .. 3.3 V  in 100 mV steps
 */
static const struct linear_range wm8350_ldo_ranges[] = {
	REGULATOR_LINEAR_RANGE(900000, 0, 15, 50000),
	REGULATOR_LINEAR_RANGE(1800000, 16, 31, 100000),
};
/*
 * Program the voltage an LDO should output while the system is suspended,
 * by writing the selector into the LDO's LOW_POWER register.
 *
 * @uV: exact target voltage in microvolts; must map onto wm8350_ldo_ranges.
 *
 * Returns 0 on success, -EINVAL for an unknown LDO, or a negative error
 * from the voltage mapping when @uV is not an exact table value.
 */
static int wm8350_ldo_set_suspend_voltage(struct regulator_dev *rdev, int uV)
{
	struct wm8350 *wm8350 = rdev_get_drvdata(rdev);
	int sel, volt_reg, ldo = rdev_get_id(rdev);
	u16 val;

	dev_dbg(wm8350->dev, "%s %d mV %d\n", __func__, ldo, uV / 1000);

	switch (ldo) {
	case WM8350_LDO_1:
		volt_reg = WM8350_LDO1_LOW_POWER;
		break;
	case WM8350_LDO_2:
		volt_reg = WM8350_LDO2_LOW_POWER;
		break;
	case WM8350_LDO_3:
		volt_reg = WM8350_LDO3_LOW_POWER;
		break;
	case WM8350_LDO_4:
		volt_reg = WM8350_LDO4_LOW_POWER;
		break;
	default:
		return -EINVAL;
	}

	/* min == max: only an exact table voltage is accepted */
	sel = regulator_map_voltage_linear_range(rdev, uV, uV);
	if (sel < 0)
		return sel;

	/* all LDOs have same mV bits */
	val = wm8350_reg_read(wm8350, volt_reg) & ~WM8350_LDO1_VSEL_MASK;
	wm8350_reg_write(wm8350, volt_reg, val | sel);

	return 0;
}
/*
 * Keep an LDO enabled during suspend by clearing its hibernate-mode field
 * (cleared == enabled in hibernate).
 *
 * Returns 0 on success, -EINVAL for an unknown LDO.
 */
static int wm8350_ldo_set_suspend_enable(struct regulator_dev *rdev)
{
	struct wm8350 *wm8350 = rdev_get_drvdata(rdev);
	int volt_reg, ldo = rdev_get_id(rdev);
	u16 val;

	switch (ldo) {
	case WM8350_LDO_1:
		volt_reg = WM8350_LDO1_LOW_POWER;
		break;
	case WM8350_LDO_2:
		volt_reg = WM8350_LDO2_LOW_POWER;
		break;
	case WM8350_LDO_3:
		volt_reg = WM8350_LDO3_LOW_POWER;
		break;
	case WM8350_LDO_4:
		volt_reg = WM8350_LDO4_LOW_POWER;
		break;
	default:
		return -EINVAL;
	}

	/* all LDOs have same mV bits */
	val = wm8350_reg_read(wm8350, volt_reg) & ~WM8350_LDO1_HIB_MODE_MASK;
	wm8350_reg_write(wm8350, volt_reg, val);

	return 0;
}
/*
 * Turn an LDO off during suspend by setting its hibernate-mode field to
 * the DISABLE value.
 *
 * Returns 0 on success, -EINVAL for an unknown LDO.
 */
static int wm8350_ldo_set_suspend_disable(struct regulator_dev *rdev)
{
	struct wm8350 *wm8350 = rdev_get_drvdata(rdev);
	int volt_reg, ldo = rdev_get_id(rdev);
	u16 val;

	switch (ldo) {
	case WM8350_LDO_1:
		volt_reg = WM8350_LDO1_LOW_POWER;
		break;
	case WM8350_LDO_2:
		volt_reg = WM8350_LDO2_LOW_POWER;
		break;
	case WM8350_LDO_3:
		volt_reg = WM8350_LDO3_LOW_POWER;
		break;
	case WM8350_LDO_4:
		volt_reg = WM8350_LDO4_LOW_POWER;
		break;
	default:
		return -EINVAL;
	}

	/* all LDOs have same mV bits */
	val = wm8350_reg_read(wm8350, volt_reg) & ~WM8350_LDO1_HIB_MODE_MASK;
	wm8350_reg_write(wm8350, volt_reg, val | WM8350_LDO1_HIB_MODE_DIS);

	return 0;
}
/*
 * Program the power-sequencing slots for a DCDC converter.
 *
 * @dcdc:  converter id (WM8350_DCDC_1 .. WM8350_DCDC_6)
 * @start: enable slot (0-15)
 * @stop:  shutdown slot (0-15)
 * @fault: action taken on error, written to the ERRACT field
 *
 * Returns 0 on success, -EINVAL for out-of-range slots or an unknown DCDC.
 */
int wm8350_dcdc_set_slot(struct wm8350 *wm8350, int dcdc, u16 start,
	u16 stop, u16 fault)
{
	int slot_reg;
	u16 val;

	dev_dbg(wm8350->dev, "%s %d start %d stop %d\n",
		__func__, dcdc, start, stop);

	/* slot valid ? */
	if (start > 15 || stop > 15)
		return -EINVAL;

	switch (dcdc) {
	case WM8350_DCDC_1:
		slot_reg = WM8350_DCDC1_TIMEOUTS;
		break;
	case WM8350_DCDC_2:
		slot_reg = WM8350_DCDC2_TIMEOUTS;
		break;
	case WM8350_DCDC_3:
		slot_reg = WM8350_DCDC3_TIMEOUTS;
		break;
	case WM8350_DCDC_4:
		slot_reg = WM8350_DCDC4_TIMEOUTS;
		break;
	case WM8350_DCDC_5:
		slot_reg = WM8350_DCDC5_TIMEOUTS;
		break;
	case WM8350_DCDC_6:
		slot_reg = WM8350_DCDC6_TIMEOUTS;
		break;
	default:
		return -EINVAL;
	}

	/* the DC1 field layout is shared by all six TIMEOUTS registers */
	val = wm8350_reg_read(wm8350, slot_reg) &
		~(WM8350_DC1_ENSLOT_MASK | WM8350_DC1_SDSLOT_MASK |
		WM8350_DC1_ERRACT_MASK);
	wm8350_reg_write(wm8350, slot_reg,
		val | (start << WM8350_DC1_ENSLOT_SHIFT) |
		(stop << WM8350_DC1_SDSLOT_SHIFT) |
		(fault << WM8350_DC1_ERRACT_SHIFT));

	return 0;
}
EXPORT_SYMBOL_GPL(wm8350_dcdc_set_slot);
/*
 * Program the power-sequencing slots for an LDO.
 *
 * @ldo:   regulator id (WM8350_LDO_1 .. WM8350_LDO_4)
 * @start: enable slot (0-15), written at bit 10
 * @stop:  shutdown slot (0-15), written at bit 6
 *
 * Returns 0 on success, -EINVAL for out-of-range slots or an unknown LDO.
 */
int wm8350_ldo_set_slot(struct wm8350 *wm8350, int ldo, u16 start, u16 stop)
{
	int slot_reg;
	u16 val;

	dev_dbg(wm8350->dev, "%s %d start %d stop %d\n",
		__func__, ldo, start, stop);

	/* slot valid ? */
	if (start > 15 || stop > 15)
		return -EINVAL;

	switch (ldo) {
	case WM8350_LDO_1:
		slot_reg = WM8350_LDO1_TIMEOUTS;
		break;
	case WM8350_LDO_2:
		slot_reg = WM8350_LDO2_TIMEOUTS;
		break;
	case WM8350_LDO_3:
		slot_reg = WM8350_LDO3_TIMEOUTS;
		break;
	case WM8350_LDO_4:
		slot_reg = WM8350_LDO4_TIMEOUTS;
		break;
	default:
		return -EINVAL;
	}

	/*
	 * NOTE(review): only the SDSLOT field is cleared here, yet both the
	 * start (<< 10) and stop (<< 6) fields are OR-ed in, so stale bits
	 * in the start field could survive - verify against the WM8350
	 * register map whether the enable-slot mask should also be cleared.
	 */
	val = wm8350_reg_read(wm8350, slot_reg) & ~WM8350_LDO1_SDSLOT_MASK;
	wm8350_reg_write(wm8350, slot_reg, val | ((start << 10) | (stop << 6)));

	return 0;
}
EXPORT_SYMBOL_GPL(wm8350_ldo_set_slot);
/*
 * Configure operating mode, current limit, ramp and feedback source for
 * DCDC2 or DCDC5 (the two hardware-controlled converters).
 *
 * Each argument is a raw field value shifted into the respective
 * DCDCn_CONTROL field; the caller is responsible for valid encodings.
 *
 * Returns 0 on success, -EINVAL for any other DCDC id.
 */
int wm8350_dcdc25_set_mode(struct wm8350 *wm8350, int dcdc, u16 mode,
	u16 ilim, u16 ramp, u16 feedback)
{
	u16 val;

	dev_dbg(wm8350->dev, "%s %d mode: %s %s\n", __func__, dcdc,
		mode ? "normal" : "boost", ilim ? "low" : "normal");

	switch (dcdc) {
	case WM8350_DCDC_2:
		/* clear all four fields, then OR in the new values */
		val = wm8350_reg_read(wm8350, WM8350_DCDC2_CONTROL)
			& ~(WM8350_DC2_MODE_MASK | WM8350_DC2_ILIM_MASK |
			WM8350_DC2_RMP_MASK | WM8350_DC2_FBSRC_MASK);
		wm8350_reg_write(wm8350, WM8350_DCDC2_CONTROL, val |
			(mode << WM8350_DC2_MODE_SHIFT) |
			(ilim << WM8350_DC2_ILIM_SHIFT) |
			(ramp << WM8350_DC2_RMP_SHIFT) |
			(feedback << WM8350_DC2_FBSRC_SHIFT));
		break;
	case WM8350_DCDC_5:
		val = wm8350_reg_read(wm8350, WM8350_DCDC5_CONTROL)
			& ~(WM8350_DC5_MODE_MASK | WM8350_DC5_ILIM_MASK |
			WM8350_DC5_RMP_MASK | WM8350_DC5_FBSRC_MASK);
		wm8350_reg_write(wm8350, WM8350_DCDC5_CONTROL, val |
			(mode << WM8350_DC5_MODE_SHIFT) |
			(ilim << WM8350_DC5_ILIM_SHIFT) |
			(ramp << WM8350_DC5_RMP_SHIFT) |
			(feedback << WM8350_DC5_FBSRC_SHIFT));
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(wm8350_dcdc25_set_mode);
/*
 * Set or clear the force-PWM (continuous conduction) bit for one of the
 * software-controlled DCDC converters (1, 3, 4 or 6).
 *
 * Returns the result of the bit update, or -EINVAL for other DCDC ids.
 */
static int force_continuous_enable(struct wm8350 *wm8350, int dcdc, int enable)
{
	int pwm_reg;

	switch (dcdc) {
	case WM8350_DCDC_1:
		pwm_reg = WM8350_DCDC1_FORCE_PWM;
		break;
	case WM8350_DCDC_3:
		pwm_reg = WM8350_DCDC3_FORCE_PWM;
		break;
	case WM8350_DCDC_4:
		pwm_reg = WM8350_DCDC4_FORCE_PWM;
		break;
	case WM8350_DCDC_6:
		pwm_reg = WM8350_DCDC6_FORCE_PWM;
		break;
	default:
		return -EINVAL;
	}

	/* the FORCE_PWM_ENA bit has the same layout in all four registers */
	if (enable)
		return wm8350_set_bits(wm8350, pwm_reg,
				       WM8350_DCDC1_FORCE_PWM_ENA);

	return wm8350_clear_bits(wm8350, pwm_reg, WM8350_DCDC1_FORCE_PWM_ENA);
}
/*
 * Set the active operating mode for DCDC1/3/4/6 by combining the global
 * ACTIVE/SLEEP option bits with the per-converter force-PWM control.
 * DCDC2/5 are hardware controlled and rejected.
 *
 * Returns 0 (unknown modes fall through the switch unchanged), -EINVAL
 * for an unsupported DCDC id.
 */
static int wm8350_dcdc_set_mode(struct regulator_dev *rdev, unsigned int mode)
{
	struct wm8350 *wm8350 = rdev_get_drvdata(rdev);
	int dcdc = rdev_get_id(rdev);
	u16 val;

	if (dcdc < WM8350_DCDC_1 || dcdc > WM8350_DCDC_6)
		return -EINVAL;

	if (dcdc == WM8350_DCDC_2 || dcdc == WM8350_DCDC_5)
		return -EINVAL;

	/* one option bit per converter in the shared OPTIONS registers */
	val = 1 << (dcdc - WM8350_DCDC_1);

	switch (mode) {
	case REGULATOR_MODE_FAST:
		/* force continuous mode */
		wm8350_set_bits(wm8350, WM8350_DCDC_ACTIVE_OPTIONS, val);
		wm8350_clear_bits(wm8350, WM8350_DCDC_SLEEP_OPTIONS, val);
		force_continuous_enable(wm8350, dcdc, 1);
		break;
	case REGULATOR_MODE_NORMAL:
		/* active / pulse skipping */
		wm8350_set_bits(wm8350, WM8350_DCDC_ACTIVE_OPTIONS, val);
		wm8350_clear_bits(wm8350, WM8350_DCDC_SLEEP_OPTIONS, val);
		force_continuous_enable(wm8350, dcdc, 0);
		break;
	case REGULATOR_MODE_IDLE:
		/* standby mode */
		force_continuous_enable(wm8350, dcdc, 0);
		wm8350_clear_bits(wm8350, WM8350_DCDC_SLEEP_OPTIONS, val);
		wm8350_clear_bits(wm8350, WM8350_DCDC_ACTIVE_OPTIONS, val);
		break;
	case REGULATOR_MODE_STANDBY:
		/* LDO mode */
		force_continuous_enable(wm8350, dcdc, 0);
		wm8350_set_bits(wm8350, WM8350_DCDC_SLEEP_OPTIONS, val);
		break;
	}

	return 0;
}
/*
 * Derive the current operating mode of DCDC1/3/4/6 from the ACTIVE/SLEEP
 * option bits and the force-PWM enable bit:
 *   active + force-PWM  -> FAST
 *   active              -> NORMAL
 *   neither             -> IDLE
 *   sleep               -> STANDBY
 *
 * NOTE(review): for DCDC2/5 this returns -EINVAL from a function whose
 * return type is unsigned int, so the core sees a large positive value
 * rather than an error - confirm this is intended.
 */
static unsigned int wm8350_dcdc_get_mode(struct regulator_dev *rdev)
{
	struct wm8350 *wm8350 = rdev_get_drvdata(rdev);
	int dcdc = rdev_get_id(rdev);
	u16 mask, sleep, active, force;
	int mode = REGULATOR_MODE_NORMAL;
	int reg;

	switch (dcdc) {
	case WM8350_DCDC_1:
		reg = WM8350_DCDC1_FORCE_PWM;
		break;
	case WM8350_DCDC_3:
		reg = WM8350_DCDC3_FORCE_PWM;
		break;
	case WM8350_DCDC_4:
		reg = WM8350_DCDC4_FORCE_PWM;
		break;
	case WM8350_DCDC_6:
		reg = WM8350_DCDC6_FORCE_PWM;
		break;
	default:
		return -EINVAL;
	}

	mask = 1 << (dcdc - WM8350_DCDC_1);
	active = wm8350_reg_read(wm8350, WM8350_DCDC_ACTIVE_OPTIONS) & mask;
	force = wm8350_reg_read(wm8350, reg) & WM8350_DCDC1_FORCE_PWM_ENA;
	sleep = wm8350_reg_read(wm8350, WM8350_DCDC_SLEEP_OPTIONS) & mask;

	dev_dbg(wm8350->dev, "mask %x active %x sleep %x force %x",
		mask, active, sleep, force);

	if (active && !sleep) {
		if (force)
			mode = REGULATOR_MODE_FAST;
		else
			mode = REGULATOR_MODE_NORMAL;
	} else if (!active && !sleep)
		mode = REGULATOR_MODE_IDLE;
	else if (sleep)
		mode = REGULATOR_MODE_STANDBY;

	return mode;
}
/* LDOs have a single operating mode, so always report NORMAL. */
static unsigned int wm8350_ldo_get_mode(struct regulator_dev *rdev)
{
	return REGULATOR_MODE_NORMAL;
}
/*
 * Maps a load-current range (in uA, inclusive) to the most efficient
 * regulator mode.  Tables are terminated by a { -1, -1, ... } entry.
 */
struct wm8350_dcdc_efficiency {
	int uA_load_min;
	int uA_load_max;
	unsigned int mode;
};

/* Efficiency table for DCDC1 and DCDC6 (up to 1 A). */
static const struct wm8350_dcdc_efficiency dcdc1_6_efficiency[] = {
	{0, 10000, REGULATOR_MODE_STANDBY},       /* 0 - 10mA - LDO */
	{10000, 100000, REGULATOR_MODE_IDLE},     /* 10mA - 100mA - Standby */
	{100000, 1000000, REGULATOR_MODE_NORMAL}, /* > 100mA - Active */
	{-1, -1, REGULATOR_MODE_NORMAL},
};

/* Efficiency table for DCDC3 and DCDC4 (up to 800 mA). */
static const struct wm8350_dcdc_efficiency dcdc3_4_efficiency[] = {
	{0, 10000, REGULATOR_MODE_STANDBY},      /* 0 - 10mA - LDO */
	{10000, 100000, REGULATOR_MODE_IDLE},    /* 10mA - 100mA - Standby */
	{100000, 800000, REGULATOR_MODE_NORMAL}, /* > 100mA - Active */
	{-1, -1, REGULATOR_MODE_NORMAL},
};
static unsigned int get_mode(int uA, const struct wm8350_dcdc_efficiency *eff)
{
int i = 0;
while (eff[i].uA_load_min != -1) {
if (uA >= eff[i].uA_load_min && uA <= eff[i].uA_load_max)
return eff[i].mode;
i++;
}
return REGULATOR_MODE_NORMAL;
}
/* Query the regulator for its most efficient mode @ uV,uA.
 * WM8350 regulator efficiency is pretty similar over
 * different input and output uV.
 */
/*
 * Pick the most efficient mode for the given output current using the
 * per-converter efficiency tables; input/output voltage are ignored (see
 * the comment above).  Unknown converters default to NORMAL.
 */
static unsigned int wm8350_dcdc_get_optimum_mode(struct regulator_dev *rdev,
	int input_uV, int output_uV,
	int output_uA)
{
	int dcdc = rdev_get_id(rdev), mode;

	switch (dcdc) {
	case WM8350_DCDC_1:
	case WM8350_DCDC_6:
		mode = get_mode(output_uA, dcdc1_6_efficiency);
		break;
	case WM8350_DCDC_3:
	case WM8350_DCDC_4:
		mode = get_mode(output_uA, dcdc3_4_efficiency);
		break;
	default:
		mode = REGULATOR_MODE_NORMAL;
		break;
	}

	return mode;
}
/* Ops for the software-controlled buck converters DCDC1/3/4/6. */
static const struct regulator_ops wm8350_dcdc_ops = {
	.set_voltage_sel = regulator_set_voltage_sel_regmap,
	.get_voltage_sel = regulator_get_voltage_sel_regmap,
	.list_voltage = regulator_list_voltage_linear,
	.map_voltage = regulator_map_voltage_linear,
	.enable = regulator_enable_regmap,
	.disable = regulator_disable_regmap,
	.is_enabled = regulator_is_enabled_regmap,
	.get_mode = wm8350_dcdc_get_mode,
	.set_mode = wm8350_dcdc_set_mode,
	.get_optimum_mode = wm8350_dcdc_get_optimum_mode,
	.set_suspend_voltage = wm8350_dcdc_set_suspend_voltage,
	.set_suspend_enable = wm8350_dcdc_set_suspend_enable,
	.set_suspend_disable = wm8350_dcdc_set_suspend_disable,
	.set_suspend_mode = wm8350_dcdc_set_suspend_mode,
};

/* Ops for DCDC2/5: enable/disable only, no voltage control. */
static const struct regulator_ops wm8350_dcdc2_5_ops = {
	.enable = regulator_enable_regmap,
	.disable = regulator_disable_regmap,
	.is_enabled = regulator_is_enabled_regmap,
	.set_suspend_enable = wm8350_dcdc25_set_suspend_enable,
	.set_suspend_disable = wm8350_dcdc25_set_suspend_disable,
};

/* Ops for the four LDOs. */
static const struct regulator_ops wm8350_ldo_ops = {
	.map_voltage = regulator_map_voltage_linear_range,
	.set_voltage_sel = regulator_set_voltage_sel_regmap,
	.get_voltage_sel = regulator_get_voltage_sel_regmap,
	.list_voltage = regulator_list_voltage_linear_range,
	.enable = regulator_enable_regmap,
	.disable = regulator_disable_regmap,
	.is_enabled = regulator_is_enabled_regmap,
	.get_mode = wm8350_ldo_get_mode,
	.set_suspend_voltage = wm8350_ldo_set_suspend_voltage,
	.set_suspend_enable = wm8350_ldo_set_suspend_enable,
	.set_suspend_disable = wm8350_ldo_set_suspend_disable,
};

/* Ops for the two current sinks (ISINKA/ISINKB). */
static const struct regulator_ops wm8350_isink_ops = {
	.set_current_limit = regulator_set_current_limit_regmap,
	.get_current_limit = regulator_get_current_limit_regmap,
	.enable = wm8350_isink_enable,
	.disable = wm8350_isink_disable,
	.is_enabled = wm8350_isink_is_enabled,
	.enable_time = wm8350_isink_enable_time,
};
/*
 * Static descriptors for every WM8350 regulator, indexed by regulator id
 * (pdev->id in wm8350_regulator_probe()).  The .irq field is driver
 * private and selects the under-voltage / current-sink interrupt.
 */
static const struct regulator_desc wm8350_reg[NUM_WM8350_REGULATORS] = {
	{
		.name = "DCDC1",
		.id = WM8350_DCDC_1,
		.ops = &wm8350_dcdc_ops,
		.irq = WM8350_IRQ_UV_DC1,
		.type = REGULATOR_VOLTAGE,
		.n_voltages = WM8350_DCDC_MAX_VSEL + 1,
		.min_uV = 850000,
		.uV_step = 25000,
		.vsel_reg = WM8350_DCDC1_CONTROL,
		.vsel_mask = WM8350_DC1_VSEL_MASK,
		.enable_reg = WM8350_DCDC_LDO_REQUESTED,
		.enable_mask = WM8350_DC1_ENA,
		.owner = THIS_MODULE,
	},
	{
		.name = "DCDC2",
		.id = WM8350_DCDC_2,
		.ops = &wm8350_dcdc2_5_ops,
		.irq = WM8350_IRQ_UV_DC2,
		.type = REGULATOR_VOLTAGE,
		.enable_reg = WM8350_DCDC_LDO_REQUESTED,
		.enable_mask = WM8350_DC2_ENA,
		.owner = THIS_MODULE,
	},
	{
		.name = "DCDC3",
		.id = WM8350_DCDC_3,
		.ops = &wm8350_dcdc_ops,
		.irq = WM8350_IRQ_UV_DC3,
		.type = REGULATOR_VOLTAGE,
		.n_voltages = WM8350_DCDC_MAX_VSEL + 1,
		.min_uV = 850000,
		.uV_step = 25000,
		.vsel_reg = WM8350_DCDC3_CONTROL,
		.vsel_mask = WM8350_DC3_VSEL_MASK,
		.enable_reg = WM8350_DCDC_LDO_REQUESTED,
		.enable_mask = WM8350_DC3_ENA,
		.owner = THIS_MODULE,
	},
	{
		.name = "DCDC4",
		.id = WM8350_DCDC_4,
		.ops = &wm8350_dcdc_ops,
		.irq = WM8350_IRQ_UV_DC4,
		.type = REGULATOR_VOLTAGE,
		.n_voltages = WM8350_DCDC_MAX_VSEL + 1,
		.min_uV = 850000,
		.uV_step = 25000,
		.vsel_reg = WM8350_DCDC4_CONTROL,
		.vsel_mask = WM8350_DC4_VSEL_MASK,
		.enable_reg = WM8350_DCDC_LDO_REQUESTED,
		.enable_mask = WM8350_DC4_ENA,
		.owner = THIS_MODULE,
	},
	{
		.name = "DCDC5",
		.id = WM8350_DCDC_5,
		.ops = &wm8350_dcdc2_5_ops,
		.irq = WM8350_IRQ_UV_DC5,
		.type = REGULATOR_VOLTAGE,
		.enable_reg = WM8350_DCDC_LDO_REQUESTED,
		.enable_mask = WM8350_DC5_ENA,
		.owner = THIS_MODULE,
	},
	{
		.name = "DCDC6",
		.id = WM8350_DCDC_6,
		.ops = &wm8350_dcdc_ops,
		.irq = WM8350_IRQ_UV_DC6,
		.type = REGULATOR_VOLTAGE,
		.n_voltages = WM8350_DCDC_MAX_VSEL + 1,
		.min_uV = 850000,
		.uV_step = 25000,
		.vsel_reg = WM8350_DCDC6_CONTROL,
		.vsel_mask = WM8350_DC6_VSEL_MASK,
		.enable_reg = WM8350_DCDC_LDO_REQUESTED,
		.enable_mask = WM8350_DC6_ENA,
		.owner = THIS_MODULE,
	},
	{
		.name = "LDO1",
		.id = WM8350_LDO_1,
		.ops = &wm8350_ldo_ops,
		.irq = WM8350_IRQ_UV_LDO1,
		.type = REGULATOR_VOLTAGE,
		.n_voltages = WM8350_LDO1_VSEL_MASK + 1,
		.linear_ranges = wm8350_ldo_ranges,
		.n_linear_ranges = ARRAY_SIZE(wm8350_ldo_ranges),
		.vsel_reg = WM8350_LDO1_CONTROL,
		.vsel_mask = WM8350_LDO1_VSEL_MASK,
		.enable_reg = WM8350_DCDC_LDO_REQUESTED,
		.enable_mask = WM8350_LDO1_ENA,
		.owner = THIS_MODULE,
	},
	{
		.name = "LDO2",
		.id = WM8350_LDO_2,
		.ops = &wm8350_ldo_ops,
		.irq = WM8350_IRQ_UV_LDO2,
		.type = REGULATOR_VOLTAGE,
		.n_voltages = WM8350_LDO2_VSEL_MASK + 1,
		.linear_ranges = wm8350_ldo_ranges,
		.n_linear_ranges = ARRAY_SIZE(wm8350_ldo_ranges),
		.vsel_reg = WM8350_LDO2_CONTROL,
		.vsel_mask = WM8350_LDO2_VSEL_MASK,
		.enable_reg = WM8350_DCDC_LDO_REQUESTED,
		.enable_mask = WM8350_LDO2_ENA,
		.owner = THIS_MODULE,
	},
	{
		.name = "LDO3",
		.id = WM8350_LDO_3,
		.ops = &wm8350_ldo_ops,
		.irq = WM8350_IRQ_UV_LDO3,
		.type = REGULATOR_VOLTAGE,
		.n_voltages = WM8350_LDO3_VSEL_MASK + 1,
		.linear_ranges = wm8350_ldo_ranges,
		.n_linear_ranges = ARRAY_SIZE(wm8350_ldo_ranges),
		.vsel_reg = WM8350_LDO3_CONTROL,
		.vsel_mask = WM8350_LDO3_VSEL_MASK,
		.enable_reg = WM8350_DCDC_LDO_REQUESTED,
		.enable_mask = WM8350_LDO3_ENA,
		.owner = THIS_MODULE,
	},
	{
		.name = "LDO4",
		.id = WM8350_LDO_4,
		.ops = &wm8350_ldo_ops,
		.irq = WM8350_IRQ_UV_LDO4,
		.type = REGULATOR_VOLTAGE,
		.n_voltages = WM8350_LDO4_VSEL_MASK + 1,
		.linear_ranges = wm8350_ldo_ranges,
		.n_linear_ranges = ARRAY_SIZE(wm8350_ldo_ranges),
		.vsel_reg = WM8350_LDO4_CONTROL,
		.vsel_mask = WM8350_LDO4_VSEL_MASK,
		.enable_reg = WM8350_DCDC_LDO_REQUESTED,
		.enable_mask = WM8350_LDO4_ENA,
		.owner = THIS_MODULE,
	},
	{
		.name = "ISINKA",
		.id = WM8350_ISINK_A,
		.ops = &wm8350_isink_ops,
		.irq = WM8350_IRQ_CS1,
		.type = REGULATOR_CURRENT,
		.owner = THIS_MODULE,
		.curr_table = isink_cur,
		.n_current_limits = ARRAY_SIZE(isink_cur),
		.csel_reg = WM8350_CURRENT_SINK_DRIVER_A,
		.csel_mask = WM8350_CS1_ISEL_MASK,
	},
	{
		.name = "ISINKB",
		.id = WM8350_ISINK_B,
		.ops = &wm8350_isink_ops,
		.irq = WM8350_IRQ_CS2,
		.type = REGULATOR_CURRENT,
		.owner = THIS_MODULE,
		.curr_table = isink_cur,
		.n_current_limits = ARRAY_SIZE(isink_cur),
		.csel_reg = WM8350_CURRENT_SINK_DRIVER_B,
		.csel_mask = WM8350_CS2_ISEL_MASK,
	},
};
/*
 * Shared IRQ handler for all WM8350 regulators: current-sink interrupts
 * report a regulation-out event, every other interrupt reports
 * under-voltage.  @data is the regulator_dev registered with the IRQ.
 */
static irqreturn_t pmic_uv_handler(int irq, void *data)
{
	struct regulator_dev *rdev = data;
	unsigned long event;

	if (irq == WM8350_IRQ_CS1 || irq == WM8350_IRQ_CS2)
		event = REGULATOR_EVENT_REGULATION_OUT;
	else
		event = REGULATOR_EVENT_UNDER_VOLTAGE;

	regulator_notifier_call_chain(rdev, event, NULL);

	return IRQ_HANDLED;
}
/*
 * Platform probe - one platform device exists per regulator (created by
 * wm8350_register_regulator()); pdev->id selects the wm8350_reg descriptor.
 * Caches the hardware's current hibernate mode for DCDC1/3/4/6, registers
 * the regulator with the core and installs its UV / current-sink IRQ.
 */
static int wm8350_regulator_probe(struct platform_device *pdev)
{
	struct wm8350 *wm8350 = dev_get_drvdata(&pdev->dev);
	struct regulator_config config = { };
	struct regulator_dev *rdev;
	int ret;
	u16 val;

	if (pdev->id < WM8350_DCDC_1 || pdev->id > WM8350_ISINK_B)
		return -ENODEV;

	/* do any regulator specific init */
	switch (pdev->id) {
	case WM8350_DCDC_1:
		val = wm8350_reg_read(wm8350, WM8350_DCDC1_LOW_POWER);
		wm8350->pmic.dcdc1_hib_mode = val & WM8350_DCDC_HIB_MODE_MASK;
		break;
	case WM8350_DCDC_3:
		val = wm8350_reg_read(wm8350, WM8350_DCDC3_LOW_POWER);
		wm8350->pmic.dcdc3_hib_mode = val & WM8350_DCDC_HIB_MODE_MASK;
		break;
	case WM8350_DCDC_4:
		val = wm8350_reg_read(wm8350, WM8350_DCDC4_LOW_POWER);
		wm8350->pmic.dcdc4_hib_mode = val & WM8350_DCDC_HIB_MODE_MASK;
		break;
	case WM8350_DCDC_6:
		val = wm8350_reg_read(wm8350, WM8350_DCDC6_LOW_POWER);
		wm8350->pmic.dcdc6_hib_mode = val & WM8350_DCDC_HIB_MODE_MASK;
		break;
	}

	config.dev = &pdev->dev;
	config.init_data = dev_get_platdata(&pdev->dev);
	/* driver_data is the wm8350 itself - ops use rdev_get_drvdata() */
	config.driver_data = dev_get_drvdata(&pdev->dev);
	config.regmap = wm8350->regmap;

	/* register regulator */
	rdev = devm_regulator_register(&pdev->dev, &wm8350_reg[pdev->id],
				       &config);
	if (IS_ERR(rdev)) {
		dev_err(&pdev->dev, "failed to register %s\n",
			wm8350_reg[pdev->id].name);
		return PTR_ERR(rdev);
	}

	/* register regulator IRQ */
	ret = wm8350_register_irq(wm8350, wm8350_reg[pdev->id].irq,
				  pmic_uv_handler, 0, "UV", rdev);
	if (ret < 0) {
		dev_err(&pdev->dev, "failed to register regulator %s IRQ\n",
			wm8350_reg[pdev->id].name);
		return ret;
	}

	return 0;
}
/*
 * Platform remove: release the under-voltage IRQ taken in probe.
 *
 * NOTE(review): wm8350_register_regulator() stores the wm8350 pointer as
 * platform drvdata, yet this reads it back as a regulator_dev - the two
 * look inconsistent; verify which object is actually stored before
 * relying on rdev/wm8350 here.
 */
static int wm8350_regulator_remove(struct platform_device *pdev)
{
	struct regulator_dev *rdev = platform_get_drvdata(pdev);
	struct wm8350 *wm8350 = rdev_get_drvdata(rdev);

	wm8350_free_irq(wm8350, wm8350_reg[pdev->id].irq, rdev);

	return 0;
}
/*
 * Create and register the platform device for one WM8350 regulator.
 *
 * @reg:      regulator id, also used as the platform device id
 * @initdata: regulator constraints/consumers; its driver_data is set to
 *            the wm8350 so the descriptor ops can reach the chip
 *
 * Returns 0 on success; -EINVAL for a bad id, -EBUSY if already
 * registered, -ENODEV if the chip variant lacks this DCDC/ISINK, or the
 * platform_device_add() error.
 */
int wm8350_register_regulator(struct wm8350 *wm8350, int reg,
	struct regulator_init_data *initdata)
{
	struct platform_device *pdev;
	int ret;

	if (reg < 0 || reg >= NUM_WM8350_REGULATORS)
		return -EINVAL;

	if (wm8350->pmic.pdev[reg])
		return -EBUSY;

	/* some chip variants expose fewer DCDCs/ISINKs */
	if (reg >= WM8350_DCDC_1 && reg <= WM8350_DCDC_6 &&
	    reg > wm8350->pmic.max_dcdc)
		return -ENODEV;
	if (reg >= WM8350_ISINK_A && reg <= WM8350_ISINK_B &&
	    reg > wm8350->pmic.max_isink)
		return -ENODEV;

	pdev = platform_device_alloc("wm8350-regulator", reg);
	if (!pdev)
		return -ENOMEM;

	wm8350->pmic.pdev[reg] = pdev;

	initdata->driver_data = wm8350;

	pdev->dev.platform_data = initdata;
	pdev->dev.parent = wm8350->dev;
	platform_set_drvdata(pdev, wm8350);

	ret = platform_device_add(pdev);
	if (ret != 0) {
		dev_err(wm8350->dev, "Failed to register regulator %d: %d\n",
			reg, ret);
		/* drop the reference and forget the device on failure */
		platform_device_put(pdev);
		wm8350->pmic.pdev[reg] = NULL;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(wm8350_register_regulator);
/**
 * wm8350_register_led - Register a WM8350 LED output
 *
 * @wm8350: The WM8350 device to configure.
 * @lednum: LED device index to create.
 * @dcdc: The DCDC to use for the LED.
 * @isink: The ISINK to use for the LED.
 * @pdata: Configuration for the LED.
 *
 * The WM8350 supports the use of an ISINK together with a DCDC to
 * provide a power-efficient LED driver.  This function registers the
 * regulators and instantiates the platform device for a LED.  The
 * operating modes for the LED regulators must be configured using
 * wm8350_isink_set_flash(), wm8350_dcdc25_set_mode() and
 * wm8350_dcdc_set_slot() prior to calling this function.
 *
 * Return: 0 on success or a negative errno on failure.
 */
int wm8350_register_led(struct wm8350 *wm8350, int lednum, int dcdc, int isink,
	struct wm8350_led_platform_data *pdata)
{
	struct wm8350_led *led;
	struct platform_device *pdev;
	int ret;

	if (lednum >= ARRAY_SIZE(wm8350->pmic.led) || lednum < 0) {
		dev_err(wm8350->dev, "Invalid LED index %d\n", lednum);
		return -ENODEV;
	}

	led = &wm8350->pmic.led[lednum];

	if (led->pdev) {
		dev_err(wm8350->dev, "LED %d already allocated\n", lednum);
		return -EINVAL;
	}

	pdev = platform_device_alloc("wm8350-led", lednum);
	if (pdev == NULL) {
		dev_err(wm8350->dev, "Failed to allocate LED %d\n", lednum);
		return -ENOMEM;
	}

	/* wire the ISINK up as a current regulator consumed by the LED */
	led->isink_consumer.dev_name = dev_name(&pdev->dev);
	led->isink_consumer.supply = "led_isink";
	led->isink_init.num_consumer_supplies = 1;
	led->isink_init.consumer_supplies = &led->isink_consumer;
	led->isink_init.constraints.min_uA = 0;
	led->isink_init.constraints.max_uA = pdata->max_uA;
	led->isink_init.constraints.valid_ops_mask
		= REGULATOR_CHANGE_CURRENT | REGULATOR_CHANGE_STATUS;
	led->isink_init.constraints.valid_modes_mask = REGULATOR_MODE_NORMAL;
	ret = wm8350_register_regulator(wm8350, isink, &led->isink_init);
	if (ret != 0) {
		platform_device_put(pdev);
		return ret;
	}

	/* the DCDC supplies the LED's voltage rail */
	led->dcdc_consumer.dev_name = dev_name(&pdev->dev);
	led->dcdc_consumer.supply = "led_vcc";
	led->dcdc_init.num_consumer_supplies = 1;
	led->dcdc_init.consumer_supplies = &led->dcdc_consumer;
	led->dcdc_init.constraints.valid_modes_mask = REGULATOR_MODE_NORMAL;
	led->dcdc_init.constraints.valid_ops_mask = REGULATOR_CHANGE_STATUS;
	ret = wm8350_register_regulator(wm8350, dcdc, &led->dcdc_init);
	if (ret != 0) {
		platform_device_put(pdev);
		return ret;
	}

	/* remember which DCDC feeds which ISINK for later lookups */
	switch (isink) {
	case WM8350_ISINK_A:
		wm8350->pmic.isink_A_dcdc = dcdc;
		break;
	case WM8350_ISINK_B:
		wm8350->pmic.isink_B_dcdc = dcdc;
		break;
	}

	pdev->dev.platform_data = pdata;
	pdev->dev.parent = wm8350->dev;
	ret = platform_device_add(pdev);
	if (ret != 0) {
		dev_err(wm8350->dev, "Failed to register LED %d: %d\n",
			lednum, ret);
		platform_device_put(pdev);
		return ret;
	}

	led->pdev = pdev;

	return 0;
}
EXPORT_SYMBOL_GPL(wm8350_register_led);
/* Platform driver bound to every "wm8350-regulator" device. */
static struct platform_driver wm8350_regulator_driver = {
	.probe = wm8350_regulator_probe,
	.remove = wm8350_regulator_remove,
	.driver		= {
		.name	= "wm8350-regulator",
		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
	},
};
/*
 * Registered at subsys_initcall level so the regulators exist before
 * the drivers that consume them are probed.
 */
static int __init wm8350_regulator_init(void)
{
	return platform_driver_register(&wm8350_regulator_driver);
}
subsys_initcall(wm8350_regulator_init);

static void __exit wm8350_regulator_exit(void)
{
	platform_driver_unregister(&wm8350_regulator_driver);
}
module_exit(wm8350_regulator_exit);
/* Module information */
MODULE_AUTHOR("Liam Girdwood");
MODULE_DESCRIPTION("WM8350 voltage and current regulator driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:wm8350-regulator");
| linux-master | drivers/regulator/wm8350-regulator.c |
// SPDX-License-Identifier: GPL-2.0
//
// Copyright (c) 2021 MediaTek Inc.
#include <linux/module.h>
#include <linux/of.h>
#include <linux/regmap.h>
#include <linux/regulator/driver.h>
#include <linux/regulator/machine.h>
#include <linux/regulator/mt6315-regulator.h>
#include <linux/regulator/of_regulator.h>
#include <linux/spmi.h>
#define MT6315_BUCK_MODE_AUTO 0
#define MT6315_BUCK_MODE_FORCE_PWM 1
#define MT6315_BUCK_MODE_LP 2
/* Per-buck static data: core descriptor plus chip-specific register info. */
struct mt6315_regulator_info {
	struct regulator_desc desc;
	u32 status_reg;		/* DBG register holding the on/off state bit */
	u32 lp_mode_mask;	/* low-power enable bit in BUCK_TOP_CON1 */
	u32 lp_mode_shift;
};

/* Per-device data resolved at probe time from the SPMI usid. */
struct mt_regulator_init_data {
	u32 modeset_mask[MT6315_VBUCK_MAX];	/* force-PWM bits per buck */
};

struct mt6315_chip {
	struct device *dev;
	struct regmap *regmap;
};
/*
 * Initializer for one buck entry in mt6315_regulators[]: all bucks share
 * the same 0 .. 1.19375 V / 6.25 mV range and the common enable register;
 * only the name, id and voltage-select register differ.
 */
#define MT_BUCK(_name, _bid, _vsel) \
[_bid] = { \
	.desc = { \
		.name = _name, \
		.of_match = of_match_ptr(_name), \
		.regulators_node = "regulators", \
		.ops = &mt6315_volt_range_ops, \
		.type = REGULATOR_VOLTAGE, \
		.id = _bid, \
		.owner = THIS_MODULE, \
		.n_voltages = 0xc0, \
		.linear_ranges = mt_volt_range1, \
		.n_linear_ranges = ARRAY_SIZE(mt_volt_range1), \
		.vsel_reg = _vsel, \
		.vsel_mask = 0xff, \
		.enable_reg = MT6315_BUCK_TOP_CON0, \
		.enable_mask = BIT(_bid), \
		.of_map_mode = mt6315_map_mode, \
	}, \
	.status_reg = _bid##_DBG4, \
	.lp_mode_mask = BIT(_bid), \
	.lp_mode_shift = _bid, \
}

/* 0 V .. 1.19375 V in 6.25 mV steps (selectors 0-0xbf). */
static const struct linear_range mt_volt_range1[] = {
	REGULATOR_LINEAR_RANGE(0, 0, 0xbf, 6250),
};
/* Translate a MT6315 buck mode value (from DT) into a REGULATOR_MODE_* flag. */
static unsigned int mt6315_map_mode(unsigned int mode)
{
	if (mode == MT6315_BUCK_MODE_AUTO)
		return REGULATOR_MODE_NORMAL;
	if (mode == MT6315_BUCK_MODE_FORCE_PWM)
		return REGULATOR_MODE_FAST;
	if (mode == MT6315_BUCK_MODE_LP)
		return REGULATOR_MODE_IDLE;

	return REGULATOR_MODE_INVALID;
}
/*
 * Read back the buck's operating mode: force-PWM bits set -> FAST,
 * otherwise the low-power bit in BUCK_TOP_CON1 decides IDLE vs NORMAL.
 *
 * NOTE(review): on a regmap failure this returns a negative errno from a
 * function declared to return unsigned int - callers see a large positive
 * value; confirm this matches the regulator core's expectations.
 */
static unsigned int mt6315_regulator_get_mode(struct regulator_dev *rdev)
{
	struct mt_regulator_init_data *init = rdev_get_drvdata(rdev);
	const struct mt6315_regulator_info *info;
	int ret, regval;
	u32 modeset_mask;

	info = container_of(rdev->desc, struct mt6315_regulator_info, desc);
	modeset_mask = init->modeset_mask[rdev_get_id(rdev)];
	ret = regmap_read(rdev->regmap, MT6315_BUCK_TOP_4PHASE_ANA_CON42, &regval);
	if (ret != 0) {
		dev_err(&rdev->dev, "Failed to get mode: %d\n", ret);
		return ret;
	}

	/* all phase bits must be set for the buck to be in forced PWM */
	if ((regval & modeset_mask) == modeset_mask)
		return REGULATOR_MODE_FAST;

	ret = regmap_read(rdev->regmap, MT6315_BUCK_TOP_CON1, &regval);
	if (ret != 0) {
		dev_err(&rdev->dev, "Failed to get lp mode: %d\n", ret);
		return ret;
	}

	if (regval & info->lp_mode_mask)
		return REGULATOR_MODE_IDLE;
	else
		return REGULATOR_MODE_NORMAL;
}
/*
 * Switch a buck between FAST (forced PWM), NORMAL and IDLE (low power).
 * Leaving IDLE for NORMAL waits ~100us for the regulator to ramp back up.
 *
 * Returns 0 on success, -EINVAL for an unsupported mode or transition,
 * or a regmap error.
 */
static int mt6315_regulator_set_mode(struct regulator_dev *rdev,
	u32 mode)
{
	struct mt_regulator_init_data *init = rdev_get_drvdata(rdev);
	const struct mt6315_regulator_info *info;
	int ret, val, curr_mode;
	u32 modeset_mask;

	info = container_of(rdev->desc, struct mt6315_regulator_info, desc);
	modeset_mask = init->modeset_mask[rdev_get_id(rdev)];
	curr_mode = mt6315_regulator_get_mode(rdev);
	switch (mode) {
	case REGULATOR_MODE_FAST:
		ret = regmap_update_bits(rdev->regmap,
					 MT6315_BUCK_TOP_4PHASE_ANA_CON42,
					 modeset_mask,
					 modeset_mask);
		break;
	case REGULATOR_MODE_NORMAL:
		/* NORMAL is only reachable from FAST or IDLE */
		if (curr_mode == REGULATOR_MODE_FAST) {
			ret = regmap_update_bits(rdev->regmap,
						 MT6315_BUCK_TOP_4PHASE_ANA_CON42,
						 modeset_mask,
						 0);
		} else if (curr_mode == REGULATOR_MODE_IDLE) {
			ret = regmap_update_bits(rdev->regmap,
						 MT6315_BUCK_TOP_CON1,
						 info->lp_mode_mask,
						 0);
			/* allow the output to settle after leaving LP mode */
			usleep_range(100, 110);
		} else {
			ret = -EINVAL;
		}
		break;
	case REGULATOR_MODE_IDLE:
		val = MT6315_BUCK_MODE_LP >> 1;
		val <<= info->lp_mode_shift;
		ret = regmap_update_bits(rdev->regmap,
					 MT6315_BUCK_TOP_CON1,
					 info->lp_mode_mask,
					 val);
		break;
	default:
		ret = -EINVAL;
		dev_err(&rdev->dev, "Unsupported mode: %d\n", mode);
		break;
	}

	if (ret != 0) {
		dev_err(&rdev->dev, "Failed to set mode: %d\n", ret);
		return ret;
	}

	return 0;
}
/*
 * Report whether the buck output is actually on by reading bit 0 of its
 * debug/status register.
 *
 * Returns REGULATOR_STATUS_ON/OFF, or a negative regmap error.
 */
static int mt6315_get_status(struct regulator_dev *rdev)
{
	const struct mt6315_regulator_info *info;
	int ret;
	u32 regval;

	info = container_of(rdev->desc, struct mt6315_regulator_info, desc);
	ret = regmap_read(rdev->regmap, info->status_reg, &regval);
	if (ret < 0) {
		dev_err(&rdev->dev, "Failed to get enable reg: %d\n", ret);
		return ret;
	}

	return (regval & BIT(0)) ? REGULATOR_STATUS_ON : REGULATOR_STATUS_OFF;
}
/* Shared ops for all four bucks (linear-range voltage control). */
static const struct regulator_ops mt6315_volt_range_ops = {
	.list_voltage = regulator_list_voltage_linear_range,
	.map_voltage = regulator_map_voltage_linear_range,
	.set_voltage_sel = regulator_set_voltage_sel_regmap,
	.get_voltage_sel = regulator_get_voltage_sel_regmap,
	.set_voltage_time_sel = regulator_set_voltage_time_sel,
	.enable = regulator_enable_regmap,
	.disable = regulator_disable_regmap,
	.is_enabled = regulator_is_enabled_regmap,
	.get_status = mt6315_get_status,
	.set_mode = mt6315_regulator_set_mode,
	.get_mode = mt6315_regulator_get_mode,
};

/* Static per-buck data, indexed by MT6315_VBUCKn. */
static const struct mt6315_regulator_info mt6315_regulators[MT6315_VBUCK_MAX] = {
	MT_BUCK("vbuck1", MT6315_VBUCK1, MT6315_BUCK_TOP_ELR0),
	MT_BUCK("vbuck2", MT6315_VBUCK2, MT6315_BUCK_TOP_ELR2),
	MT_BUCK("vbuck3", MT6315_VBUCK3, MT6315_BUCK_TOP_ELR4),
	MT_BUCK("vbuck4", MT6315_VBUCK4, MT6315_BUCK_TOP_ELR6),
};

/* 16-bit SPMI register addresses, 8-bit values. */
static const struct regmap_config mt6315_regmap_config = {
	.reg_bits = 16,
	.val_bits = 8,
	.max_register = 0x16d0,
	.fast_io = true,
};

static const struct of_device_id mt6315_of_match[] = {
	{
		.compatible = "mediatek,mt6315-regulator",
	}, {
		/* sentinel */
	},
};
MODULE_DEVICE_TABLE(of, mt6315_of_match);
/*
 * SPMI probe: set up the regmap, derive the per-buck force-PWM masks from
 * the device's SPMI usid (different board roles gang different phases
 * onto VBUCK1), then register all four bucks.
 */
static int mt6315_regulator_probe(struct spmi_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct regmap *regmap;
	struct mt6315_chip *chip;
	struct mt_regulator_init_data *init_data;
	struct regulator_config config = {};
	struct regulator_dev *rdev;
	int i;

	regmap = devm_regmap_init_spmi_ext(pdev, &mt6315_regmap_config);
	if (IS_ERR(regmap))
		return PTR_ERR(regmap);

	chip = devm_kzalloc(dev, sizeof(struct mt6315_chip), GFP_KERNEL);
	if (!chip)
		return -ENOMEM;

	init_data = devm_kzalloc(dev, sizeof(struct mt_regulator_init_data), GFP_KERNEL);
	if (!init_data)
		return -ENOMEM;

	/* VBUCK1's force-PWM mask covers the phases ganged with it */
	switch (pdev->usid) {
	case MT6315_PP:
		init_data->modeset_mask[MT6315_VBUCK1] = BIT(MT6315_VBUCK1) | BIT(MT6315_VBUCK2) |
							 BIT(MT6315_VBUCK4);
		break;
	case MT6315_SP:
	case MT6315_RP:
		init_data->modeset_mask[MT6315_VBUCK1] = BIT(MT6315_VBUCK1) | BIT(MT6315_VBUCK2);
		break;
	default:
		init_data->modeset_mask[MT6315_VBUCK1] = BIT(MT6315_VBUCK1);
		break;
	}
	/* remaining bucks control only their own phase bit */
	for (i = MT6315_VBUCK2; i < MT6315_VBUCK_MAX; i++)
		init_data->modeset_mask[i] = BIT(i);

	chip->dev = dev;
	chip->regmap = regmap;
	dev_set_drvdata(dev, chip);

	config.dev = dev;
	config.regmap = regmap;
	for (i = MT6315_VBUCK1; i < MT6315_VBUCK_MAX; i++) {
		config.driver_data = init_data;
		rdev = devm_regulator_register(dev, &mt6315_regulators[i].desc, &config);
		if (IS_ERR(rdev)) {
			dev_err(dev, "Failed to register %s\n",
				mt6315_regulators[i].desc.name);
			return PTR_ERR(rdev);
		}
	}

	return 0;
}
/*
 * Arm the chip's power-off sequence on shutdown: unlock the TMA
 * protection keys, set the power-off bit in TOP2_ELR7, then re-lock.
 *
 * NOTE(review): errors are OR-ed into @ret; any regmap failure keeps the
 * sign bit so the check fires, but the logged value may be a mix of
 * error codes rather than a single errno.
 */
static void mt6315_regulator_shutdown(struct spmi_device *pdev)
{
	struct mt6315_chip *chip = dev_get_drvdata(&pdev->dev);
	int ret = 0;

	ret |= regmap_write(chip->regmap, MT6315_TOP_TMA_KEY_H, PROTECTION_KEY_H);
	ret |= regmap_write(chip->regmap, MT6315_TOP_TMA_KEY, PROTECTION_KEY);
	ret |= regmap_update_bits(chip->regmap, MT6315_TOP2_ELR7, 1, 1);
	ret |= regmap_write(chip->regmap, MT6315_TOP_TMA_KEY, 0);
	ret |= regmap_write(chip->regmap, MT6315_TOP_TMA_KEY_H, 0);
	if (ret < 0)
		dev_err(&pdev->dev, "[%#x] Failed to enable power off sequence. %d\n",
			pdev->usid, ret);
}
/* SPMI driver glue; matched via the OF table above. */
static struct spmi_driver mt6315_regulator_driver = {
	.driver		= {
		.name	= "mt6315-regulator",
		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
		.of_match_table = mt6315_of_match,
	},
	.probe = mt6315_regulator_probe,
	.shutdown = mt6315_regulator_shutdown,
};

module_spmi_driver(mt6315_regulator_driver);
MODULE_AUTHOR("Hsin-Hsiung Wang <[email protected]>");
MODULE_DESCRIPTION("Regulator Driver for MediaTek MT6315 PMIC");
MODULE_LICENSE("GPL");
| linux-master | drivers/regulator/mt6315-regulator.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2012 Texas Instruments
*
* Author: Milo(Woogyom) Kim <[email protected]>
*/
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/i2c.h>
#include <linux/regmap.h>
#include <linux/err.h>
#include <linux/gpio/consumer.h>
#include <linux/delay.h>
#include <linux/regulator/lp872x.h>
#include <linux/regulator/driver.h>
#include <linux/platform_device.h>
#include <linux/of.h>
#include <linux/regulator/of_regulator.h>
/* Registers : LP8720/8725 shared */
#define LP872X_GENERAL_CFG		0x00
#define LP872X_LDO1_VOUT		0x01
#define LP872X_LDO2_VOUT		0x02
#define LP872X_LDO3_VOUT		0x03
#define LP872X_LDO4_VOUT		0x04
#define LP872X_LDO5_VOUT		0x05

/* Registers : LP8720 */
#define LP8720_BUCK_VOUT1		0x06
#define LP8720_BUCK_VOUT2		0x07
#define LP8720_ENABLE			0x08

/* Registers : LP8725 */
#define LP8725_LILO1_VOUT		0x06
#define LP8725_LILO2_VOUT		0x07
#define LP8725_BUCK1_VOUT1		0x08
#define LP8725_BUCK1_VOUT2		0x09
#define LP8725_BUCK2_VOUT1		0x0A
#define LP8725_BUCK2_VOUT2		0x0B
#define LP8725_BUCK_CTRL		0x0C
#define LP8725_LDO_CTRL			0x0D

/* Mask/shift : LP8720/LP8725 shared */
#define LP872X_VOUT_M			0x1F	/* voltage selector bits of a VOUT reg */
#define LP872X_START_DELAY_M		0xE0	/* start-up delay code, upper 3 bits */
#define LP872X_START_DELAY_S		5
#define LP872X_EN_LDO1_M		BIT(0)
#define LP872X_EN_LDO2_M		BIT(1)
#define LP872X_EN_LDO3_M		BIT(2)
#define LP872X_EN_LDO4_M		BIT(3)
#define LP872X_EN_LDO5_M		BIT(4)

/* Mask/shift : LP8720 */
#define LP8720_TIMESTEP_S		0		/* Addr 00h */
#define LP8720_TIMESTEP_M		BIT(0)
#define LP8720_EXT_DVS_M		BIT(2)
#define LP8720_BUCK_FPWM_S		5		/* Addr 07h */
#define LP8720_BUCK_FPWM_M		BIT(5)
#define LP8720_EN_BUCK_M		BIT(5)		/* Addr 08h */
#define LP8720_DVS_SEL_M		BIT(7)

/* Mask/shift : LP8725 */
#define LP8725_TIMESTEP_M		0xC0		/* Addr 00h */
#define LP8725_TIMESTEP_S		6
#define LP8725_BUCK1_EN_M		BIT(0)
#define LP8725_DVS1_M			BIT(2)
#define LP8725_DVS2_M			BIT(3)
#define LP8725_BUCK2_EN_M		BIT(4)
#define LP8725_BUCK_CL_M		0xC0		/* Addr 09h, 0Bh */
#define LP8725_BUCK_CL_S		6
#define LP8725_BUCK1_FPWM_S		1		/* Addr 0Ch */
#define LP8725_BUCK1_FPWM_M		BIT(1)
#define LP8725_BUCK2_FPWM_S		5
#define LP8725_BUCK2_FPWM_M		BIT(5)
#define LP8725_EN_LILO1_M		BIT(5)		/* Addr 0Dh */
#define LP8725_EN_LILO2_M		BIT(6)

/* PWM mode */
#define LP872X_FORCE_PWM		1
#define LP872X_AUTO_PWM			0

#define LP8720_NUM_REGULATORS		6
#define LP8725_NUM_REGULATORS		9
#define EXTERN_DVS_USED			0
#define MAX_DELAY			6	/* highest valid start-delay code */

/* Default DVS Mode */
#define LP8720_DEFAULT_DVS		0
#define LP8725_DEFAULT_DVS		BIT(2)

/* dump registers in regmap-debugfs */
#define MAX_REGISTERS			0x0F

/* Supported chip variants; also used to index per-chip tables below. */
enum lp872x_id {
	LP8720,
	LP8725,
};

/* Per-device driver state. */
struct lp872x {
	struct regmap *regmap;			/* register access */
	struct device *dev;
	enum lp872x_id chipid;			/* LP8720 or LP8725 */
	struct lp872x_platform_data *pdata;	/* board data, may be NULL */
	int num_regulators;			/* 6 on LP8720, 9 on LP8725 */
	enum gpiod_flags dvs_pin;		/* last driven DVS pin state */
};

/* LP8720/LP8725 shared voltage table for LDOs (index == VOUT selector) */
static const unsigned int lp872x_ldo_vtbl[] = {
	1200000, 1250000, 1300000, 1350000, 1400000, 1450000, 1500000, 1550000,
	1600000, 1650000, 1700000, 1750000, 1800000, 1850000, 1900000, 2000000,
	2100000, 2200000, 2300000, 2400000, 2500000, 2600000, 2650000, 2700000,
	2750000, 2800000, 2850000, 2900000, 2950000, 3000000, 3100000, 3300000,
};

/* LP8720 LDO4 voltage table */
static const unsigned int lp8720_ldo4_vtbl[] = {
	800000, 850000, 900000, 1000000, 1100000, 1200000, 1250000, 1300000,
	1350000, 1400000, 1450000, 1500000, 1550000, 1600000, 1650000, 1700000,
	1750000, 1800000, 1850000, 1900000, 2000000, 2100000, 2200000, 2300000,
	2400000, 2500000, 2600000, 2650000, 2700000, 2750000, 2800000, 2850000,
};

/* LP8725 LILO(Low Input Low Output) voltage table */
static const unsigned int lp8725_lilo_vtbl[] = {
	800000, 850000, 900000, 950000, 1000000, 1050000, 1100000, 1150000,
	1200000, 1250000, 1300000, 1350000, 1400000, 1500000, 1600000, 1700000,
	1800000, 1900000, 2000000, 2100000, 2200000, 2300000, 2400000, 2500000,
	2600000, 2700000, 2800000, 2850000, 2900000, 3000000, 3100000, 3300000,
};

/* LP8720 BUCK voltage table */
#define EXT_R		0	/* external resistor divider */
static const unsigned int lp8720_buck_vtbl[] = {
	EXT_R, 800000, 850000, 900000, 950000, 1000000, 1050000, 1100000,
	1150000, 1200000, 1250000, 1300000, 1350000, 1400000, 1450000, 1500000,
	1550000, 1600000, 1650000, 1700000, 1750000, 1800000, 1850000, 1900000,
	1950000, 2000000, 2050000, 2100000, 2150000, 2200000, 2250000, 2300000,
};

/* LP8725 BUCK voltage table */
static const unsigned int lp8725_buck_vtbl[] = {
	800000, 850000, 900000, 950000, 1000000, 1050000, 1100000, 1150000,
	1200000, 1250000, 1300000, 1350000, 1400000, 1500000, 1600000, 1700000,
	1750000, 1800000, 1850000, 1900000, 2000000, 2100000, 2200000, 2300000,
	2400000, 2500000, 2600000, 2700000, 2800000, 2850000, 2900000, 3000000,
};

/* LP8725 BUCK current limit (uA), selected by LP8725_BUCK_CL_M */
static const unsigned int lp8725_buck_uA[] = {
	460000, 780000, 1050000, 1370000,
};
/* Read one 8-bit register; logs and returns a negative errno on failure. */
static int lp872x_read_byte(struct lp872x *lp, u8 addr, u8 *data)
{
	unsigned int regval;
	int err;

	err = regmap_read(lp->regmap, addr, &regval);
	if (err < 0) {
		dev_err(lp->dev, "failed to read 0x%.2x\n", addr);
		return err;
	}

	*data = (u8)regval;
	return 0;
}
/* Write one 8-bit register through regmap. */
static inline int lp872x_write_byte(struct lp872x *lp, u8 addr, u8 data)
{
	return regmap_write(lp->regmap, addr, data);
}

/* Read-modify-write the bits selected by @mask in register @addr. */
static inline int lp872x_update_bits(struct lp872x *lp, u8 addr,
				unsigned int mask, u8 data)
{
	return regmap_update_bits(lp->regmap, addr, mask, data);
}
/*
 * lp872x_get_timestep_usec - read the chip-wide start-delay time step
 * @lp: driver state
 *
 * The per-output start delay is a multiple of a global time step stored
 * in GENERAL_CFG.  LP8720 supports 25/50 us, LP8725 32/64/128/256 us.
 *
 * Returns the time step in microseconds, or a negative errno on failure
 * or an out-of-range field value.
 */
static int lp872x_get_timestep_usec(struct lp872x *lp)
{
	enum lp872x_id chip = lp->chipid;
	u8 val, mask, shift;
	const int *time_usec;
	int size, ret;
	/* Immutable lookup tables: keep them static const instead of
	 * re-initializing them on the stack on every call. */
	static const int lp8720_time_usec[] = { 25, 50 };
	static const int lp8725_time_usec[] = { 32, 64, 128, 256 };

	switch (chip) {
	case LP8720:
		mask = LP8720_TIMESTEP_M;
		shift = LP8720_TIMESTEP_S;
		time_usec = lp8720_time_usec;
		size = ARRAY_SIZE(lp8720_time_usec);
		break;
	case LP8725:
		mask = LP8725_TIMESTEP_M;
		shift = LP8725_TIMESTEP_S;
		time_usec = lp8725_time_usec;
		size = ARRAY_SIZE(lp8725_time_usec);
		break;
	default:
		return -EINVAL;
	}

	ret = lp872x_read_byte(lp, LP872X_GENERAL_CFG, &val);
	if (ret)
		return ret;

	val = (val & mask) >> shift;
	if (val >= size)
		return -EINVAL;

	return time_usec[val];
}
/*
 * Return the regulator start-up time in microseconds.
 *
 * Each output stores its start delay code in the upper bits of its VOUT
 * register; the code is multiplied by the chip-wide time step.  Codes
 * above MAX_DELAY are reported as zero delay.
 */
static int lp872x_regulator_enable_time(struct regulator_dev *rdev)
{
	struct lp872x *lp = rdev_get_drvdata(rdev);
	enum lp872x_regulator_id rid = rdev_get_id(rdev);
	int time_step_us = lp872x_get_timestep_usec(lp);
	int ret;
	u8 addr, val;

	if (time_step_us < 0)
		return time_step_us;

	switch (rid) {
	case LP8720_ID_LDO1 ... LP8720_ID_BUCK:
		/* LP8720 VOUT registers follow the regulator IDs from 0x01. */
		addr = LP872X_LDO1_VOUT + rid;
		break;
	case LP8725_ID_LDO1 ... LP8725_ID_BUCK1:
		/* LP8725 IDs start at LP8725_ID_BASE — assumes the same
		 * contiguous VOUT layout; enum defined elsewhere, verify. */
		addr = LP872X_LDO1_VOUT + rid - LP8725_ID_BASE;
		break;
	case LP8725_ID_BUCK2:
		addr = LP8725_BUCK2_VOUT1;
		break;
	default:
		return -EINVAL;
	}

	ret = lp872x_read_byte(lp, addr, &val);
	if (ret)
		return ret;

	val = (val & LP872X_START_DELAY_M) >> LP872X_START_DELAY_S;

	/* Delay codes beyond MAX_DELAY are treated as "no delay". */
	return val > MAX_DELAY ? 0 : val * time_step_us;
}
static void lp872x_set_dvs(struct lp872x *lp, enum lp872x_dvs_sel dvs_sel,
struct gpio_desc *gpio)
{
enum gpiod_flags state;
state = dvs_sel == SEL_V1 ? GPIOD_OUT_HIGH : GPIOD_OUT_LOW;
gpiod_set_value(gpio, state);
lp->dvs_pin = state;
}
/*
 * Pick which VOUT register is currently active for a buck.
 *
 * The active register depends on the DVS configuration in GENERAL_CFG
 * (and, for LP8720, on the DVS_SEL bit in the ENABLE register when
 * external DVS is not used).  Returns 0 — an invalid buck address — on
 * read failure or unknown regulator id; callers must validate with
 * lp872x_is_valid_buck_addr().
 */
static u8 lp872x_select_buck_vout_addr(struct lp872x *lp,
				enum lp872x_regulator_id buck)
{
	u8 val, addr;

	if (lp872x_read_byte(lp, LP872X_GENERAL_CFG, &val))
		return 0;

	switch (buck) {
	case LP8720_ID_BUCK:
		if (val & LP8720_EXT_DVS_M) {
			/* External DVS pin selects between the two VOUTs. */
			addr = (lp->dvs_pin == GPIOD_OUT_HIGH) ?
				LP8720_BUCK_VOUT1 : LP8720_BUCK_VOUT2;
		} else {
			/* Internal selection via DVS_SEL in the ENABLE reg. */
			if (lp872x_read_byte(lp, LP8720_ENABLE, &val))
				return 0;
			addr = val & LP8720_DVS_SEL_M ?
				LP8720_BUCK_VOUT1 : LP8720_BUCK_VOUT2;
		}
		break;
	case LP8725_ID_BUCK1:
		if (val & LP8725_DVS1_M)
			addr = LP8725_BUCK1_VOUT1;
		else
			addr = (lp->dvs_pin == GPIOD_OUT_HIGH) ?
				LP8725_BUCK1_VOUT1 : LP8725_BUCK1_VOUT2;
		break;
	case LP8725_ID_BUCK2:
		addr = val & LP8725_DVS2_M ?
			LP8725_BUCK2_VOUT1 : LP8725_BUCK2_VOUT2;
		break;
	default:
		return 0;
	}

	return addr;
}
/*
 * True iff @addr is one of the buck VOUT registers.  These occupy the
 * contiguous range 0x06 (LP8720_BUCK_VOUT1) .. 0x0B (LP8725_BUCK2_VOUT2)
 * on both chip variants, so a range check is equivalent to listing them.
 */
static bool lp872x_is_valid_buck_addr(u8 addr)
{
	return addr >= LP8720_BUCK_VOUT1 && addr <= LP8725_BUCK2_VOUT2;
}
/* Write a buck voltage selector into whichever VOUT register is active. */
static int lp872x_buck_set_voltage_sel(struct regulator_dev *rdev,
				unsigned selector)
{
	struct lp872x *lp = rdev_get_drvdata(rdev);
	enum lp872x_regulator_id buck = rdev_get_id(rdev);
	struct lp872x_dvs *dvs = lp->pdata ? lp->pdata->dvs : NULL;
	u8 vout_addr;

	/* Re-assert the DVS pin first so the right VOUT register is live. */
	if (dvs && dvs->gpio)
		lp872x_set_dvs(lp, dvs->vsel, dvs->gpio);

	vout_addr = lp872x_select_buck_vout_addr(lp, buck);
	if (!lp872x_is_valid_buck_addr(vout_addr))
		return -EINVAL;

	return lp872x_update_bits(lp, vout_addr, LP872X_VOUT_M, selector);
}
/* Read the current buck voltage selector from the active VOUT register. */
static int lp872x_buck_get_voltage_sel(struct regulator_dev *rdev)
{
	struct lp872x *lp = rdev_get_drvdata(rdev);
	u8 vout_addr, regval;
	int err;

	vout_addr = lp872x_select_buck_vout_addr(lp, rdev_get_id(rdev));
	if (!lp872x_is_valid_buck_addr(vout_addr))
		return -EINVAL;

	err = lp872x_read_byte(lp, vout_addr, &regval);
	if (err)
		return err;

	return regval & LP872X_VOUT_M;
}
/* Select forced-PWM (FAST) or automatic PWM/PFM (NORMAL) for a buck. */
static int lp872x_buck_set_mode(struct regulator_dev *rdev, unsigned int mode)
{
	struct lp872x *lp = rdev_get_drvdata(rdev);
	u8 addr, mask, shift, val;

	/* Only these two operating modes are supported. */
	if (mode != REGULATOR_MODE_FAST && mode != REGULATOR_MODE_NORMAL)
		return -EINVAL;

	switch (rdev_get_id(rdev)) {
	case LP8720_ID_BUCK:
		addr = LP8720_BUCK_VOUT2;
		mask = LP8720_BUCK_FPWM_M;
		shift = LP8720_BUCK_FPWM_S;
		break;
	case LP8725_ID_BUCK1:
		addr = LP8725_BUCK_CTRL;
		mask = LP8725_BUCK1_FPWM_M;
		shift = LP8725_BUCK1_FPWM_S;
		break;
	case LP8725_ID_BUCK2:
		addr = LP8725_BUCK_CTRL;
		mask = LP8725_BUCK2_FPWM_M;
		shift = LP8725_BUCK2_FPWM_S;
		break;
	default:
		return -EINVAL;
	}

	val = (mode == REGULATOR_MODE_FAST ? LP872X_FORCE_PWM :
					     LP872X_AUTO_PWM) << shift;

	return lp872x_update_bits(lp, addr, mask, val);
}
static unsigned int lp872x_buck_get_mode(struct regulator_dev *rdev)
{
struct lp872x *lp = rdev_get_drvdata(rdev);
enum lp872x_regulator_id buck = rdev_get_id(rdev);
u8 addr, mask, val;
int ret;
switch (buck) {
case LP8720_ID_BUCK:
addr = LP8720_BUCK_VOUT2;
mask = LP8720_BUCK_FPWM_M;
break;
case LP8725_ID_BUCK1:
addr = LP8725_BUCK_CTRL;
mask = LP8725_BUCK1_FPWM_M;
break;
case LP8725_ID_BUCK2:
addr = LP8725_BUCK_CTRL;
mask = LP8725_BUCK2_FPWM_M;
break;
default:
return -EINVAL;
}
ret = lp872x_read_byte(lp, addr, &val);
if (ret)
return ret;
return val & mask ? REGULATOR_MODE_FAST : REGULATOR_MODE_NORMAL;
}
/* LDOs: fixed selector registers handled generically via regmap. */
static const struct regulator_ops lp872x_ldo_ops = {
	.list_voltage = regulator_list_voltage_table,
	.map_voltage = regulator_map_voltage_ascend,
	.set_voltage_sel = regulator_set_voltage_sel_regmap,
	.get_voltage_sel = regulator_get_voltage_sel_regmap,
	.enable = regulator_enable_regmap,
	.disable = regulator_disable_regmap,
	.is_enabled = regulator_is_enabled_regmap,
	.enable_time = lp872x_regulator_enable_time,
};

/* LP8720 buck: VOUT register chosen at runtime, so custom voltage ops. */
static const struct regulator_ops lp8720_buck_ops = {
	.list_voltage = regulator_list_voltage_table,
	.map_voltage = regulator_map_voltage_ascend,
	.set_voltage_sel = lp872x_buck_set_voltage_sel,
	.get_voltage_sel = lp872x_buck_get_voltage_sel,
	.enable = regulator_enable_regmap,
	.disable = regulator_disable_regmap,
	.is_enabled = regulator_is_enabled_regmap,
	.enable_time = lp872x_regulator_enable_time,
	.set_mode = lp872x_buck_set_mode,
	.get_mode = lp872x_buck_get_mode,
};

/* LP8725 bucks additionally support a programmable current limit. */
static const struct regulator_ops lp8725_buck_ops = {
	.list_voltage = regulator_list_voltage_table,
	.map_voltage = regulator_map_voltage_ascend,
	.set_voltage_sel = lp872x_buck_set_voltage_sel,
	.get_voltage_sel = lp872x_buck_get_voltage_sel,
	.enable = regulator_enable_regmap,
	.disable = regulator_disable_regmap,
	.is_enabled = regulator_is_enabled_regmap,
	.enable_time = lp872x_regulator_enable_time,
	.set_mode = lp872x_buck_set_mode,
	.get_mode = lp872x_buck_get_mode,
	.set_current_limit = regulator_set_current_limit_regmap,
	.get_current_limit = regulator_get_current_limit_regmap,
};
/*
 * LP8720 regulator descriptors.  The buck entry has no vsel_reg because
 * the active VOUT register is selected at runtime by the custom ops.
 */
static const struct regulator_desc lp8720_regulator_desc[] = {
	{
		.name = "ldo1",
		.of_match = of_match_ptr("ldo1"),
		.id = LP8720_ID_LDO1,
		.ops = &lp872x_ldo_ops,
		.n_voltages = ARRAY_SIZE(lp872x_ldo_vtbl),
		.volt_table = lp872x_ldo_vtbl,
		.type = REGULATOR_VOLTAGE,
		.owner = THIS_MODULE,
		.vsel_reg = LP872X_LDO1_VOUT,
		.vsel_mask = LP872X_VOUT_M,
		.enable_reg = LP8720_ENABLE,
		.enable_mask = LP872X_EN_LDO1_M,
	},
	{
		.name = "ldo2",
		.of_match = of_match_ptr("ldo2"),
		.id = LP8720_ID_LDO2,
		.ops = &lp872x_ldo_ops,
		.n_voltages = ARRAY_SIZE(lp872x_ldo_vtbl),
		.volt_table = lp872x_ldo_vtbl,
		.type = REGULATOR_VOLTAGE,
		.owner = THIS_MODULE,
		.vsel_reg = LP872X_LDO2_VOUT,
		.vsel_mask = LP872X_VOUT_M,
		.enable_reg = LP8720_ENABLE,
		.enable_mask = LP872X_EN_LDO2_M,
	},
	{
		.name = "ldo3",
		.of_match = of_match_ptr("ldo3"),
		.id = LP8720_ID_LDO3,
		.ops = &lp872x_ldo_ops,
		.n_voltages = ARRAY_SIZE(lp872x_ldo_vtbl),
		.volt_table = lp872x_ldo_vtbl,
		.type = REGULATOR_VOLTAGE,
		.owner = THIS_MODULE,
		.vsel_reg = LP872X_LDO3_VOUT,
		.vsel_mask = LP872X_VOUT_M,
		.enable_reg = LP8720_ENABLE,
		.enable_mask = LP872X_EN_LDO3_M,
	},
	{
		/* LDO4 has its own, lower-voltage table. */
		.name = "ldo4",
		.of_match = of_match_ptr("ldo4"),
		.id = LP8720_ID_LDO4,
		.ops = &lp872x_ldo_ops,
		.n_voltages = ARRAY_SIZE(lp8720_ldo4_vtbl),
		.volt_table = lp8720_ldo4_vtbl,
		.type = REGULATOR_VOLTAGE,
		.owner = THIS_MODULE,
		.vsel_reg = LP872X_LDO4_VOUT,
		.vsel_mask = LP872X_VOUT_M,
		.enable_reg = LP8720_ENABLE,
		.enable_mask = LP872X_EN_LDO4_M,
	},
	{
		.name = "ldo5",
		.of_match = of_match_ptr("ldo5"),
		.id = LP8720_ID_LDO5,
		.ops = &lp872x_ldo_ops,
		.n_voltages = ARRAY_SIZE(lp872x_ldo_vtbl),
		.volt_table = lp872x_ldo_vtbl,
		.type = REGULATOR_VOLTAGE,
		.owner = THIS_MODULE,
		.vsel_reg = LP872X_LDO5_VOUT,
		.vsel_mask = LP872X_VOUT_M,
		.enable_reg = LP8720_ENABLE,
		.enable_mask = LP872X_EN_LDO5_M,
	},
	{
		.name = "buck",
		.of_match = of_match_ptr("buck"),
		.id = LP8720_ID_BUCK,
		.ops = &lp8720_buck_ops,
		.n_voltages = ARRAY_SIZE(lp8720_buck_vtbl),
		.volt_table = lp8720_buck_vtbl,
		.type = REGULATOR_VOLTAGE,
		.owner = THIS_MODULE,
		.enable_reg = LP8720_ENABLE,
		.enable_mask = LP8720_EN_BUCK_M,
	},
};
/*
 * LP8725 regulator descriptors: five LDOs, two LILOs and two bucks.
 * Buck entries omit vsel_reg (runtime VOUT selection) but carry a
 * current-limit table handled by the generic regmap helpers.
 */
static const struct regulator_desc lp8725_regulator_desc[] = {
	{
		.name = "ldo1",
		.of_match = of_match_ptr("ldo1"),
		.id = LP8725_ID_LDO1,
		.ops = &lp872x_ldo_ops,
		.n_voltages = ARRAY_SIZE(lp872x_ldo_vtbl),
		.volt_table = lp872x_ldo_vtbl,
		.type = REGULATOR_VOLTAGE,
		.owner = THIS_MODULE,
		.vsel_reg = LP872X_LDO1_VOUT,
		.vsel_mask = LP872X_VOUT_M,
		.enable_reg = LP8725_LDO_CTRL,
		.enable_mask = LP872X_EN_LDO1_M,
	},
	{
		.name = "ldo2",
		.of_match = of_match_ptr("ldo2"),
		.id = LP8725_ID_LDO2,
		.ops = &lp872x_ldo_ops,
		.n_voltages = ARRAY_SIZE(lp872x_ldo_vtbl),
		.volt_table = lp872x_ldo_vtbl,
		.type = REGULATOR_VOLTAGE,
		.owner = THIS_MODULE,
		.vsel_reg = LP872X_LDO2_VOUT,
		.vsel_mask = LP872X_VOUT_M,
		.enable_reg = LP8725_LDO_CTRL,
		.enable_mask = LP872X_EN_LDO2_M,
	},
	{
		.name = "ldo3",
		.of_match = of_match_ptr("ldo3"),
		.id = LP8725_ID_LDO3,
		.ops = &lp872x_ldo_ops,
		.n_voltages = ARRAY_SIZE(lp872x_ldo_vtbl),
		.volt_table = lp872x_ldo_vtbl,
		.type = REGULATOR_VOLTAGE,
		.owner = THIS_MODULE,
		.vsel_reg = LP872X_LDO3_VOUT,
		.vsel_mask = LP872X_VOUT_M,
		.enable_reg = LP8725_LDO_CTRL,
		.enable_mask = LP872X_EN_LDO3_M,
	},
	{
		.name = "ldo4",
		.of_match = of_match_ptr("ldo4"),
		.id = LP8725_ID_LDO4,
		.ops = &lp872x_ldo_ops,
		.n_voltages = ARRAY_SIZE(lp872x_ldo_vtbl),
		.volt_table = lp872x_ldo_vtbl,
		.type = REGULATOR_VOLTAGE,
		.owner = THIS_MODULE,
		.vsel_reg = LP872X_LDO4_VOUT,
		.vsel_mask = LP872X_VOUT_M,
		.enable_reg = LP8725_LDO_CTRL,
		.enable_mask = LP872X_EN_LDO4_M,
	},
	{
		.name = "ldo5",
		.of_match = of_match_ptr("ldo5"),
		.id = LP8725_ID_LDO5,
		.ops = &lp872x_ldo_ops,
		.n_voltages = ARRAY_SIZE(lp872x_ldo_vtbl),
		.volt_table = lp872x_ldo_vtbl,
		.type = REGULATOR_VOLTAGE,
		.owner = THIS_MODULE,
		.vsel_reg = LP872X_LDO5_VOUT,
		.vsel_mask = LP872X_VOUT_M,
		.enable_reg = LP8725_LDO_CTRL,
		.enable_mask = LP872X_EN_LDO5_M,
	},
	{
		.name = "lilo1",
		.of_match = of_match_ptr("lilo1"),
		.id = LP8725_ID_LILO1,
		.ops = &lp872x_ldo_ops,
		.n_voltages = ARRAY_SIZE(lp8725_lilo_vtbl),
		.volt_table = lp8725_lilo_vtbl,
		.type = REGULATOR_VOLTAGE,
		.owner = THIS_MODULE,
		.vsel_reg = LP8725_LILO1_VOUT,
		.vsel_mask = LP872X_VOUT_M,
		.enable_reg = LP8725_LDO_CTRL,
		.enable_mask = LP8725_EN_LILO1_M,
	},
	{
		.name = "lilo2",
		.of_match = of_match_ptr("lilo2"),
		.id = LP8725_ID_LILO2,
		.ops = &lp872x_ldo_ops,
		.n_voltages = ARRAY_SIZE(lp8725_lilo_vtbl),
		.volt_table = lp8725_lilo_vtbl,
		.type = REGULATOR_VOLTAGE,
		.owner = THIS_MODULE,
		.vsel_reg = LP8725_LILO2_VOUT,
		.vsel_mask = LP872X_VOUT_M,
		.enable_reg = LP8725_LDO_CTRL,
		.enable_mask = LP8725_EN_LILO2_M,
	},
	{
		.name = "buck1",
		.of_match = of_match_ptr("buck1"),
		.id = LP8725_ID_BUCK1,
		.ops = &lp8725_buck_ops,
		.n_voltages = ARRAY_SIZE(lp8725_buck_vtbl),
		.volt_table = lp8725_buck_vtbl,
		.type = REGULATOR_VOLTAGE,
		.owner = THIS_MODULE,
		.enable_reg = LP872X_GENERAL_CFG,
		.enable_mask = LP8725_BUCK1_EN_M,
		.curr_table = lp8725_buck_uA,
		.n_current_limits = ARRAY_SIZE(lp8725_buck_uA),
		.csel_reg = LP8725_BUCK1_VOUT2,
		.csel_mask = LP8725_BUCK_CL_M,
	},
	{
		.name = "buck2",
		.of_match = of_match_ptr("buck2"),
		.id = LP8725_ID_BUCK2,
		.ops = &lp8725_buck_ops,
		.n_voltages = ARRAY_SIZE(lp8725_buck_vtbl),
		.volt_table = lp8725_buck_vtbl,
		.type = REGULATOR_VOLTAGE,
		.owner = THIS_MODULE,
		.enable_reg = LP872X_GENERAL_CFG,
		.enable_mask = LP8725_BUCK2_EN_M,
		.curr_table = lp8725_buck_uA,
		.n_current_limits = ARRAY_SIZE(lp8725_buck_uA),
		.csel_reg = LP8725_BUCK2_VOUT2,
		.csel_mask = LP8725_BUCK_CL_M,
	},
};
/*
 * Configure DVS: request the DVS GPIO when platform data asks for it,
 * otherwise program the chip's default DVS mode.
 *
 * NOTE(review): dvs->gpio appears to act as a "use GPIO DVS" flag on
 * entry and is then overwritten with the descriptor obtained here —
 * confirm how platform-data callers populate it.
 */
static int lp872x_init_dvs(struct lp872x *lp)
{
	struct lp872x_dvs *dvs = lp->pdata ? lp->pdata->dvs : NULL;
	enum gpiod_flags pinstate;
	u8 mask[] = { LP8720_EXT_DVS_M, LP8725_DVS1_M | LP8725_DVS2_M };
	u8 default_dvs_mode[] = { LP8720_DEFAULT_DVS, LP8725_DEFAULT_DVS };

	if (!dvs)
		goto set_default_dvs_mode;

	if (!dvs->gpio)
		goto set_default_dvs_mode;

	/* Request the pin already driven to the desired initial level. */
	pinstate = dvs->init_state;
	dvs->gpio = devm_gpiod_get_optional(lp->dev, "ti,dvs", pinstate);

	if (IS_ERR(dvs->gpio)) {
		dev_err(lp->dev, "gpio request err: %ld\n", PTR_ERR(dvs->gpio));
		return PTR_ERR(dvs->gpio);
	}

	lp->dvs_pin = pinstate;

	return 0;

set_default_dvs_mode:
	/* mask/default arrays are indexed by chipid (LP8720=0, LP8725=1). */
	return lp872x_update_bits(lp, LP872X_GENERAL_CFG, mask[lp->chipid],
				default_dvs_mode[lp->chipid]);
}
/*
 * Assert the chip's hardware-enable GPIO (if one is configured) and
 * wait for the chip-specific power-up delay.
 *
 * NOTE(review): pdata->enable_gpio seems to be a request flag that is
 * replaced by the descriptor returned below — verify platform-data use.
 * LP8720_ENABLE_DELAY/LP8725_ENABLE_DELAY are defined elsewhere in the
 * file (outside this view).
 */
static int lp872x_hw_enable(struct lp872x *lp)
{
	if (!lp->pdata)
		return -EINVAL;

	if (!lp->pdata->enable_gpio)
		return 0;

	/* Always set enable GPIO high. */
	lp->pdata->enable_gpio = devm_gpiod_get_optional(lp->dev, "enable", GPIOD_OUT_HIGH);
	if (IS_ERR(lp->pdata->enable_gpio)) {
		dev_err(lp->dev, "gpio request err: %ld\n", PTR_ERR(lp->pdata->enable_gpio));
		return PTR_ERR(lp->pdata->enable_gpio);
	}

	/* Each chip has a different enable delay. */
	if (lp->chipid == LP8720)
		usleep_range(LP8720_ENABLE_DELAY, 1.5 * LP8720_ENABLE_DELAY);
	else
		usleep_range(LP8725_ENABLE_DELAY, 1.5 * LP8725_ENABLE_DELAY);

	return 0;
}
/* Optionally write the board-specific GENERAL_CFG, then set up DVS. */
static int lp872x_config(struct lp872x *lp)
{
	struct lp872x_platform_data *pdata = lp->pdata;

	if (pdata && pdata->update_config) {
		int ret = lp872x_write_byte(lp, LP872X_GENERAL_CFG,
					    pdata->general_config);
		if (ret)
			return ret;
	}

	return lp872x_init_dvs(lp);
}
/* Look up the platform-data init_data entry matching @id, or NULL. */
static struct regulator_init_data
*lp872x_find_regulator_init_data(int id, struct lp872x *lp)
{
	struct lp872x_platform_data *pdata = lp->pdata;
	int idx;

	if (!pdata)
		return NULL;

	for (idx = 0; idx < lp->num_regulators; idx++)
		if (pdata->regulator_data[idx].id == id)
			return pdata->regulator_data[idx].init_data;

	return NULL;
}
/*
 * Register every regulator for the detected chip variant.
 *
 * Fix: the error message was missing its terminating newline (printk
 * convention) and did not say which regulator failed.
 */
static int lp872x_regulator_register(struct lp872x *lp)
{
	const struct regulator_desc *desc;
	struct regulator_config cfg = { };
	struct regulator_dev *rdev;
	int i;

	for (i = 0; i < lp->num_regulators; i++) {
		desc = (lp->chipid == LP8720) ? &lp8720_regulator_desc[i] :
						&lp8725_regulator_desc[i];

		cfg.dev = lp->dev;
		cfg.init_data = lp872x_find_regulator_init_data(desc->id, lp);
		cfg.driver_data = lp;
		cfg.regmap = lp->regmap;

		rdev = devm_regulator_register(lp->dev, desc, &cfg);
		if (IS_ERR(rdev)) {
			dev_err(lp->dev, "regulator register err: %s\n",
				desc->name);
			return PTR_ERR(rdev);
		}
	}

	return 0;
}
/* 8-bit register map; MAX_REGISTERS also bounds regmap-debugfs dumps. */
static const struct regmap_config lp872x_regmap_config = {
	.reg_bits = 8,
	.val_bits = 8,
	.max_register = MAX_REGISTERS,
};

#ifdef CONFIG_OF

#define LP872X_VALID_OPMODE	(REGULATOR_MODE_FAST | REGULATOR_MODE_NORMAL)

/* DT child-node names mapped to LP8720 regulator IDs. */
static struct of_regulator_match lp8720_matches[] = {
	{ .name = "ldo1", .driver_data = (void *)LP8720_ID_LDO1, },
	{ .name = "ldo2", .driver_data = (void *)LP8720_ID_LDO2, },
	{ .name = "ldo3", .driver_data = (void *)LP8720_ID_LDO3, },
	{ .name = "ldo4", .driver_data = (void *)LP8720_ID_LDO4, },
	{ .name = "ldo5", .driver_data = (void *)LP8720_ID_LDO5, },
	{ .name = "buck", .driver_data = (void *)LP8720_ID_BUCK, },
};

/* DT child-node names mapped to LP8725 regulator IDs. */
static struct of_regulator_match lp8725_matches[] = {
	{ .name = "ldo1", .driver_data = (void *)LP8725_ID_LDO1, },
	{ .name = "ldo2", .driver_data = (void *)LP8725_ID_LDO2, },
	{ .name = "ldo3", .driver_data = (void *)LP8725_ID_LDO3, },
	{ .name = "ldo4", .driver_data = (void *)LP8725_ID_LDO4, },
	{ .name = "ldo5", .driver_data = (void *)LP8725_ID_LDO5, },
	{ .name = "lilo1", .driver_data = (void *)LP8725_ID_LILO1, },
	{ .name = "lilo2", .driver_data = (void *)LP8725_ID_LILO2, },
	{ .name = "buck1", .driver_data = (void *)LP8725_ID_BUCK1, },
	{ .name = "buck2", .driver_data = (void *)LP8725_ID_BUCK2, },
};
/*
 * Build platform data from the device-tree node.
 *
 * Reads the optional ti,* properties, then matches regulator child
 * nodes to IDs/init_data.  Returns an ERR_PTR on allocation failure;
 * a partially-filled pdata (no regulator_data) is still valid output.
 */
static struct lp872x_platform_data
*lp872x_populate_pdata_from_dt(struct device *dev, enum lp872x_id which)
{
	struct device_node *np = dev->of_node;
	struct lp872x_platform_data *pdata;
	struct of_regulator_match *match;
	int num_matches;
	int count;
	int i;
	u8 dvs_state;

	pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
	if (!pdata)
		return ERR_PTR(-ENOMEM);

	of_property_read_u8(np, "ti,general-config", &pdata->general_config);
	pdata->update_config = of_property_read_bool(np, "ti,update-config");

	pdata->dvs = devm_kzalloc(dev, sizeof(struct lp872x_dvs), GFP_KERNEL);
	if (!pdata->dvs)
		return ERR_PTR(-ENOMEM);

	of_property_read_u8(np, "ti,dvs-vsel", (u8 *)&pdata->dvs->vsel);
	of_property_read_u8(np, "ti,dvs-state", &dvs_state);
	pdata->dvs->init_state = dvs_state ? GPIOD_OUT_HIGH : GPIOD_OUT_LOW;

	/* No regulator child nodes: return pdata with only the ti,* props. */
	if (of_get_child_count(np) == 0)
		goto out;

	switch (which) {
	case LP8720:
		match = lp8720_matches;
		num_matches = ARRAY_SIZE(lp8720_matches);
		break;
	case LP8725:
		match = lp8725_matches;
		num_matches = ARRAY_SIZE(lp8725_matches);
		break;
	default:
		goto out;
	}

	count = of_regulator_match(dev, np, match, num_matches);
	if (count <= 0)
		goto out;

	/* Copy all match slots; unmatched entries carry NULL init_data. */
	for (i = 0; i < num_matches; i++) {
		pdata->regulator_data[i].id =
				(uintptr_t)match[i].driver_data;
		pdata->regulator_data[i].init_data = match[i].init_data;
	}
out:
	return pdata;
}
#else
/* Non-DT build: no platform data can be derived from firmware. */
static struct lp872x_platform_data
*lp872x_populate_pdata_from_dt(struct device *dev, enum lp872x_id which)
{
	return NULL;
}
#endif
/*
 * I2C probe: gather platform data (DT or board file), set up regmap,
 * power the chip on, apply configuration, and register regulators.
 * pdata may legitimately be NULL on the non-DT path.
 */
static int lp872x_probe(struct i2c_client *cl)
{
	const struct i2c_device_id *id = i2c_client_get_device_id(cl);
	struct lp872x *lp;
	struct lp872x_platform_data *pdata;
	int ret;
	/* Indexed by enum lp872x_id, which equals id->driver_data. */
	static const int lp872x_num_regulators[] = {
		[LP8720] = LP8720_NUM_REGULATORS,
		[LP8725] = LP8725_NUM_REGULATORS,
	};

	if (cl->dev.of_node) {
		pdata = lp872x_populate_pdata_from_dt(&cl->dev,
					      (enum lp872x_id)id->driver_data);
		if (IS_ERR(pdata))
			return PTR_ERR(pdata);
	} else {
		pdata = dev_get_platdata(&cl->dev);
	}

	lp = devm_kzalloc(&cl->dev, sizeof(struct lp872x), GFP_KERNEL);
	if (!lp)
		return -ENOMEM;

	lp->num_regulators = lp872x_num_regulators[id->driver_data];

	lp->regmap = devm_regmap_init_i2c(cl, &lp872x_regmap_config);
	if (IS_ERR(lp->regmap)) {
		ret = PTR_ERR(lp->regmap);
		dev_err(&cl->dev, "regmap init i2c err: %d\n", ret);
		return ret;
	}

	lp->dev = &cl->dev;
	lp->pdata = pdata;
	lp->chipid = id->driver_data;
	i2c_set_clientdata(cl, lp);

	/* Power the chip on before touching any of its registers. */
	ret = lp872x_hw_enable(lp);
	if (ret)
		return ret;

	ret = lp872x_config(lp);
	if (ret)
		return ret;

	return lp872x_regulator_register(lp);
}
/* Device-tree compatibles; chip variant is taken from the i2c id table. */
static const struct of_device_id lp872x_dt_ids[] __maybe_unused = {
	{ .compatible = "ti,lp8720", },
	{ .compatible = "ti,lp8725", },
	{ }
};
MODULE_DEVICE_TABLE(of, lp872x_dt_ids);

/* I2C ids carry the enum lp872x_id as driver_data. */
static const struct i2c_device_id lp872x_ids[] = {
	{"lp8720", LP8720},
	{"lp8725", LP8725},
	{ }
};
MODULE_DEVICE_TABLE(i2c, lp872x_ids);

static struct i2c_driver lp872x_driver = {
	.driver = {
		.name = "lp872x",
		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
		.of_match_table = of_match_ptr(lp872x_dt_ids),
	},
	.probe = lp872x_probe,
	.id_table = lp872x_ids,
};

module_i2c_driver(lp872x_driver);

MODULE_DESCRIPTION("TI/National Semiconductor LP872x PMU Regulator Driver");
MODULE_AUTHOR("Milo Kim");
MODULE_LICENSE("GPL");
| linux-master | drivers/regulator/lp872x.c |
// SPDX-License-Identifier: GPL-2.0
//
// AWINIC AW37503 Regulator Driver
//
// Copyright (C) 2023 awinic. All Rights Reserved
//
// Author: <[email protected]>
#include <linux/err.h>
#include <linux/gpio/consumer.h>
#include <linux/i2c.h>
#include <linux/module.h>
#include <linux/regmap.h>
#include <linux/regulator/driver.h>
#include <linux/regulator/machine.h>
#define AW37503_REG_VPOS		0x00	/* positive output voltage */
#define AW37503_REG_VNEG		0x01	/* negative output voltage */
#define AW37503_REG_APPS		0x03	/* active discharge control */
#define AW37503_REG_CONTROL		0x04
#define AW37503_REG_WPRTEN		0x21	/* write protect enable */

#define AW37503_VOUT_MASK		0x1F
#define AW37503_VOUT_N_VOLTAGE		0x15
#define AW37503_VOUT_VMIN		4000000	/* uV */
#define AW37503_VOUT_VMAX		6000000	/* uV */
#define AW37503_VOUT_STEP		100000	/* uV per selector step */

#define AW37503_REG_APPS_DIS_VPOS	BIT(1)
#define AW37503_REG_APPS_DIS_VNEG	BIT(0)

#define AW37503_REGULATOR_ID_VPOS	0
#define AW37503_REGULATOR_ID_VNEG	1
#define AW37503_MAX_REGULATORS		2

/* Per-output data: optional enable GPIO and its last driven state. */
struct aw37503_reg_pdata {
	struct gpio_desc *en_gpiod;	/* ERR_PTR when no GPIO was found */
	int ena_gpio_state;		/* software-tracked on/off state */
};

/* Per-device driver state. */
struct aw37503_regulator {
	struct device *dev;
	struct aw37503_reg_pdata reg_pdata[AW37503_MAX_REGULATORS];
};
/* Enable an output via its GPIO and undo the automatic discharge bit
 * when the constraints ask for active discharge to stay disabled. */
static int aw37503_regulator_enable(struct regulator_dev *rdev)
{
	struct aw37503_regulator *chip = rdev_get_drvdata(rdev);
	struct aw37503_reg_pdata *rpdata = &chip->reg_pdata[rdev_get_id(rdev)];
	int ret;

	if (!IS_ERR(rpdata->en_gpiod)) {
		gpiod_set_value_cansleep(rpdata->en_gpiod, 1);
		rpdata->ena_gpio_state = 1;
	}

	/* Hardware automatically enable discharge bit in enable */
	if (rdev->constraints->active_discharge !=
	    REGULATOR_ACTIVE_DISCHARGE_DISABLE)
		return 0;

	ret = regulator_set_active_discharge_regmap(rdev, false);
	if (ret < 0) {
		dev_err(chip->dev, "Failed to disable active discharge: %d\n",
			ret);
		return ret;
	}

	return 0;
}
/* Disable an output by de-asserting its enable GPIO, if one exists. */
static int aw37503_regulator_disable(struct regulator_dev *rdev)
{
	struct aw37503_regulator *chip = rdev_get_drvdata(rdev);
	struct aw37503_reg_pdata *rpdata = &chip->reg_pdata[rdev_get_id(rdev)];

	if (IS_ERR(rpdata->en_gpiod))
		return 0;

	gpiod_set_value_cansleep(rpdata->en_gpiod, 0);
	rpdata->ena_gpio_state = 0;

	return 0;
}
static int aw37503_regulator_is_enabled(struct regulator_dev *rdev)
{
struct aw37503_regulator *chip = rdev_get_drvdata(rdev);
int id = rdev_get_id(rdev);
struct aw37503_reg_pdata *rpdata = &chip->reg_pdata[id];
if (!IS_ERR(rpdata->en_gpiod))
return rpdata->ena_gpio_state;
return 1;
}
/* GPIO-based enable/disable; voltage and discharge handled via regmap. */
static const struct regulator_ops aw37503_regulator_ops = {
	.enable = aw37503_regulator_enable,
	.disable = aw37503_regulator_disable,
	.is_enabled = aw37503_regulator_is_enabled,
	.list_voltage = regulator_list_voltage_linear,
	.map_voltage = regulator_map_voltage_linear,
	.get_voltage_sel = regulator_get_voltage_sel_regmap,
	.set_voltage_sel = regulator_set_voltage_sel_regmap,
	.set_active_discharge = regulator_set_active_discharge_regmap,
};
/* Per-node DT parse callback: fetch the optional "enable" GPIO. */
static int aw37503_of_parse_cb(struct device_node *np,
			       const struct regulator_desc *desc,
			       struct regulator_config *config)
{
	struct aw37503_regulator *chip = config->driver_data;
	struct aw37503_reg_pdata *rpdata = &chip->reg_pdata[desc->id];

	rpdata->en_gpiod = devm_fwnode_gpiod_get(chip->dev, of_fwnode_handle(np),
						 "enable", GPIOD_OUT_LOW,
						 "enable");

	/* Ignore the error other than probe defer */
	if (IS_ERR(rpdata->en_gpiod) &&
	    PTR_ERR(rpdata->en_gpiod) == -EPROBE_DEFER)
		return -EPROBE_DEFER;

	return 0;
}
/*
 * Descriptor template for the two linear-range outputs.  _id selects the
 * register/bit macros (VPOS/VNEG); _name is the DT node / consumer name.
 */
#define AW37503_REGULATOR_DESC(_id, _name)			\
	[AW37503_REGULATOR_ID_##_id] = {			\
		.name = "aw37503-"#_name,			\
		.supply_name = "vin",				\
		.id = AW37503_REGULATOR_ID_##_id,		\
		.of_match = of_match_ptr(#_name),		\
		.of_parse_cb	= aw37503_of_parse_cb,		\
		.ops = &aw37503_regulator_ops,			\
		.n_voltages = AW37503_VOUT_N_VOLTAGE,		\
		.min_uV = AW37503_VOUT_VMIN,			\
		.uV_step = AW37503_VOUT_STEP,			\
		.enable_time = 500,				\
		.vsel_mask = AW37503_VOUT_MASK,			\
		.vsel_reg = AW37503_REG_##_id,			\
		.active_discharge_off = 0,			\
		.active_discharge_on = AW37503_REG_APPS_DIS_##_id, \
		.active_discharge_mask = AW37503_REG_APPS_DIS_##_id, \
		.active_discharge_reg = AW37503_REG_APPS,	\
		.type = REGULATOR_VOLTAGE,			\
		.owner = THIS_MODULE,				\
	}

static const struct regulator_desc aw_regs_desc[AW37503_MAX_REGULATORS] = {
	AW37503_REGULATOR_DESC(VPOS, outp),
	AW37503_REGULATOR_DESC(VNEG, outn),
};

/* Registers between CONTROL and WPRTEN are reserved: block access. */
static const struct regmap_range aw37503_no_reg_ranges[] = {
	regmap_reg_range(AW37503_REG_CONTROL + 1,
			 AW37503_REG_WPRTEN - 1),
};

static const struct regmap_access_table aw37503_no_reg_table = {
	.no_ranges = aw37503_no_reg_ranges,
	.n_no_ranges = ARRAY_SIZE(aw37503_no_reg_ranges),
};

static const struct regmap_config aw37503_regmap_config = {
	.reg_bits = 8,
	.val_bits = 8,
	.max_register = AW37503_REG_WPRTEN,
	.rd_table = &aw37503_no_reg_table,
	.wr_table = &aw37503_no_reg_table,
};
/*
 * I2C probe: allocate state, initialise regmap, then register both
 * outputs.  All resources are devm-managed, so no remove callback.
 */
static int aw37503_probe(struct i2c_client *client)
{
	struct device *dev = &client->dev;
	struct aw37503_regulator *chip;
	struct regulator_dev *rdev;
	struct regmap *regmap;
	struct regulator_config config = { };
	int id;

	chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL);
	if (!chip)
		return -ENOMEM;

	regmap = devm_regmap_init_i2c(client, &aw37503_regmap_config);
	if (IS_ERR(regmap))
		return dev_err_probe(dev, PTR_ERR(regmap),
				     "Failed to init regmap\n");

	i2c_set_clientdata(client, chip);
	chip->dev = dev;

	/* Shared config; desc supplies the per-regulator differences. */
	config.regmap = regmap;
	config.dev = dev;
	config.driver_data = chip;

	for (id = 0; id < AW37503_MAX_REGULATORS; ++id) {
		rdev = devm_regulator_register(dev, &aw_regs_desc[id],
					       &config);
		if (IS_ERR(rdev))
			return dev_err_probe(dev, PTR_ERR(rdev),
					     "Failed to register regulator %s\n",
					     aw_regs_desc[id].name);
	}
	return 0;
}
/* I2C and device-tree id tables for auto-loading and matching. */
static const struct i2c_device_id aw37503_id[] = {
	{.name = "aw37503",},
	{},
};
MODULE_DEVICE_TABLE(i2c, aw37503_id);

static const struct of_device_id aw37503_of_match[] = {
	{.compatible = "awinic,aw37503",},
	{ /* Sentinel */ },
};
MODULE_DEVICE_TABLE(of, aw37503_of_match);

static struct i2c_driver aw37503_i2c_driver = {
	.driver = {
		.name = "aw37503",
		.of_match_table = aw37503_of_match,
	},
	.probe = aw37503_probe,
	.id_table = aw37503_id,
};

module_i2c_driver(aw37503_i2c_driver);

MODULE_DESCRIPTION("aw37503 regulator driver");
MODULE_AUTHOR("Alec Li <[email protected]>");
MODULE_LICENSE("GPL");
| linux-master | drivers/regulator/aw37503-regulator.c |
// SPDX-License-Identifier: GPL-2.0+
//
// Copyright (C) 2011-2013 Freescale Semiconductor, Inc. All Rights Reserved.
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/err.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/regulator/of_regulator.h>
#include <linux/platform_device.h>
#include <linux/reboot.h>
#include <linux/regulator/driver.h>
#include <linux/regulator/machine.h>
#include <linux/regulator/pfuze100.h>
#include <linux/i2c.h>
#include <linux/slab.h>
#include <linux/regmap.h>
#define PFUZE_FLAG_DISABLE_SW	BIT(1)	/* allow disabling SWx regulators */

#define PFUZE_NUMREGS		128

/* Offsets from a regulator's base (VOL) register to its other regs. */
#define PFUZE100_VOL_OFFSET	0
#define PFUZE100_STANDBY_OFFSET	1
#define PFUZE100_MODE_OFFSET	3
#define PFUZE100_CONF_OFFSET	4

#define PFUZE100_DEVICEID	0x0
#define PFUZE100_REVID		0x3
#define PFUZE100_FABID		0x4

#define PFUZE100_COINVOL	0x1a
#define PFUZE100_SW1ABVOL	0x20
#define PFUZE100_SW1ABMODE	0x23
#define PFUZE100_SW1CVOL	0x2e
#define PFUZE100_SW1CMODE	0x31
#define PFUZE100_SW2VOL		0x35
#define PFUZE100_SW2MODE	0x38
#define PFUZE100_SW3AVOL	0x3c
#define PFUZE100_SW3AMODE	0x3f
#define PFUZE100_SW3BVOL	0x43
#define PFUZE100_SW3BMODE	0x46
#define PFUZE100_SW4VOL		0x4a
#define PFUZE100_SW4MODE	0x4d
#define PFUZE100_SWBSTCON1	0x66
#define PFUZE100_VREFDDRCON	0x6a
#define PFUZE100_VSNVSVOL	0x6b
#define PFUZE100_VGEN1VOL	0x6c
#define PFUZE100_VGEN2VOL	0x6d
#define PFUZE100_VGEN3VOL	0x6e
#define PFUZE100_VGEN4VOL	0x6f
#define PFUZE100_VGEN5VOL	0x70
#define PFUZE100_VGEN6VOL	0x71

#define PFUZE100_SWxMODE_MASK	0xf
#define PFUZE100_SWxMODE_APS_APS	0x8	/* APS in run and standby */
#define PFUZE100_SWxMODE_APS_OFF	0x4	/* APS in run, off in standby */

#define PFUZE100_VGENxLPWR	BIT(6)	/* VGEN low power in standby */
#define PFUZE100_VGENxSTBY	BIT(5)	/* VGEN off in standby */

/* Chip variants; values match the DEVICEID register family codes. */
enum chips { PFUZE100, PFUZE200, PFUZE3000 = 3, PFUZE3001 = 0x31, };

/* Descriptor plus the standby-register info the core does not carry. */
struct pfuze_regulator {
	struct regulator_desc desc;
	unsigned char stby_reg;
	unsigned char stby_mask;
	bool sw_reg;		/* true for SWx buck regulators */
};

/* Per-device driver state. */
struct pfuze_chip {
	int	chip_id;
	int     flags;		/* PFUZE_FLAG_* */
	struct regmap *regmap;
	struct device *dev;
	struct pfuze_regulator regulator_descs[PFUZE100_MAX_REGULATOR];
	struct regulator_dev *regulators[PFUZE100_MAX_REGULATOR];
	struct pfuze_regulator *pfuze_regulators;
};
/* Voltage tables (uV); index corresponds to the register selector. */
static const int pfuze100_swbst[] = {
	5000000, 5050000, 5100000, 5150000,
};

static const int pfuze100_vsnvs[] = {
	1000000, 1100000, 1200000, 1300000, 1500000, 1800000, 3000000,
};

static const int pfuze100_coin[] = {
	2500000, 2700000, 2800000, 2900000, 3000000, 3100000, 3200000, 3300000,
};

static const int pfuze3000_sw1a[] = {
	700000, 725000, 750000, 775000, 800000, 825000, 850000, 875000,
	900000, 925000, 950000, 975000, 1000000, 1025000, 1050000, 1075000,
	1100000, 1125000, 1150000, 1175000, 1200000, 1225000, 1250000, 1275000,
	1300000, 1325000, 1350000, 1375000, 1400000, 1425000, 1800000, 3300000,
};

static const int pfuze3000_sw2lo[] = {
	1500000, 1550000, 1600000, 1650000, 1700000, 1750000, 1800000, 1850000,
};

static const int pfuze3000_sw2hi[] = {
	2500000, 2800000, 2850000, 3000000, 3100000, 3150000, 3200000, 3300000,
};

/* DT compatibles; .data carries the enum chips variant. */
static const struct of_device_id pfuze_dt_ids[] = {
	{ .compatible = "fsl,pfuze100", .data = (void *)PFUZE100},
	{ .compatible = "fsl,pfuze200", .data = (void *)PFUZE200},
	{ .compatible = "fsl,pfuze3000", .data = (void *)PFUZE3000},
	{ .compatible = "fsl,pfuze3001", .data = (void *)PFUZE3001},
	{ }
};
MODULE_DEVICE_TABLE(of, pfuze_dt_ids);
/*
 * Program the DVS ramp rate for a switcher.
 *
 * Only the SWx bucks (IDs below the per-chip SWBST boundary) support
 * ramp control; others get -EACCES.  The 2-bit ramp code lives in bits
 * 7:6 of the register at vsel_reg + 4 (the SWxCONF register, see
 * PFUZE100_CONF_OFFSET).
 *
 * NOTE(review): the "12500 / ramp_delay" plus shift arithmetic maps a
 * uV/us request onto the hardware's power-of-two ramp codes — looks
 * approximate; verify against the PF100 datasheet ramp table.
 */
static int pfuze100_set_ramp_delay(struct regulator_dev *rdev, int ramp_delay)
{
	struct pfuze_chip *pfuze100 = rdev_get_drvdata(rdev);
	int id = rdev_get_id(rdev);
	bool reg_has_ramp_delay;
	unsigned int ramp_bits = 0;
	int ret;

	switch (pfuze100->chip_id) {
	case PFUZE3001:
		/* no dynamic voltage scaling for PF3001 */
		reg_has_ramp_delay = false;
		break;
	case PFUZE3000:
		reg_has_ramp_delay = (id < PFUZE3000_SWBST);
		break;
	case PFUZE200:
		reg_has_ramp_delay = (id < PFUZE200_SWBST);
		break;
	case PFUZE100:
	default:
		reg_has_ramp_delay = (id < PFUZE100_SWBST);
		break;
	}

	if (reg_has_ramp_delay) {
		if (ramp_delay > 0) {
			ramp_delay = 12500 / ramp_delay;
			ramp_bits = (ramp_delay >> 1) - (ramp_delay >> 3);
		}

		ret = regmap_update_bits(pfuze100->regmap,
					 rdev->desc->vsel_reg + 4,
					 0xc0, ramp_bits << 6);
		if (ret < 0)
			dev_err(pfuze100->dev, "ramp failed, err %d\n", ret);
	} else {
		ret = -EACCES;
	}

	return ret;
}
/* LDOs: linear voltage range with on/off control */
static const struct regulator_ops pfuze100_ldo_regulator_ops = {
	.enable = regulator_enable_regmap,
	.disable = regulator_disable_regmap,
	.is_enabled = regulator_is_enabled_regmap,
	.list_voltage = regulator_list_voltage_linear,
	.set_voltage_sel = regulator_set_voltage_sel_regmap,
	.get_voltage_sel = regulator_get_voltage_sel_regmap,
};
/* Fixed rails (VREFDDR): on/off only, single voltage */
static const struct regulator_ops pfuze100_fixed_regulator_ops = {
	.enable = regulator_enable_regmap,
	.disable = regulator_disable_regmap,
	.is_enabled = regulator_is_enabled_regmap,
	.list_voltage = regulator_list_voltage_linear,
};
/* Switchers: linear range + ramp control; no enable/disable ops provided */
static const struct regulator_ops pfuze100_sw_regulator_ops = {
	.list_voltage = regulator_list_voltage_linear,
	.set_voltage_sel = regulator_set_voltage_sel_regmap,
	.get_voltage_sel = regulator_get_voltage_sel_regmap,
	.set_voltage_time_sel = regulator_set_voltage_time_sel,
	.set_ramp_delay = pfuze100_set_ramp_delay,
};
/* Switchers when "fsl,pfuze-support-disable-sw" is set: also allow on/off */
static const struct regulator_ops pfuze100_sw_disable_regulator_ops = {
	.enable = regulator_enable_regmap,
	.disable = regulator_disable_regmap,
	.is_enabled = regulator_is_enabled_regmap,
	.list_voltage = regulator_list_voltage_linear,
	.set_voltage_sel = regulator_set_voltage_sel_regmap,
	.get_voltage_sel = regulator_get_voltage_sel_regmap,
	.set_voltage_time_sel = regulator_set_voltage_time_sel,
	.set_ramp_delay = pfuze100_set_ramp_delay,
};
/* SWBST/VSNVS/COIN style outputs: table-based voltage selection */
static const struct regulator_ops pfuze100_swb_regulator_ops = {
	.enable = regulator_enable_regmap,
	.disable = regulator_disable_regmap,
	.is_enabled = regulator_is_enabled_regmap,
	.list_voltage = regulator_list_voltage_table,
	.map_voltage = regulator_map_voltage_ascend,
	.set_voltage_sel = regulator_set_voltage_sel_regmap,
	.get_voltage_sel = regulator_get_voltage_sel_regmap,
};
/* PFUZE3000/3001 switchers with non-linear voltage tables */
static const struct regulator_ops pfuze3000_sw_regulator_ops = {
	.enable = regulator_enable_regmap,
	.disable = regulator_disable_regmap,
	.is_enabled = regulator_is_enabled_regmap,
	.list_voltage = regulator_list_voltage_table,
	.map_voltage = regulator_map_voltage_ascend,
	.set_voltage_sel = regulator_set_voltage_sel_regmap,
	.get_voltage_sel = regulator_get_voltage_sel_regmap,
	.set_voltage_time_sel = regulator_set_voltage_time_sel,
	.set_ramp_delay = pfuze100_set_ramp_delay,
};
/* Fixed-voltage rail descriptor: single voltage, enable bit only */
#define PFUZE100_FIXED_REG(_chip, _name, base, voltage)	\
	[_chip ## _ ## _name] = {	\
		.desc = {	\
			.name = #_name,	\
			.n_voltages = 1,	\
			.ops = &pfuze100_fixed_regulator_ops,	\
			.type = REGULATOR_VOLTAGE,	\
			.id = _chip ## _ ## _name,	\
			.owner = THIS_MODULE,	\
			.min_uV = (voltage),	\
			.enable_reg = (base),	\
			.enable_mask = 0x10,	\
		},	\
	}
/* Linear-range switcher descriptor; also records the standby-voltage reg */
#define PFUZE100_SW_REG(_chip, _name, base, min, max, step)	\
	[_chip ## _ ## _name] = {	\
		.desc = {	\
			.name = #_name,\
			.n_voltages = ((max) - (min)) / (step) + 1,	\
			.ops = &pfuze100_sw_regulator_ops,	\
			.type = REGULATOR_VOLTAGE,	\
			.id = _chip ## _ ## _name,	\
			.owner = THIS_MODULE,	\
			.min_uV = (min),	\
			.uV_step = (step),	\
			.vsel_reg = (base) + PFUZE100_VOL_OFFSET,	\
			.vsel_mask = 0x3f,	\
			.enable_reg = (base) + PFUZE100_MODE_OFFSET,	\
			.enable_mask = 0xf,	\
		},	\
		.stby_reg = (base) + PFUZE100_STANDBY_OFFSET,	\
		.stby_mask = 0x3f,	\
		.sw_reg = true,	\
	}
/* Table-based switcher/boost descriptor (SWBST, VSNVS) */
#define PFUZE100_SWB_REG(_chip, _name, base, mask, voltages)	\
	[_chip ## _ ## _name] = {	\
		.desc = {	\
			.name = #_name,	\
			.n_voltages = ARRAY_SIZE(voltages),	\
			.ops = &pfuze100_swb_regulator_ops,	\
			.type = REGULATOR_VOLTAGE,	\
			.id = _chip ## _ ## _name,	\
			.owner = THIS_MODULE,	\
			.volt_table = voltages,	\
			.vsel_reg = (base),	\
			.vsel_mask = (mask),	\
			.enable_reg = (base),	\
			.enable_mask = 0x48,	\
		},	\
	}
/* VGENx LDO descriptor; standby enable shares the voltage register */
#define PFUZE100_VGEN_REG(_chip, _name, base, min, max, step)	\
	[_chip ## _ ## _name] = {	\
		.desc = {	\
			.name = #_name,	\
			.n_voltages = ((max) - (min)) / (step) + 1,	\
			.ops = &pfuze100_ldo_regulator_ops,	\
			.type = REGULATOR_VOLTAGE,	\
			.id = _chip ## _ ## _name,	\
			.owner = THIS_MODULE,	\
			.min_uV = (min),	\
			.uV_step = (step),	\
			.vsel_reg = (base),	\
			.vsel_mask = 0xf,	\
			.enable_reg = (base),	\
			.enable_mask = 0x10,	\
		},	\
		.stby_reg = (base),	\
		.stby_mask = 0x20,	\
	}
/* Coin-cell charger descriptor: table-based, enable bit 3 */
#define PFUZE100_COIN_REG(_chip, _name, base, mask, voltages)	\
	[_chip ## _ ## _name] = {	\
		.desc = {	\
			.name = #_name,	\
			.n_voltages = ARRAY_SIZE(voltages),	\
			.ops = &pfuze100_swb_regulator_ops,	\
			.type = REGULATOR_VOLTAGE,	\
			.id = _chip ## _ ## _name,	\
			.owner = THIS_MODULE,	\
			.volt_table = voltages,	\
			.vsel_reg = (base),	\
			.vsel_mask = (mask),	\
			.enable_reg = (base),	\
			.enable_mask = 0x8,	\
		},	\
	}
/* PFUZE3000 VCCSD/V33 descriptor: 2-bit linear selector */
#define PFUZE3000_VCC_REG(_chip, _name, base, min, max, step)	{	\
	.desc = {	\
		.name = #_name,	\
		.n_voltages = ((max) - (min)) / (step) + 1,	\
		.ops = &pfuze100_ldo_regulator_ops,	\
		.type = REGULATOR_VOLTAGE,	\
		.id = _chip ## _ ## _name,	\
		.owner = THIS_MODULE,	\
		.min_uV = (min),	\
		.uV_step = (step),	\
		.vsel_reg = (base),	\
		.vsel_mask = 0x3,	\
		.enable_reg = (base),	\
		.enable_mask = 0x10,	\
	},	\
	.stby_reg = (base),	\
	.stby_mask = 0x20,	\
}
/* Some PFUZE3000 switches have no linear (continuous) voltage range,
 * so their descriptors use an explicit table instead.
 */
#define PFUZE3000_SW_REG(_chip, _name, base, mask, voltages)	\
	[_chip ## _ ## _name] = {	\
		.desc = {	\
			.name = #_name,	\
			.n_voltages = ARRAY_SIZE(voltages),	\
			.ops = &pfuze3000_sw_regulator_ops,	\
			.type = REGULATOR_VOLTAGE,	\
			.id = _chip ## _ ## _name,	\
			.owner = THIS_MODULE,	\
			.volt_table = voltages,	\
			.vsel_reg = (base) + PFUZE100_VOL_OFFSET,	\
			.vsel_mask = (mask),	\
			.enable_reg = (base) + PFUZE100_MODE_OFFSET,	\
			.enable_mask = 0xf,	\
			.enable_val = 0x8,	\
			.enable_time = 500,	\
		},	\
		.stby_reg = (base) + PFUZE100_STANDBY_OFFSET,	\
		.stby_mask = (mask),	\
		.sw_reg = true,	\
	}
/* PFUZE3000 SW3 descriptor: linear range, no enable control */
#define PFUZE3000_SW3_REG(_chip, _name, base, min, max, step)	{	\
	.desc = {	\
		.name = #_name,\
		.n_voltages = ((max) - (min)) / (step) + 1,	\
		.ops = &pfuze100_sw_regulator_ops,	\
		.type = REGULATOR_VOLTAGE,	\
		.id = _chip ## _ ## _name,	\
		.owner = THIS_MODULE,	\
		.min_uV = (min),	\
		.uV_step = (step),	\
		.vsel_reg = (base) + PFUZE100_VOL_OFFSET,	\
		.vsel_mask = 0xf,	\
	},	\
	.stby_reg = (base) + PFUZE100_STANDBY_OFFSET,	\
	.stby_mask = 0xf,	\
}
/* PFUZE100: per-variant regulator descriptor tables, indexed by chip ID enum */
static struct pfuze_regulator pfuze100_regulators[] = {
	PFUZE100_SW_REG(PFUZE100, SW1AB, PFUZE100_SW1ABVOL, 300000, 1875000, 25000),
	PFUZE100_SW_REG(PFUZE100, SW1C, PFUZE100_SW1CVOL, 300000, 1875000, 25000),
	PFUZE100_SW_REG(PFUZE100, SW2, PFUZE100_SW2VOL, 400000, 1975000, 25000),
	PFUZE100_SW_REG(PFUZE100, SW3A, PFUZE100_SW3AVOL, 400000, 1975000, 25000),
	PFUZE100_SW_REG(PFUZE100, SW3B, PFUZE100_SW3BVOL, 400000, 1975000, 25000),
	PFUZE100_SW_REG(PFUZE100, SW4, PFUZE100_SW4VOL, 400000, 1975000, 25000),
	PFUZE100_SWB_REG(PFUZE100, SWBST, PFUZE100_SWBSTCON1, 0x3 , pfuze100_swbst),
	PFUZE100_SWB_REG(PFUZE100, VSNVS, PFUZE100_VSNVSVOL, 0x7, pfuze100_vsnvs),
	PFUZE100_FIXED_REG(PFUZE100, VREFDDR, PFUZE100_VREFDDRCON, 750000),
	PFUZE100_VGEN_REG(PFUZE100, VGEN1, PFUZE100_VGEN1VOL, 800000, 1550000, 50000),
	PFUZE100_VGEN_REG(PFUZE100, VGEN2, PFUZE100_VGEN2VOL, 800000, 1550000, 50000),
	PFUZE100_VGEN_REG(PFUZE100, VGEN3, PFUZE100_VGEN3VOL, 1800000, 3300000, 100000),
	PFUZE100_VGEN_REG(PFUZE100, VGEN4, PFUZE100_VGEN4VOL, 1800000, 3300000, 100000),
	PFUZE100_VGEN_REG(PFUZE100, VGEN5, PFUZE100_VGEN5VOL, 1800000, 3300000, 100000),
	PFUZE100_VGEN_REG(PFUZE100, VGEN6, PFUZE100_VGEN6VOL, 1800000, 3300000, 100000),
	PFUZE100_COIN_REG(PFUZE100, COIN, PFUZE100_COINVOL, 0x7, pfuze100_coin),
};
/* PFUZE200: same register layout as PFUZE100 minus SW1C and SW4 */
static struct pfuze_regulator pfuze200_regulators[] = {
	PFUZE100_SW_REG(PFUZE200, SW1AB, PFUZE100_SW1ABVOL, 300000, 1875000, 25000),
	PFUZE100_SW_REG(PFUZE200, SW2, PFUZE100_SW2VOL, 400000, 1975000, 25000),
	PFUZE100_SW_REG(PFUZE200, SW3A, PFUZE100_SW3AVOL, 400000, 1975000, 25000),
	PFUZE100_SW_REG(PFUZE200, SW3B, PFUZE100_SW3BVOL, 400000, 1975000, 25000),
	PFUZE100_SWB_REG(PFUZE200, SWBST, PFUZE100_SWBSTCON1, 0x3 , pfuze100_swbst),
	PFUZE100_SWB_REG(PFUZE200, VSNVS, PFUZE100_VSNVSVOL, 0x7, pfuze100_vsnvs),
	PFUZE100_FIXED_REG(PFUZE200, VREFDDR, PFUZE100_VREFDDRCON, 750000),
	PFUZE100_VGEN_REG(PFUZE200, VGEN1, PFUZE100_VGEN1VOL, 800000, 1550000, 50000),
	PFUZE100_VGEN_REG(PFUZE200, VGEN2, PFUZE100_VGEN2VOL, 800000, 1550000, 50000),
	PFUZE100_VGEN_REG(PFUZE200, VGEN3, PFUZE100_VGEN3VOL, 1800000, 3300000, 100000),
	PFUZE100_VGEN_REG(PFUZE200, VGEN4, PFUZE100_VGEN4VOL, 1800000, 3300000, 100000),
	PFUZE100_VGEN_REG(PFUZE200, VGEN5, PFUZE100_VGEN5VOL, 1800000, 3300000, 100000),
	PFUZE100_VGEN_REG(PFUZE200, VGEN6, PFUZE100_VGEN6VOL, 1800000, 3300000, 100000),
	PFUZE100_COIN_REG(PFUZE200, COIN, PFUZE100_COINVOL, 0x7, pfuze100_coin),
};
/* PFUZE3000: mixes table-based and linear switchers; LDOs renamed VLDOx */
static struct pfuze_regulator pfuze3000_regulators[] = {
	PFUZE3000_SW_REG(PFUZE3000, SW1A, PFUZE100_SW1ABVOL, 0x1f, pfuze3000_sw1a),
	PFUZE100_SW_REG(PFUZE3000, SW1B, PFUZE100_SW1CVOL, 700000, 1475000, 25000),
	PFUZE3000_SW_REG(PFUZE3000, SW2, PFUZE100_SW2VOL, 0x7, pfuze3000_sw2lo),
	PFUZE3000_SW3_REG(PFUZE3000, SW3, PFUZE100_SW3AVOL, 900000, 1650000, 50000),
	PFUZE100_SWB_REG(PFUZE3000, SWBST, PFUZE100_SWBSTCON1, 0x3, pfuze100_swbst),
	PFUZE100_SWB_REG(PFUZE3000, VSNVS, PFUZE100_VSNVSVOL, 0x7, pfuze100_vsnvs),
	PFUZE100_FIXED_REG(PFUZE3000, VREFDDR, PFUZE100_VREFDDRCON, 750000),
	PFUZE100_VGEN_REG(PFUZE3000, VLDO1, PFUZE100_VGEN1VOL, 1800000, 3300000, 100000),
	PFUZE100_VGEN_REG(PFUZE3000, VLDO2, PFUZE100_VGEN2VOL, 800000, 1550000, 50000),
	PFUZE3000_VCC_REG(PFUZE3000, VCCSD, PFUZE100_VGEN3VOL, 2850000, 3300000, 150000),
	PFUZE3000_VCC_REG(PFUZE3000, V33, PFUZE100_VGEN4VOL, 2850000, 3300000, 150000),
	PFUZE100_VGEN_REG(PFUZE3000, VLDO3, PFUZE100_VGEN5VOL, 1800000, 3300000, 100000),
	PFUZE100_VGEN_REG(PFUZE3000, VLDO4, PFUZE100_VGEN6VOL, 1800000, 3300000, 100000),
};
/* PFUZE3001: reduced PFUZE3000 variant without SWBST and VREFDDR */
static struct pfuze_regulator pfuze3001_regulators[] = {
	PFUZE3000_SW_REG(PFUZE3001, SW1, PFUZE100_SW1ABVOL, 0x1f, pfuze3000_sw1a),
	PFUZE3000_SW_REG(PFUZE3001, SW2, PFUZE100_SW2VOL, 0x7, pfuze3000_sw2lo),
	PFUZE3000_SW3_REG(PFUZE3001, SW3, PFUZE100_SW3AVOL, 900000, 1650000, 50000),
	PFUZE100_SWB_REG(PFUZE3001, VSNVS, PFUZE100_VSNVSVOL, 0x7, pfuze100_vsnvs),
	PFUZE100_VGEN_REG(PFUZE3001, VLDO1, PFUZE100_VGEN1VOL, 1800000, 3300000, 100000),
	PFUZE100_VGEN_REG(PFUZE3001, VLDO2, PFUZE100_VGEN2VOL, 800000, 1550000, 50000),
	PFUZE3000_VCC_REG(PFUZE3001, VCCSD, PFUZE100_VGEN3VOL, 2850000, 3300000, 150000),
	PFUZE3000_VCC_REG(PFUZE3001, V33, PFUZE100_VGEN4VOL, 2850000, 3300000, 150000),
	PFUZE100_VGEN_REG(PFUZE3001, VLDO3, PFUZE100_VGEN5VOL, 1800000, 3300000, 100000),
	PFUZE100_VGEN_REG(PFUZE3001, VLDO4, PFUZE100_VGEN6VOL, 1800000, 3300000, 100000),
};
/* PFUZE100: DT child-node names, in the same order as the descriptor array */
static struct of_regulator_match pfuze100_matches[] = {
	{ .name = "sw1ab",	},
	{ .name = "sw1c",	},
	{ .name = "sw2",	},
	{ .name = "sw3a",	},
	{ .name = "sw3b",	},
	{ .name = "sw4",	},
	{ .name = "swbst",	},
	{ .name = "vsnvs",	},
	{ .name = "vrefddr",	},
	{ .name = "vgen1",	},
	{ .name = "vgen2",	},
	{ .name = "vgen3",	},
	{ .name = "vgen4",	},
	{ .name = "vgen5",	},
	{ .name = "vgen6",	},
	{ .name = "coin",	},
};
/* PFUZE200 */
static struct of_regulator_match pfuze200_matches[] = {
	{ .name = "sw1ab",	},
	{ .name = "sw2",	},
	{ .name = "sw3a",	},
	{ .name = "sw3b",	},
	{ .name = "swbst",	},
	{ .name = "vsnvs",	},
	{ .name = "vrefddr",	},
	{ .name = "vgen1",	},
	{ .name = "vgen2",	},
	{ .name = "vgen3",	},
	{ .name = "vgen4",	},
	{ .name = "vgen5",	},
	{ .name = "vgen6",	},
	{ .name = "coin",	},
};
/* PFUZE3000 */
static struct of_regulator_match pfuze3000_matches[] = {
	{ .name = "sw1a",	},
	{ .name = "sw1b",	},
	{ .name = "sw2",	},
	{ .name = "sw3",	},
	{ .name = "swbst",	},
	{ .name = "vsnvs",	},
	{ .name = "vrefddr",	},
	{ .name = "vldo1",	},
	{ .name = "vldo2",	},
	{ .name = "vccsd",	},
	{ .name = "v33",	},
	{ .name = "vldo3",	},
	{ .name = "vldo4",	},
};
/* PFUZE3001 */
static struct of_regulator_match pfuze3001_matches[] = {
	{ .name = "sw1",	},
	{ .name = "sw2",	},
	{ .name = "sw3",	},
	{ .name = "vsnvs",	},
	{ .name = "vldo1",	},
	{ .name = "vldo2",	},
	{ .name = "vccsd",	},
	{ .name = "v33",	},
	{ .name = "vldo3",	},
	{ .name = "vldo4",	},
};
/* Points at whichever *_matches table the probed chip selected */
static struct of_regulator_match *pfuze_matches;
/*
 * Parse the "regulators" child node of the PMIC's device-tree node.
 *
 * Picks the per-variant match table, fills it via of_regulator_match() and
 * records the "fsl,pfuze-support-disable-sw" opt-in flag.  Returns 0 on
 * success or a negative errno.
 */
static int pfuze_parse_regulators_dt(struct pfuze_chip *chip)
{
	struct device *dev = chip->dev;
	struct device_node *regs, *root;
	int ret;

	root = of_node_get(dev->of_node);
	if (!root)
		return -EINVAL;

	if (of_property_read_bool(root, "fsl,pfuze-support-disable-sw"))
		chip->flags |= PFUZE_FLAG_DISABLE_SW;

	regs = of_get_child_by_name(root, "regulators");
	if (!regs) {
		dev_err(dev, "regulators node not found\n");
		of_node_put(root);
		return -EINVAL;
	}

	switch (chip->chip_id) {
	case PFUZE3001:
		pfuze_matches = pfuze3001_matches;
		ret = of_regulator_match(dev, regs, pfuze3001_matches,
					 ARRAY_SIZE(pfuze3001_matches));
		break;
	case PFUZE3000:
		pfuze_matches = pfuze3000_matches;
		ret = of_regulator_match(dev, regs, pfuze3000_matches,
					 ARRAY_SIZE(pfuze3000_matches));
		break;
	case PFUZE200:
		pfuze_matches = pfuze200_matches;
		ret = of_regulator_match(dev, regs, pfuze200_matches,
					 ARRAY_SIZE(pfuze200_matches));
		break;
	case PFUZE100:
	default:
		pfuze_matches = pfuze100_matches;
		ret = of_regulator_match(dev, regs, pfuze100_matches,
					 ARRAY_SIZE(pfuze100_matches));
		break;
	}

	of_node_put(regs);
	of_node_put(root);

	if (ret < 0) {
		dev_err(dev, "Error parsing regulator init data: %d\n",
			ret);
		return ret;
	}

	return 0;
}
/* init_data parsed from DT for regulator @idx of the active match table */
static inline struct regulator_init_data *match_init_data(int idx)
{
	return pfuze_matches[idx].init_data;
}

/* device-tree node matched for regulator @idx, or NULL if absent */
static inline struct device_node *match_of_node(int idx)
{
	return pfuze_matches[idx].of_node;
}
/*
 * sys-off power-off-prepare callback (registered for PFUZE100 only).
 *
 * Reprograms every switcher's standby behaviour from the default APS/APS to
 * APS/Off and puts the VGEN LDOs into standby-disable, so that the standby
 * request that follows actually removes all rails (turns standby into a
 * real power-off).  Register write errors are deliberately ignored: we are
 * already on the power-off path and there is nothing useful left to do.
 */
static int pfuze_power_off_prepare(struct sys_off_data *data)
{
	struct pfuze_chip *syspm_pfuze_chip = data->cb_data;

	/* fix: kernel log messages must be newline-terminated */
	dev_info(syspm_pfuze_chip->dev, "Configure standby mode for power off\n");

	/* Switch from default mode: APS/APS to APS/Off */
	regmap_update_bits(syspm_pfuze_chip->regmap, PFUZE100_SW1ABMODE,
			   PFUZE100_SWxMODE_MASK, PFUZE100_SWxMODE_APS_OFF);
	regmap_update_bits(syspm_pfuze_chip->regmap, PFUZE100_SW1CMODE,
			   PFUZE100_SWxMODE_MASK, PFUZE100_SWxMODE_APS_OFF);
	regmap_update_bits(syspm_pfuze_chip->regmap, PFUZE100_SW2MODE,
			   PFUZE100_SWxMODE_MASK, PFUZE100_SWxMODE_APS_OFF);
	regmap_update_bits(syspm_pfuze_chip->regmap, PFUZE100_SW3AMODE,
			   PFUZE100_SWxMODE_MASK, PFUZE100_SWxMODE_APS_OFF);
	regmap_update_bits(syspm_pfuze_chip->regmap, PFUZE100_SW3BMODE,
			   PFUZE100_SWxMODE_MASK, PFUZE100_SWxMODE_APS_OFF);
	regmap_update_bits(syspm_pfuze_chip->regmap, PFUZE100_SW4MODE,
			   PFUZE100_SWxMODE_MASK, PFUZE100_SWxMODE_APS_OFF);

	/* LDOs: clear low-power bit, set standby-disable bit */
	regmap_update_bits(syspm_pfuze_chip->regmap, PFUZE100_VGEN1VOL,
			   PFUZE100_VGENxLPWR | PFUZE100_VGENxSTBY,
			   PFUZE100_VGENxSTBY);
	regmap_update_bits(syspm_pfuze_chip->regmap, PFUZE100_VGEN2VOL,
			   PFUZE100_VGENxLPWR | PFUZE100_VGENxSTBY,
			   PFUZE100_VGENxSTBY);
	regmap_update_bits(syspm_pfuze_chip->regmap, PFUZE100_VGEN3VOL,
			   PFUZE100_VGENxLPWR | PFUZE100_VGENxSTBY,
			   PFUZE100_VGENxSTBY);
	regmap_update_bits(syspm_pfuze_chip->regmap, PFUZE100_VGEN4VOL,
			   PFUZE100_VGENxLPWR | PFUZE100_VGENxSTBY,
			   PFUZE100_VGENxSTBY);
	regmap_update_bits(syspm_pfuze_chip->regmap, PFUZE100_VGEN5VOL,
			   PFUZE100_VGENxLPWR | PFUZE100_VGENxSTBY,
			   PFUZE100_VGENxSTBY);
	regmap_update_bits(syspm_pfuze_chip->regmap, PFUZE100_VGEN6VOL,
			   PFUZE100_VGENxLPWR | PFUZE100_VGENxSTBY,
			   PFUZE100_VGENxSTBY);

	return NOTIFY_DONE;
}
/*
 * Register pfuze_power_off_prepare() as a sys-off handler.  Only the
 * PFUZE100 variant is supported; other chips get -ENODEV with a warning.
 */
static int pfuze_power_off_prepare_init(struct pfuze_chip *pfuze_chip)
{
	int ret;

	if (pfuze_chip->chip_id != PFUZE100) {
		dev_warn(pfuze_chip->dev, "Requested pm_power_off_prepare handler for not supported chip\n");
		return -ENODEV;
	}

	ret = devm_register_sys_off_handler(pfuze_chip->dev,
					    SYS_OFF_MODE_POWER_OFF_PREPARE,
					    SYS_OFF_PRIO_DEFAULT,
					    pfuze_power_off_prepare,
					    pfuze_chip);
	if (ret)
		dev_err(pfuze_chip->dev, "failed to register sys-off handler: %d\n",
			ret);

	return ret;
}
/*
 * Verify the PMIC identity against the variant chosen from DT/i2c-id and
 * log the revision/fab information.  Returns 0 on success, -ENODEV on an
 * ID mismatch, or a negative regmap error.
 */
static int pfuze_identify(struct pfuze_chip *pfuze_chip)
{
	unsigned int value;
	int ret;

	ret = regmap_read(pfuze_chip->regmap, PFUZE100_DEVICEID, &value);
	if (ret)
		return ret;

	if (((value & 0x0f) == 0x8) && (pfuze_chip->chip_id == PFUZE100)) {
		/*
		 * Freescale misprogrammed 1-3% of parts prior to week 8 of 2013
		 * as ID=8 in PFUZE100
		 */
		dev_info(pfuze_chip->dev, "Assuming misprogrammed ID=0x8\n");
	} else if ((value & 0x0f) != pfuze_chip->chip_id &&
		   (value & 0xf0) >> 4 != pfuze_chip->chip_id &&
		   (value != pfuze_chip->chip_id)) {
		/* device id NOT match with your setting */
		dev_warn(pfuze_chip->dev, "Illegal ID: %x\n", value);
		return -ENODEV;
	}

	ret = regmap_read(pfuze_chip->regmap, PFUZE100_REVID, &value);
	if (ret)
		return ret;
	dev_info(pfuze_chip->dev,
		 "Full layer: %x, Metal layer: %x\n",
		 (value & 0xf0) >> 4, value & 0x0f);

	ret = regmap_read(pfuze_chip->regmap, PFUZE100_FABID, &value);
	if (ret)
		return ret;
	dev_info(pfuze_chip->dev, "FAB: %x, FIN: %x\n",
		 (value & 0xc) >> 2, value & 0x3);

	return 0;
}
/* 8-bit registers; cached so repeated reads avoid extra I2C traffic */
static const struct regmap_config pfuze_regmap_config = {
	.reg_bits = 8,
	.val_bits = 8,
	.max_register = PFUZE_NUMREGS - 1,
	.cache_type = REGCACHE_RBTREE,
};
/*
 * I2C probe: identify the chip variant, parse DT, patch the descriptor
 * copies for the SW2-range fuse bit and the optional disable-sw flag, then
 * register every regulator with the core.
 */
static int pfuze100_regulator_probe(struct i2c_client *client)
{
	const struct i2c_device_id *id = i2c_client_get_device_id(client);
	struct pfuze_chip *pfuze_chip;
	struct regulator_config config = { };
	int i, ret;
	const struct of_device_id *match;
	u32 regulator_num;
	u32 sw_check_start, sw_check_end, sw_hi = 0x40;

	pfuze_chip = devm_kzalloc(&client->dev, sizeof(*pfuze_chip),
			GFP_KERNEL);
	if (!pfuze_chip)
		return -ENOMEM;

	/* chip variant comes from DT match data or, failing that, i2c id */
	if (client->dev.of_node) {
		match = of_match_device(pfuze_dt_ids, &client->dev);
		if (!match) {
			dev_err(&client->dev, "Error: No device match found\n");
			return -ENODEV;
		}
		pfuze_chip->chip_id = (int)(long)match->data;
	} else if (id) {
		pfuze_chip->chip_id = id->driver_data;
	} else {
		dev_err(&client->dev, "No dts match or id table match found\n");
		return -ENODEV;
	}

	i2c_set_clientdata(client, pfuze_chip);
	pfuze_chip->dev = &client->dev;

	pfuze_chip->regmap = devm_regmap_init_i2c(client, &pfuze_regmap_config);
	if (IS_ERR(pfuze_chip->regmap)) {
		ret = PTR_ERR(pfuze_chip->regmap);
		dev_err(&client->dev,
			"regmap allocation failed with err %d\n", ret);
		return ret;
	}

	ret = pfuze_identify(pfuze_chip);
	if (ret) {
		dev_err(&client->dev, "unrecognized pfuze chip ID!\n");
		return ret;
	}

	/* use the right regulators after identify the right device */
	switch (pfuze_chip->chip_id) {
	case PFUZE3001:
		pfuze_chip->pfuze_regulators = pfuze3001_regulators;
		regulator_num = ARRAY_SIZE(pfuze3001_regulators);
		sw_check_start = PFUZE3001_SW2;
		sw_check_end = PFUZE3001_SW2;
		sw_hi = 1 << 3;
		break;
	case PFUZE3000:
		pfuze_chip->pfuze_regulators = pfuze3000_regulators;
		regulator_num = ARRAY_SIZE(pfuze3000_regulators);
		sw_check_start = PFUZE3000_SW2;
		sw_check_end = PFUZE3000_SW2;
		sw_hi = 1 << 3;
		break;
	case PFUZE200:
		pfuze_chip->pfuze_regulators = pfuze200_regulators;
		regulator_num = ARRAY_SIZE(pfuze200_regulators);
		sw_check_start = PFUZE200_SW2;
		sw_check_end = PFUZE200_SW3B;
		break;
	case PFUZE100:
	default:
		pfuze_chip->pfuze_regulators = pfuze100_regulators;
		regulator_num = ARRAY_SIZE(pfuze100_regulators);
		sw_check_start = PFUZE100_SW2;
		sw_check_end = PFUZE100_SW4;
		break;
	}
	dev_info(&client->dev, "pfuze%s found.\n",
		(pfuze_chip->chip_id == PFUZE100) ? "100" :
		(((pfuze_chip->chip_id == PFUZE200) ? "200" :
		((pfuze_chip->chip_id == PFUZE3000) ? "3000" : "3001"))));

	/* keep a mutable copy: per-board DT state may patch the descriptors */
	memcpy(pfuze_chip->regulator_descs, pfuze_chip->pfuze_regulators,
		regulator_num * sizeof(struct pfuze_regulator));

	ret = pfuze_parse_regulators_dt(pfuze_chip);
	if (ret)
		return ret;

	for (i = 0; i < regulator_num; i++) {
		struct regulator_init_data *init_data;
		struct regulator_desc *desc;
		/* fix: regmap_read() writes through an unsigned int pointer */
		unsigned int val;

		desc = &pfuze_chip->regulator_descs[i].desc;
		init_data = match_init_data(i);

		/* SW2~SW4 high bit check and modify the voltage value table */
		if (i >= sw_check_start && i <= sw_check_end) {
			ret = regmap_read(pfuze_chip->regmap,
						desc->vsel_reg, &val);
			if (ret) {
				dev_err(&client->dev, "Fails to read from the register.\n");
				return ret;
			}

			if (val & sw_hi) {
				if (pfuze_chip->chip_id == PFUZE3000 ||
					pfuze_chip->chip_id == PFUZE3001) {
					desc->volt_table = pfuze3000_sw2hi;
					desc->n_voltages = ARRAY_SIZE(pfuze3000_sw2hi);
				} else {
					desc->min_uV = 800000;
					desc->uV_step = 50000;
					desc->n_voltages = 51;
				}
			}
		}

		/*
		 * Allow SW regulators to turn off. Checking it trough a flag is
		 * a workaround to keep the backward compatibility with existing
		 * old dtb's which may relay on the fact that we didn't disable
		 * the switched regulator till yet.
		 */
		if (pfuze_chip->flags & PFUZE_FLAG_DISABLE_SW) {
			if (pfuze_chip->chip_id == PFUZE100 ||
				pfuze_chip->chip_id == PFUZE200) {
				if (pfuze_chip->regulator_descs[i].sw_reg) {
					desc->ops = &pfuze100_sw_disable_regulator_ops;
					desc->enable_val = 0x8;
					desc->disable_val = 0x0;
					desc->enable_time = 500;
				}
			}
		}

		config.dev = &client->dev;
		config.init_data = init_data;
		config.driver_data = pfuze_chip;
		config.of_node = match_of_node(i);

		pfuze_chip->regulators[i] =
			devm_regulator_register(&client->dev, desc, &config);
		if (IS_ERR(pfuze_chip->regulators[i])) {
			dev_err(&client->dev, "register regulator%s failed\n",
				pfuze_chip->pfuze_regulators[i].desc.name);
			return PTR_ERR(pfuze_chip->regulators[i]);
		}
	}

	if (of_property_read_bool(client->dev.of_node,
				  "fsl,pmic-stby-poweroff"))
		return pfuze_power_off_prepare_init(pfuze_chip);

	return 0;
}
/* I2C driver glue; devm_* resources make a remove() callback unnecessary */
static struct i2c_driver pfuze_driver = {
	.driver = {
		.name = "pfuze100-regulator",
		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
		.of_match_table = pfuze_dt_ids,
	},
	.probe = pfuze100_regulator_probe,
};
module_i2c_driver(pfuze_driver);

MODULE_AUTHOR("Robin Gong <[email protected]>");
MODULE_DESCRIPTION("Regulator Driver for Freescale PFUZE100/200/3000/3001 PMIC");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/regulator/pfuze100-regulator.c |
// SPDX-License-Identifier: GPL-2.0
//
// Copyright (c) 2022 MediaTek Inc.
// Copyright (c) 2022 BayLibre, SAS.
// Author: Chen Zhong <[email protected]>
// Author: Fabien Parent <[email protected]>
// Author: Alexandre Mergnat <[email protected]>
//
// Based on mt6397-regulator.c
//
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/mfd/mt6397/core.h>
#include <linux/mfd/mt6357/registers.h>
#include <linux/regulator/driver.h>
#include <linux/regulator/machine.h>
#include <linux/regulator/mt6357-regulator.h>
#include <linux/regulator/of_regulator.h>
/*
 * MT6357 regulators' information
 *
 * @desc: standard fields of regulator description.
 * @da_vsel_reg: monitor (DBG0) register used to read back a buck's
 *               achieved voltage selector.
 * @da_vsel_mask: mask of the selector field within @da_vsel_reg.
 */
struct mt6357_regulator_info {
	struct regulator_desc desc;
	u32 da_vsel_reg;
	u32 da_vsel_mask;
};
/*
 * Buck descriptor helper.
 *
 * Fix: the _da_vsel_mask parameter was accepted but never used -
 * .da_vsel_mask silently reused vosel_mask instead.  All current call
 * sites pass identical values for both, so behaviour is unchanged, but
 * the field now honours the argument as the macro signature promises.
 */
#define MT6357_BUCK(match, vreg, min, max, step,		\
	volt_ranges, vosel_reg, vosel_mask, _da_vsel_mask)	\
[MT6357_ID_##vreg] = {	\
	.desc = {	\
		.name = #vreg,	\
		.of_match = of_match_ptr(match),	\
		.regulators_node = "regulators",	\
		.ops = &mt6357_volt_range_ops,	\
		.type = REGULATOR_VOLTAGE,	\
		.id = MT6357_ID_##vreg,	\
		.owner = THIS_MODULE,	\
		.n_voltages = ((max) - (min)) / (step) + 1,	\
		.linear_ranges = volt_ranges,	\
		.n_linear_ranges = ARRAY_SIZE(volt_ranges),	\
		.vsel_reg = vosel_reg,	\
		.vsel_mask = vosel_mask,	\
		.enable_reg = MT6357_BUCK_##vreg##_CON0,	\
		.enable_mask = BIT(0),	\
	},	\
	.da_vsel_reg = MT6357_BUCK_##vreg##_DBG0,	\
	.da_vsel_mask = _da_vsel_mask,	\
}
/* LDO with a discrete voltage table (zero entries = unused selector codes) */
#define MT6357_LDO(match, vreg, ldo_volt_table,	\
	enreg, vosel, vosel_mask)	\
[MT6357_ID_##vreg] = {	\
	.desc = {	\
		.name = #vreg,	\
		.of_match = of_match_ptr(match),	\
		.regulators_node = "regulators",	\
		.ops = &mt6357_volt_table_ops,	\
		.type = REGULATOR_VOLTAGE,	\
		.id = MT6357_ID_##vreg,	\
		.owner = THIS_MODULE,	\
		.n_voltages = ARRAY_SIZE(ldo_volt_table),	\
		.volt_table = ldo_volt_table,	\
		.vsel_reg = vosel,	\
		.vsel_mask = vosel_mask,	\
		.enable_reg = enreg,	\
		.enable_mask = BIT(0),	\
	},	\
}
/* Linear-range LDO (VSRAM_*); read-back goes through the DBG0 monitor reg */
#define MT6357_LDO1(match, vreg, min, max, step, volt_ranges,	\
	enreg, vosel, vosel_mask)	\
[MT6357_ID_##vreg] = {	\
	.desc = {	\
		.name = #vreg,	\
		.of_match = of_match_ptr(match),	\
		.regulators_node = "regulators",	\
		.ops = &mt6357_volt_range_ops,	\
		.type = REGULATOR_VOLTAGE,	\
		.id = MT6357_ID_##vreg,	\
		.owner = THIS_MODULE,	\
		.n_voltages = ((max) - (min)) / (step) + 1,	\
		.linear_ranges = volt_ranges,	\
		.n_linear_ranges = ARRAY_SIZE(volt_ranges),	\
		.vsel_reg = vosel,	\
		.vsel_mask = vosel_mask,	\
		.enable_reg = enreg,	\
		.enable_mask = BIT(0),	\
	},	\
	.da_vsel_reg = MT6357_LDO_##vreg##_DBG0,	\
	.da_vsel_mask = 0x7f00,	\
}
/* Fixed-voltage LDO: on/off only */
#define MT6357_REG_FIXED(match, vreg, volt)	\
[MT6357_ID_##vreg] = {	\
	.desc = {	\
		.name = #vreg,	\
		.of_match = of_match_ptr(match),	\
		.regulators_node = "regulators",	\
		.ops = &mt6357_volt_fixed_ops,	\
		.type = REGULATOR_VOLTAGE,	\
		.id = MT6357_ID_##vreg,	\
		.owner = THIS_MODULE,	\
		.n_voltages = 1,	\
		.enable_reg = MT6357_LDO_##vreg##_CON0,	\
		.enable_mask = BIT(0),	\
		.min_uV = volt,	\
	},	\
}
/**
 * mt6357_get_buck_voltage_sel - read back a buck's achieved voltage selector
 *
 * @rdev: regulator to operate on
 *
 * Reads the monitor register described by da_vsel_reg/da_vsel_mask in the
 * regulator's info structure and returns the selector field shifted down
 * to bit 0, or a negative error code on regmap failure.
 */
static int mt6357_get_buck_voltage_sel(struct regulator_dev *rdev)
{
	struct mt6357_regulator_info *info = rdev_get_drvdata(rdev);
	int val, ret;

	ret = regmap_read(rdev->regmap, info->da_vsel_reg, &val);
	if (ret) {
		dev_err(&rdev->dev,
			"Failed to get mt6357 Buck %s vsel reg: %d\n",
			info->desc.name, ret);
		return ret;
	}

	return (val & info->da_vsel_mask) >> (ffs(info->da_vsel_mask) - 1);
}
/* Bucks and VSRAM LDOs: linear ranges, selector read back via DBG0 */
static const struct regulator_ops mt6357_volt_range_ops = {
	.list_voltage = regulator_list_voltage_linear_range,
	.map_voltage = regulator_map_voltage_linear_range,
	.set_voltage_sel = regulator_set_voltage_sel_regmap,
	.get_voltage_sel = mt6357_get_buck_voltage_sel,
	.set_voltage_time_sel = regulator_set_voltage_time_sel,
	.enable = regulator_enable_regmap,
	.disable = regulator_disable_regmap,
	.is_enabled = regulator_is_enabled_regmap,
};
/* Table-based LDOs */
static const struct regulator_ops mt6357_volt_table_ops = {
	.list_voltage = regulator_list_voltage_table,
	.map_voltage = regulator_map_voltage_iterate,
	.set_voltage_sel = regulator_set_voltage_sel_regmap,
	.get_voltage_sel = regulator_get_voltage_sel_regmap,
	.set_voltage_time_sel = regulator_set_voltage_time_sel,
	.enable = regulator_enable_regmap,
	.disable = regulator_disable_regmap,
	.is_enabled = regulator_is_enabled_regmap,
};
/* Fixed-voltage LDOs: on/off only */
static const struct regulator_ops mt6357_volt_fixed_ops = {
	.list_voltage = regulator_list_voltage_linear,
	.enable = regulator_enable_regmap,
	.disable = regulator_disable_regmap,
	.is_enabled = regulator_is_enabled_regmap,
};
/*
 * LDO voltage tables, indexed by hardware selector code.  A 0 entry marks
 * a selector with no defined output; such entries are never selected by
 * the table-based ops above.
 */
static const int vxo22_voltages[] = {
	2200000,
	0,
	2400000,
};
static const int vefuse_voltages[] = {
	1200000,
	1300000,
	1500000,
	0,
	1800000,
	0,
	0,
	0,
	0,
	2800000,
	2900000,
	3000000,
	0,
	3300000,
};
static const int vcn33_voltages[] = {
	0,
	3300000,
	3400000,
	3500000,
};
static const int vcama_voltages[] = {
	0,
	0,
	0,
	0,
	0,
	0,
	0,
	2500000,
	0,
	0,
	2800000,
};
static const int vcamd_voltages[] = {
	0,
	0,
	0,
	0,
	1000000,
	1100000,
	1200000,
	1300000,
	0,
	1500000,
	0,
	0,
	1800000,
};
static const int vldo28_voltages[] = {
	0,
	2800000,
	0,
	3000000,
};
static const int vdram_voltages[] = {
	0,
	1100000,
	1200000,
};
static const int vsim_voltages[] = {
	0,
	0,
	0,
	1700000,
	1800000,
	0,
	0,
	0,
	2700000,
	0,
	0,
	3000000,
	3100000,
};
static const int vibr_voltages[] = {
	1200000,
	1300000,
	1500000,
	0,
	1800000,
	2000000,
	0,
	0,
	0,
	2800000,
	0,
	3000000,
	0,
	3300000,
};
static const int vmc_voltages[] = {
	0,
	0,
	0,
	0,
	1800000,
	0,
	0,
	0,
	0,
	0,
	2900000,
	3000000,
	0,
	3300000,
};
static const int vmch_voltages[] = {
	0,
	0,
	2900000,
	3000000,
	0,
	3300000,
};
static const int vemc_voltages[] = {
	0,
	0,
	2900000,
	3000000,
	0,
	3300000,
};
static const int vusb_voltages[] = {
	0,
	0,
	0,
	3000000,
	3100000,
};
/* Buck/VSRAM linear selector-to-voltage ranges (base uV, selector span, step) */
static const struct linear_range buck_volt_range1[] = {
	REGULATOR_LINEAR_RANGE(518750, 0, 0x7f, 6250),
};
static const struct linear_range buck_volt_range2[] = {
	REGULATOR_LINEAR_RANGE(500000, 0, 0x7f, 6250),
};
static const struct linear_range buck_volt_range3[] = {
	REGULATOR_LINEAR_RANGE(500000, 0, 0x3f, 50000),
};
static const struct linear_range buck_volt_range4[] = {
	REGULATOR_LINEAR_RANGE(1200000, 0, 0x7f, 12500),
};
/* The array is indexed by id(MT6357_ID_XXX); registered in order at probe */
static struct mt6357_regulator_info mt6357_regulators[] = {
	/* Bucks */
	MT6357_BUCK("buck-vcore", VCORE, 518750, 1312500, 6250,
		buck_volt_range1, MT6357_BUCK_VCORE_ELR0, 0x7f, 0x7f),
	MT6357_BUCK("buck-vproc", VPROC, 518750, 1312500, 6250,
		buck_volt_range1, MT6357_BUCK_VPROC_ELR0, 0x7f, 0x7f),
	MT6357_BUCK("buck-vmodem", VMODEM, 500000, 1293750, 6250,
		buck_volt_range2, MT6357_BUCK_VMODEM_ELR0, 0x7f, 0x7f),
	MT6357_BUCK("buck-vpa", VPA, 500000, 3650000, 50000,
		buck_volt_range3, MT6357_BUCK_VPA_CON1, 0x3f, 0x3f),
	MT6357_BUCK("buck-vs1", VS1, 1200000, 2787500, 12500,
		buck_volt_range4, MT6357_BUCK_VS1_ELR0, 0x7f, 0x7f),

	/* LDOs */
	MT6357_LDO("ldo-vcama", VCAMA, vcama_voltages,
		   MT6357_LDO_VCAMA_CON0, MT6357_VCAMA_ANA_CON0, 0xf00),
	MT6357_LDO("ldo-vcamd", VCAMD, vcamd_voltages,
		   MT6357_LDO_VCAMD_CON0, MT6357_VCAMD_ANA_CON0, 0xf00),
	MT6357_LDO("ldo-vcn33-bt", VCN33_BT, vcn33_voltages,
		   MT6357_LDO_VCN33_CON0_0, MT6357_VCN33_ANA_CON0, 0x300),
	MT6357_LDO("ldo-vcn33-wifi", VCN33_WIFI, vcn33_voltages,
		   MT6357_LDO_VCN33_CON0_1, MT6357_VCN33_ANA_CON0, 0x300),
	MT6357_LDO("ldo-vdram", VDRAM, vdram_voltages,
		   MT6357_LDO_VDRAM_CON0, MT6357_VDRAM_ELR_2, 0x300),
	MT6357_LDO("ldo-vefuse", VEFUSE, vefuse_voltages,
		   MT6357_LDO_VEFUSE_CON0, MT6357_VEFUSE_ANA_CON0, 0xf00),
	MT6357_LDO("ldo-vemc", VEMC, vemc_voltages,
		   MT6357_LDO_VEMC_CON0, MT6357_VEMC_ANA_CON0, 0x700),
	MT6357_LDO("ldo-vibr", VIBR, vibr_voltages,
		   MT6357_LDO_VIBR_CON0, MT6357_VIBR_ANA_CON0, 0xf00),
	MT6357_LDO("ldo-vldo28", VLDO28, vldo28_voltages,
		   MT6357_LDO_VLDO28_CON0_0, MT6357_VLDO28_ANA_CON0, 0x300),
	MT6357_LDO("ldo-vmc", VMC, vmc_voltages,
		   MT6357_LDO_VMC_CON0, MT6357_VMC_ANA_CON0, 0xf00),
	MT6357_LDO("ldo-vmch", VMCH, vmch_voltages,
		   MT6357_LDO_VMCH_CON0, MT6357_VMCH_ANA_CON0, 0x700),
	MT6357_LDO("ldo-vsim1", VSIM1, vsim_voltages,
		   MT6357_LDO_VSIM1_CON0, MT6357_VSIM1_ANA_CON0, 0xf00),
	MT6357_LDO("ldo-vsim2", VSIM2, vsim_voltages,
		   MT6357_LDO_VSIM2_CON0, MT6357_VSIM2_ANA_CON0, 0xf00),
	MT6357_LDO("ldo-vusb33", VUSB33, vusb_voltages,
		   MT6357_LDO_VUSB33_CON0_0, MT6357_VUSB33_ANA_CON0, 0x700),
	MT6357_LDO("ldo-vxo22", VXO22, vxo22_voltages,
		   MT6357_LDO_VXO22_CON0, MT6357_VXO22_ANA_CON0, 0x300),
	MT6357_LDO1("ldo-vsram-proc", VSRAM_PROC, 518750, 1312500, 6250,
		    buck_volt_range1, MT6357_LDO_VSRAM_PROC_CON0,
		    MT6357_LDO_VSRAM_CON0, 0x7f00),
	MT6357_LDO1("ldo-vsram-others", VSRAM_OTHERS, 518750, 1312500, 6250,
		    buck_volt_range1, MT6357_LDO_VSRAM_OTHERS_CON0,
		    MT6357_LDO_VSRAM_CON1, 0x7f00),
	MT6357_REG_FIXED("ldo-vaud28", VAUD28, 2800000),
	MT6357_REG_FIXED("ldo-vaux18", VAUX18, 1800000),
	MT6357_REG_FIXED("ldo-vcamio18", VCAMIO, 1800000),
	MT6357_REG_FIXED("ldo-vcn18", VCN18, 1800000),
	MT6357_REG_FIXED("ldo-vcn28", VCN28, 2800000),
	MT6357_REG_FIXED("ldo-vfe28", VFE28, 2800000),
	MT6357_REG_FIXED("ldo-vio18", VIO18, 1800000),
	MT6357_REG_FIXED("ldo-vio28", VIO28, 2800000),
	MT6357_REG_FIXED("ldo-vrf12", VRF12, 1200000),
	MT6357_REG_FIXED("ldo-vrf18", VRF18, 1800000),
};
/*
 * Register every MT6357 regulator with the core.  The MFD parent owns the
 * regmap and the device-tree node; of_match/regulators_node in each
 * descriptor let the core find the per-regulator DT data itself.
 */
static int mt6357_regulator_probe(struct platform_device *pdev)
{
	struct mt6397_chip *chip = dev_get_drvdata(pdev->dev.parent);
	struct regulator_config config = {};
	struct regulator_dev *rdev;
	int i;

	/* reuse the parent's DT node so of_match lookups work */
	pdev->dev.of_node = pdev->dev.parent->of_node;

	config.dev = &pdev->dev;
	config.regmap = chip->regmap;

	for (i = 0; i < MT6357_MAX_REGULATOR; i++) {
		config.driver_data = &mt6357_regulators[i];

		rdev = devm_regulator_register(&pdev->dev,
					       &mt6357_regulators[i].desc,
					       &config);
		if (IS_ERR(rdev)) {
			dev_err(&pdev->dev, "failed to register %s\n",
				mt6357_regulators[i].desc.name);
			return PTR_ERR(rdev);
		}
	}

	return 0;
}
/* Matched against the platform device the MT6357 MFD core instantiates */
static const struct platform_device_id mt6357_platform_ids[] = {
	{ "mt6357-regulator" },
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(platform, mt6357_platform_ids);

static struct platform_driver mt6357_regulator_driver = {
	.driver = {
		.name = "mt6357-regulator",
		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
	},
	.probe = mt6357_regulator_probe,
	.id_table = mt6357_platform_ids,
};

module_platform_driver(mt6357_regulator_driver);

MODULE_AUTHOR("Chen Zhong <[email protected]>")
MODULE_AUTHOR("Fabien Parent <[email protected]>")
MODULE_AUTHOR("Alexandre Mergnat <[email protected]>")
MODULE_DESCRIPTION("Regulator Driver for MediaTek MT6357 PMIC")
MODULE_LICENSE("GPL")
| linux-master | drivers/regulator/mt6357-regulator.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Split TWL6030 logic from twl-regulator.c:
* Copyright (C) 2008 David Brownell
*
* Copyright (C) 2016 Nicolae Rosia <[email protected]>
*/
#include <linux/module.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/of.h>
#include <linux/regulator/driver.h>
#include <linux/regulator/machine.h>
#include <linux/regulator/of_regulator.h>
#include <linux/mfd/twl.h>
#include <linux/delay.h>
/* Per-regulator driver data; one instance is duplicated per probed node. */
struct twlreg_info {
	/* start of regulator's PM_RECEIVER control register bank */
	u8 base;
	/* twl resource ID, for resource control state machine */
	u8 id;
	/* TWL_6030_WARM_RESET and SMPS_* range flags (see below) */
	u8 flags;
	/* used by regulator core */
	struct regulator_desc desc;
	/* chip specific features */
	unsigned long features;
	/* data passed from board for external get/set voltage */
	void *data;
};

/* LDO control registers ... offset is from the base of its register bank.
 * The first three registers of all power resource banks help hardware to
 * manage the various resource groups.
 */
/* Common offset in TWL4030/6030 */
#define VREG_GRP		0
/* TWL6030 register offsets */
#define VREG_TRANS		1
#define VREG_STATE		2
#define VREG_VOLTAGE		3
#define VREG_VOLTAGE_SMPS	4
/* TWL6030 Misc register offsets */
#define VREG_BC_ALL		1
#define VREG_BC_REF		2
#define VREG_BC_PROC		3
#define VREG_BC_CLK_RST		4
/* TWL6030 LDO register values for VREG_VOLTAGE */
#define TWL6030_VREG_VOLTAGE_WR_S	BIT(7)
/* TWL6030 LDO register values for CFG_STATE */
#define TWL6030_CFG_STATE_OFF	0x00
#define TWL6030_CFG_STATE_ON	0x01
#define TWL6030_CFG_STATE_OFF2	0x02
#define TWL6030_CFG_STATE_SLEEP	0x03
#define TWL6030_CFG_STATE_GRP_SHIFT	5
#define TWL6030_CFG_STATE_APP_SHIFT	2
#define TWL6030_CFG_STATE_MASK	0x03
#define TWL6030_CFG_STATE_APP_MASK	(0x03 << TWL6030_CFG_STATE_APP_SHIFT)
#define TWL6030_CFG_STATE_APP(v)	(((v) & TWL6030_CFG_STATE_APP_MASK) >>\
						TWL6030_CFG_STATE_APP_SHIFT)
/* Flags for SMPS Voltage reading and LDO reading*/
#define SMPS_OFFSET_EN		BIT(0)
#define SMPS_EXTENDED_EN	BIT(1)
/* Keep the voltage setting across a warm reset (DT "ti,retain-on-reset") */
#define TWL_6030_WARM_RESET	BIT(3)
/* twl6032 SMPS EPROM values */
#define TWL6030_SMPS_OFFSET	0xB0
#define TWL6030_SMPS_MULT	0xB3
#define SMPS_MULTOFFSET_SMPS4	BIT(0)
#define SMPS_MULTOFFSET_VIO	BIT(1)
#define SMPS_MULTOFFSET_SMPS3	BIT(6)
/*
 * Read one byte from a register inside this regulator's bank.
 * Returns the register value (0..255) or a negative error code.
 */
static inline int
twlreg_read(struct twlreg_info *info, unsigned slave_subgp, unsigned offset)
{
	u8 val;
	int ret = twl_i2c_read_u8(slave_subgp, &val, info->base + offset);

	if (ret < 0)
		return ret;

	return val;
}
/* Write one byte to a register inside this regulator's bank. */
static inline int
twlreg_write(struct twlreg_info *info, unsigned slave_subgp, unsigned offset,
	     u8 value)
{
	unsigned reg = info->base + offset;

	return twl_i2c_write_u8(slave_subgp, value, reg);
}
/* generic power resource operations, which work on all regulators */

/* Fetch the resource-group (DEV_GRP) register for this regulator. */
static int twlreg_grp(struct regulator_dev *rdev)
{
	struct twlreg_info *info = rdev_get_drvdata(rdev);

	return twlreg_read(info, TWL_MODULE_PM_RECEIVER, VREG_GRP);
}
/*
 * Enable/disable regulators by joining/leaving the P1 (processor) group.
 * We assume nobody else is updating the DEV_GRP registers.
 */
/* definition for 6030 family */
#define P3_GRP_6030	BIT(2)		/* secondary processor, modem, etc */
#define P2_GRP_6030	BIT(1)		/* "peripherals" */
#define P1_GRP_6030	BIT(0)		/* CPU/Linux */
/*
 * Report whether a regulator is switched on.
 *
 * TWL6030: enabled only when the regulator belongs to the P1 (CPU) group
 * AND its application-state field reads back ON.
 * TWL6032 subclass: the group bits are unused, so only the raw state
 * field is checked.
 */
static int twl6030reg_is_enabled(struct regulator_dev *rdev)
{
	struct twlreg_info *info = rdev_get_drvdata(rdev);
	int grp = 0, val;

	if (!(twl_class_is_6030() && (info->features & TWL6032_SUBCLASS))) {
		grp = twlreg_grp(rdev);
		if (grp < 0)
			return grp;
		/* only membership in the CPU group counts as "ours" */
		grp &= P1_GRP_6030;
		val = twlreg_read(info, TWL_MODULE_PM_RECEIVER, VREG_STATE);
		val = TWL6030_CFG_STATE_APP(val);
	} else {
		val = twlreg_read(info, TWL_MODULE_PM_RECEIVER, VREG_STATE);
		val &= TWL6030_CFG_STATE_MASK;
		grp = 1;
	}

	return grp && (val == TWL6030_CFG_STATE_ON);
}
#define PB_I2C_BUSY BIT(0)
#define PB_I2C_BWEN BIT(1)
static int twl6030reg_enable(struct regulator_dev *rdev)
{
struct twlreg_info *info = rdev_get_drvdata(rdev);
int grp = 0;
int ret;
if (!(twl_class_is_6030() && (info->features & TWL6032_SUBCLASS)))
grp = twlreg_grp(rdev);
if (grp < 0)
return grp;
ret = twlreg_write(info, TWL_MODULE_PM_RECEIVER, VREG_STATE,
grp << TWL6030_CFG_STATE_GRP_SHIFT |
TWL6030_CFG_STATE_ON);
return ret;
}
/*
 * Turn a regulator off.  On TWL6030 the OFF state is applied to all three
 * processor groups so no group keeps the supply alive; TWL6032-subclass
 * parts ignore the group bits.
 */
static int twl6030reg_disable(struct regulator_dev *rdev)
{
	struct twlreg_info *info = rdev_get_drvdata(rdev);
	int grp = 0;

	if (!(twl_class_is_6030() && (info->features & TWL6032_SUBCLASS)))
		grp = P1_GRP_6030 | P2_GRP_6030 | P3_GRP_6030;

	/* For 6030, set the off state for all grps enabled */
	return twlreg_write(info, TWL_MODULE_PM_RECEIVER, VREG_STATE,
			    (grp << TWL6030_CFG_STATE_GRP_SHIFT) |
			    TWL6030_CFG_STATE_OFF);
}
/*
 * Map the hardware CFG_STATE field onto REGULATOR_STATUS_* values.
 * ON -> NORMAL, SLEEP -> STANDBY, everything else (OFF, OFF2, unknown)
 * is reported as OFF.
 */
static int twl6030reg_get_status(struct regulator_dev *rdev)
{
	struct twlreg_info *info = rdev_get_drvdata(rdev);
	int state;

	state = twlreg_grp(rdev);
	if (state < 0)
		return state;

	state = twlreg_read(info, TWL_MODULE_PM_RECEIVER, VREG_STATE);

	/* TWL6032 exposes the state directly; TWL6030 in the APP field. */
	if (info->features & TWL6032_SUBCLASS)
		state &= TWL6030_CFG_STATE_MASK;
	else
		state = TWL6030_CFG_STATE_APP(state);

	if (state == TWL6030_CFG_STATE_ON)
		return REGULATOR_STATUS_NORMAL;
	if (state == TWL6030_CFG_STATE_SLEEP)
		return REGULATOR_STATUS_STANDBY;

	return REGULATOR_STATUS_OFF;
}
/*
 * Map REGULATOR_MODE_* onto the TWL6030 state machine: NORMAL -> ON,
 * STANDBY -> SLEEP.  Other modes cannot be represented and yield -EINVAL.
 */
static int twl6030reg_set_mode(struct regulator_dev *rdev, unsigned mode)
{
	struct twlreg_info *info = rdev_get_drvdata(rdev);
	int grp = 0;
	int state;

	if (!(twl_class_is_6030() && (info->features & TWL6032_SUBCLASS))) {
		grp = twlreg_grp(rdev);
		if (grp < 0)
			return grp;
	}

	/* We can only set the mode through state machine commands... */
	switch (mode) {
	case REGULATOR_MODE_NORMAL:
		state = TWL6030_CFG_STATE_ON;
		break;
	case REGULATOR_MODE_STANDBY:
		state = TWL6030_CFG_STATE_SLEEP;
		break;
	default:
		return -EINVAL;
	}

	/* Compose the group bits with the requested state and write it. */
	return twlreg_write(info, TWL_MODULE_PM_RECEIVER, VREG_STATE,
			    (grp << TWL6030_CFG_STATE_GRP_SHIFT) | state);
}
/*
 * The core SMPSs (VDD1..VDD3) cannot be controlled through this register
 * interface; both accessors always return -ENODEV and only exist to give
 * the regulator core a valid ops table.
 */
static int twl6030coresmps_set_voltage(struct regulator_dev *rdev, int min_uV,
	int max_uV, unsigned *selector)
{
	return -ENODEV;
}

static int twl6030coresmps_get_voltage(struct regulator_dev *rdev)
{
	return -ENODEV;
}

static const struct regulator_ops twl6030coresmps_ops = {
	.set_voltage = twl6030coresmps_set_voltage,
	.get_voltage = twl6030coresmps_get_voltage,
};
/*
 * Program an LDO voltage selector.  When the regulator must survive a
 * warm reset, the write-select bit is OR-ed in so the setting is kept.
 */
static int
twl6030ldo_set_voltage_sel(struct regulator_dev *rdev, unsigned selector)
{
	struct twlreg_info *info = rdev_get_drvdata(rdev);
	unsigned vsel = selector;

	if (info->flags & TWL_6030_WARM_RESET)
		vsel |= TWL6030_VREG_VOLTAGE_WR_S;

	return twlreg_write(info, TWL_MODULE_PM_RECEIVER, VREG_VOLTAGE, vsel);
}
/*
 * Read back the LDO voltage selector.
 *
 * The warm-reset write-select bit shares the register with the selector
 * and must be masked out.  I2C errors are propagated *before* masking:
 * clearing bit 7 of a negative error code would corrupt it into a bogus
 * error value (the original code masked unconditionally).
 */
static int twl6030ldo_get_voltage_sel(struct regulator_dev *rdev)
{
	struct twlreg_info *info = rdev_get_drvdata(rdev);
	int vsel = twlreg_read(info, TWL_MODULE_PM_RECEIVER, VREG_VOLTAGE);

	if (vsel < 0)
		return vsel;

	if (info->flags & TWL_6030_WARM_RESET)
		vsel &= ~TWL6030_VREG_VOLTAGE_WR_S;

	return vsel;
}
/* Operations for adjustable LDOs (linear-range selector table). */
static const struct regulator_ops twl6030ldo_ops = {
	.list_voltage	= regulator_list_voltage_linear_range,

	.set_voltage_sel = twl6030ldo_set_voltage_sel,
	.get_voltage_sel = twl6030ldo_get_voltage_sel,

	.enable		= twl6030reg_enable,
	.disable	= twl6030reg_disable,
	.is_enabled	= twl6030reg_is_enabled,

	.set_mode	= twl6030reg_set_mode,

	.get_status	= twl6030reg_get_status,
};

/* Operations for fixed-voltage LDOs: no voltage-selection hooks. */
static const struct regulator_ops twl6030fixed_ops = {
	.list_voltage	= regulator_list_voltage_linear,

	.enable		= twl6030reg_enable,
	.disable	= twl6030reg_disable,
	.is_enabled	= twl6030reg_is_enabled,

	.set_mode	= twl6030reg_set_mode,

	.get_status	= twl6030reg_get_status,
};
/*
 * SMPS status and control
 */

/*
 * Translate an SMPS voltage selector into microvolts.
 *
 * The applicable table depends on two EPROM-programmed options held in
 * info->flags: SMPS_OFFSET_EN shifts the linear range up by 100 mV,
 * SMPS_EXTENDED_EN selects a coarser, higher-voltage scale.  In every
 * table selector 0 means "off", 1..57 are linear steps and 58..62 are
 * fixed non-linear points.
 */
static int twl6030smps_list_voltage(struct regulator_dev *rdev, unsigned index)
{
	struct twlreg_info *info = rdev_get_drvdata(rdev);
	int voltage = 0;

	switch (info->flags) {
	case SMPS_OFFSET_EN:
		/* 100 mV offset is added to the linear entries below */
		voltage = 100000;
		fallthrough;
	case 0:
		switch (index) {
		case 0:
			voltage = 0;
			break;
		case 58:
			voltage = 1350 * 1000;
			break;
		case 59:
			voltage = 1500 * 1000;
			break;
		case 60:
			voltage = 1800 * 1000;
			break;
		case 61:
			voltage = 1900 * 1000;
			break;
		case 62:
			voltage = 2100 * 1000;
			break;
		default:
			/* linear part: 600 mV (+offset) in 12.5 mV steps */
			voltage += (600000 + (12500 * (index - 1)));
		}
		break;
	case SMPS_EXTENDED_EN:
		switch (index) {
		case 0:
			voltage = 0;
			break;
		case 58:
			voltage = 2084 * 1000;
			break;
		case 59:
			voltage = 2315 * 1000;
			break;
		case 60:
			voltage = 2778 * 1000;
			break;
		case 61:
			voltage = 2932 * 1000;
			break;
		case 62:
			voltage = 3241 * 1000;
			break;
		default:
			/* linear part: 1.852 V base in 38.6 mV steps */
			voltage = (1852000 + (38600 * (index - 1)));
		}
		break;
	case SMPS_OFFSET_EN | SMPS_EXTENDED_EN:
		switch (index) {
		case 0:
			voltage = 0;
			break;
		case 58:
			voltage = 4167 * 1000;
			break;
		case 59:
			voltage = 2315 * 1000;
			break;
		case 60:
			voltage = 2778 * 1000;
			break;
		case 61:
			voltage = 2932 * 1000;
			break;
		case 62:
			voltage = 3241 * 1000;
			break;
		default:
			/* linear part: 2.161 V base in 38.6 mV steps */
			voltage = (2161000 + (38600 * (index - 1)));
		}
		break;
	}

	return voltage;
}
/*
 * Inverse of twl6030smps_list_voltage(): pick the smallest selector whose
 * voltage satisfies min_uV for the table selected by info->flags.
 *
 * NOTE(review): the SMPS_EXTENDED_EN and OFFSET|EXTENDED cases fall
 * through with vsel == 0 ("off") when the request is out of range instead
 * of returning -EINVAL like the first two cases — confirm this is the
 * intended behavior.
 */
static int twl6030smps_map_voltage(struct regulator_dev *rdev, int min_uV,
				   int max_uV)
{
	struct twlreg_info *info = rdev_get_drvdata(rdev);
	int vsel = 0;

	switch (info->flags) {
	case 0:
		if (min_uV == 0)
			vsel = 0;
		else if ((min_uV >= 600000) && (min_uV <= 1300000)) {
			vsel = DIV_ROUND_UP(min_uV - 600000, 12500);
			vsel++;
		}
		/* Values 1..57 for vsel are linear and can be calculated
		 * values 58..62 are non linear.
		 */
		else if ((min_uV > 1900000) && (min_uV <= 2100000))
			vsel = 62;
		else if ((min_uV > 1800000) && (min_uV <= 1900000))
			vsel = 61;
		else if ((min_uV > 1500000) && (min_uV <= 1800000))
			vsel = 60;
		else if ((min_uV > 1350000) && (min_uV <= 1500000))
			vsel = 59;
		else if ((min_uV > 1300000) && (min_uV <= 1350000))
			vsel = 58;
		else
			return -EINVAL;
		break;
	case SMPS_OFFSET_EN:
		if (min_uV == 0)
			vsel = 0;
		else if ((min_uV >= 700000) && (min_uV <= 1420000)) {
			vsel = DIV_ROUND_UP(min_uV - 700000, 12500);
			vsel++;
		}
		/* Values 1..57 for vsel are linear and can be calculated
		 * values 58..62 are non linear.
		 */
		else if ((min_uV > 1900000) && (min_uV <= 2100000))
			vsel = 62;
		else if ((min_uV > 1800000) && (min_uV <= 1900000))
			vsel = 61;
		else if ((min_uV > 1500000) && (min_uV <= 1800000))
			vsel = 60;
		else if ((min_uV > 1350000) && (min_uV <= 1500000))
			vsel = 59;
		else
			return -EINVAL;
		break;
	case SMPS_EXTENDED_EN:
		if (min_uV == 0) {
			vsel = 0;
		/*
		 * NOTE(review): the upper bound here tests max_uV while
		 * every sibling range bounds min_uV — looks inconsistent;
		 * verify against the TWL6032 reference manual.
		 */
		} else if ((min_uV >= 1852000) && (max_uV <= 4013600)) {
			vsel = DIV_ROUND_UP(min_uV - 1852000, 38600);
			vsel++;
		}
		break;
	case SMPS_OFFSET_EN|SMPS_EXTENDED_EN:
		if (min_uV == 0) {
			vsel = 0;
		} else if ((min_uV >= 2161000) && (min_uV <= 4321000)) {
			vsel = DIV_ROUND_UP(min_uV - 2161000, 38600);
			vsel++;
		}
		break;
	}

	return vsel;
}
/* Program the SMPS voltage selector register. */
static int twl6030smps_set_voltage_sel(struct regulator_dev *rdev,
				       unsigned int selector)
{
	struct twlreg_info *info = rdev_get_drvdata(rdev);

	return twlreg_write(info, TWL_MODULE_PM_RECEIVER, VREG_VOLTAGE_SMPS,
			    selector);
}

/* Read back the SMPS voltage selector (negative value on I2C error). */
static int twl6030smps_get_voltage_sel(struct regulator_dev *rdev)
{
	struct twlreg_info *info = rdev_get_drvdata(rdev);

	return twlreg_read(info, TWL_MODULE_PM_RECEIVER, VREG_VOLTAGE_SMPS);
}
/* Operations for the TWL6032 software-controlled SMPSs. */
static const struct regulator_ops twlsmps_ops = {
	.list_voltage		= twl6030smps_list_voltage,
	.map_voltage		= twl6030smps_map_voltage,

	.set_voltage_sel	= twl6030smps_set_voltage_sel,
	.get_voltage_sel	= twl6030smps_get_voltage_sel,

	.enable			= twl6030reg_enable,
	.disable		= twl6030reg_disable,
	.is_enabled		= twl6030reg_is_enabled,

	.set_mode		= twl6030reg_set_mode,

	.get_status		= twl6030reg_get_status,
};

/*----------------------------------------------------------------------*/

/*
 * Adjustable-LDO selector table: 0 = off, 1..24 = 1.0 V up in 100 mV
 * steps, 31 = fixed 2.75 V point.
 */
static const struct linear_range twl6030ldo_linear_range[] = {
	REGULATOR_LINEAR_RANGE(0, 0, 0, 0),
	REGULATOR_LINEAR_RANGE(1000000, 1, 24, 100000),
	REGULATOR_LINEAR_RANGE(2750000, 31, 31, 0),
};
/* Core SMPS: voltage not controllable through this interface. */
#define TWL6030_ADJUSTABLE_SMPS(label) \
static const struct twlreg_info TWL6030_INFO_##label = { \
	.desc = { \
		.name = #label, \
		.id = TWL6030_REG_##label, \
		.ops = &twl6030coresmps_ops, \
		.type = REGULATOR_VOLTAGE, \
		.owner = THIS_MODULE, \
		}, \
	}

/* Adjustable LDO on TWL6030, using the shared linear-range table. */
#define TWL6030_ADJUSTABLE_LDO(label, offset) \
static const struct twlreg_info TWL6030_INFO_##label = { \
	.base = offset, \
	.desc = { \
		.name = #label, \
		.id = TWL6030_REG_##label, \
		.n_voltages = 32, \
		.linear_ranges = twl6030ldo_linear_range, \
		.n_linear_ranges = ARRAY_SIZE(twl6030ldo_linear_range), \
		.ops = &twl6030ldo_ops, \
		.type = REGULATOR_VOLTAGE, \
		.owner = THIS_MODULE, \
		}, \
	}

/* Adjustable LDO on the TWL6032 subclass. */
#define TWL6032_ADJUSTABLE_LDO(label, offset) \
static const struct twlreg_info TWL6032_INFO_##label = { \
	.base = offset, \
	.features = TWL6032_SUBCLASS, \
	.desc = { \
		.name = #label, \
		.id = TWL6032_REG_##label, \
		.n_voltages = 32, \
		.linear_ranges = twl6030ldo_linear_range, \
		.n_linear_ranges = ARRAY_SIZE(twl6030ldo_linear_range), \
		.ops = &twl6030ldo_ops, \
		.type = REGULATOR_VOLTAGE, \
		.owner = THIS_MODULE, \
		}, \
	}

/* Fixed-voltage LDO (mVolts) with optional turn-on delay (us). */
#define TWL6030_FIXED_LDO(label, offset, mVolts, turnon_delay) \
static const struct twlreg_info TWLFIXED_INFO_##label = { \
	.base = offset, \
	.id = 0, \
	.desc = { \
		.name = #label, \
		.id = TWL6030##_REG_##label, \
		.n_voltages = 1, \
		.ops = &twl6030fixed_ops, \
		.type = REGULATOR_VOLTAGE, \
		.owner = THIS_MODULE, \
		.min_uV = mVolts * 1000, \
		.enable_time = turnon_delay, \
		.of_map_mode = NULL, \
		}, \
	}

/* Software-controlled SMPS on the TWL6032 subclass. */
#define TWL6032_ADJUSTABLE_SMPS(label, offset) \
static const struct twlreg_info TWLSMPS_INFO_##label = { \
	.base = offset, \
	.features = TWL6032_SUBCLASS, \
	.desc = { \
		.name = #label, \
		.id = TWL6032_REG_##label, \
		.n_voltages = 63, \
		.ops = &twlsmps_ops, \
		.type = REGULATOR_VOLTAGE, \
		.owner = THIS_MODULE, \
		}, \
	}
/* VUSBCP is managed *only* by the USB subchip */
/* 6030 REG with base as PMC Slave Misc : 0x0030 */
/* Turnon-delay and remap configuration values for 6030 are not
   verified since the specification is not public */

/* Instantiate one twlreg_info descriptor per supported regulator. */
TWL6030_ADJUSTABLE_SMPS(VDD1);
TWL6030_ADJUSTABLE_SMPS(VDD2);
TWL6030_ADJUSTABLE_SMPS(VDD3);
TWL6030_ADJUSTABLE_LDO(VAUX1_6030, 0x54);
TWL6030_ADJUSTABLE_LDO(VAUX2_6030, 0x58);
TWL6030_ADJUSTABLE_LDO(VAUX3_6030, 0x5c);
TWL6030_ADJUSTABLE_LDO(VMMC, 0x68);
TWL6030_ADJUSTABLE_LDO(VPP, 0x6c);
TWL6030_ADJUSTABLE_LDO(VUSIM, 0x74);
/* 6025 are renamed compared to 6030 versions */
TWL6032_ADJUSTABLE_LDO(LDO2, 0x54);
TWL6032_ADJUSTABLE_LDO(LDO4, 0x58);
TWL6032_ADJUSTABLE_LDO(LDO3, 0x5c);
TWL6032_ADJUSTABLE_LDO(LDO5, 0x68);
TWL6032_ADJUSTABLE_LDO(LDO1, 0x6c);
TWL6032_ADJUSTABLE_LDO(LDO7, 0x74);
TWL6032_ADJUSTABLE_LDO(LDO6, 0x60);
TWL6032_ADJUSTABLE_LDO(LDOLN, 0x64);
TWL6032_ADJUSTABLE_LDO(LDOUSB, 0x70);
TWL6030_FIXED_LDO(VANA, 0x50, 2100, 0);
TWL6030_FIXED_LDO(VCXIO, 0x60, 1800, 0);
TWL6030_FIXED_LDO(VDAC, 0x64, 1800, 0);
TWL6030_FIXED_LDO(VUSB, 0x70, 3300, 0);
TWL6030_FIXED_LDO(V1V8, 0x16, 1800, 0);
TWL6030_FIXED_LDO(V2V1, 0x1c, 2100, 0);
TWL6032_ADJUSTABLE_SMPS(SMPS3, 0x34);
TWL6032_ADJUSTABLE_SMPS(SMPS4, 0x10);
TWL6032_ADJUSTABLE_SMPS(VIO, 0x16);
/*
 * Read the SMPS offset EPROM register; each bit selects the offset
 * voltage table for the matching SMPS.
 *
 * On I2C failure return 0 (no offsets) instead of returning the value of
 * an uninitialized stack byte, which the original code did when
 * twl_i2c_read_u8() failed.
 */
static u8 twl_get_smps_offset(void)
{
	u8 value = 0;

	if (twl_i2c_read_u8(TWL_MODULE_PM_RECEIVER, &value,
			    TWL6030_SMPS_OFFSET) < 0)
		return 0;

	return value;
}
/*
 * Read the SMPS multiplier EPROM register; each bit selects the extended
 * voltage table for the matching SMPS.
 *
 * On I2C failure return 0 (no extended ranges) instead of returning the
 * value of an uninitialized stack byte, which the original code did when
 * twl_i2c_read_u8() failed.
 */
static u8 twl_get_smps_mult(void)
{
	u8 value = 0;

	if (twl_i2c_read_u8(TWL_MODULE_PM_RECEIVER, &value,
			    TWL6030_SMPS_MULT) < 0)
		return 0;

	return value;
}
/* Build an of_device_id entry pointing at a family's twlreg_info. */
#define TWL_OF_MATCH(comp, family, label) \
	{ \
		.compatible = comp, \
		.data = &family##_INFO_##label, \
	}

#define TWL6030_OF_MATCH(comp, label) TWL_OF_MATCH(comp, TWL6030, label)
#define TWL6032_OF_MATCH(comp, label) TWL_OF_MATCH(comp, TWL6032, label)
#define TWLFIXED_OF_MATCH(comp, label) TWL_OF_MATCH(comp, TWLFIXED, label)
#define TWLSMPS_OF_MATCH(comp, label) TWL_OF_MATCH(comp, TWLSMPS, label)
/* Device-tree match table; .data carries the regulator template. */
static const struct of_device_id twl_of_match[] = {
	TWL6030_OF_MATCH("ti,twl6030-vdd1", VDD1),
	TWL6030_OF_MATCH("ti,twl6030-vdd2", VDD2),
	TWL6030_OF_MATCH("ti,twl6030-vdd3", VDD3),
	TWL6030_OF_MATCH("ti,twl6030-vaux1", VAUX1_6030),
	TWL6030_OF_MATCH("ti,twl6030-vaux2", VAUX2_6030),
	TWL6030_OF_MATCH("ti,twl6030-vaux3", VAUX3_6030),
	TWL6030_OF_MATCH("ti,twl6030-vmmc", VMMC),
	TWL6030_OF_MATCH("ti,twl6030-vpp", VPP),
	TWL6030_OF_MATCH("ti,twl6030-vusim", VUSIM),
	TWL6032_OF_MATCH("ti,twl6032-ldo2", LDO2),
	TWL6032_OF_MATCH("ti,twl6032-ldo4", LDO4),
	TWL6032_OF_MATCH("ti,twl6032-ldo3", LDO3),
	TWL6032_OF_MATCH("ti,twl6032-ldo5", LDO5),
	TWL6032_OF_MATCH("ti,twl6032-ldo1", LDO1),
	TWL6032_OF_MATCH("ti,twl6032-ldo7", LDO7),
	TWL6032_OF_MATCH("ti,twl6032-ldo6", LDO6),
	TWL6032_OF_MATCH("ti,twl6032-ldoln", LDOLN),
	TWL6032_OF_MATCH("ti,twl6032-ldousb", LDOUSB),
	TWLFIXED_OF_MATCH("ti,twl6030-vana", VANA),
	TWLFIXED_OF_MATCH("ti,twl6030-vcxio", VCXIO),
	TWLFIXED_OF_MATCH("ti,twl6030-vdac", VDAC),
	TWLFIXED_OF_MATCH("ti,twl6030-vusb", VUSB),
	TWLFIXED_OF_MATCH("ti,twl6030-v1v8", V1V8),
	TWLFIXED_OF_MATCH("ti,twl6030-v2v1", V2V1),
	TWLSMPS_OF_MATCH("ti,twl6032-smps3", SMPS3),
	TWLSMPS_OF_MATCH("ti,twl6032-smps4", SMPS4),
	TWLSMPS_OF_MATCH("ti,twl6032-vio", VIO),
	{},
};
MODULE_DEVICE_TABLE(of, twl_of_match);
/*
 * Probe one regulator node: duplicate the matched template, clamp the
 * board constraints to what the chip supports, read the EPROM-programmed
 * SMPS range options for the TWL6032 SMPSs, and register the regulator.
 */
static int twlreg_probe(struct platform_device *pdev)
{
	int id;
	struct twlreg_info *info;
	const struct twlreg_info *template;
	struct regulator_init_data *initdata;
	struct regulation_constraints *c;
	struct regulator_dev *rdev;
	struct regulator_config config = { };
	struct device_node *np = pdev->dev.of_node;

	template = of_device_get_match_data(&pdev->dev);
	if (!template)
		return -ENODEV;

	id = template->desc.id;
	initdata = of_get_regulator_init_data(&pdev->dev, np, &template->desc);
	if (!initdata)
		return -EINVAL;

	/* Work on a device-managed copy so the template stays const. */
	info = devm_kmemdup(&pdev->dev, template, sizeof(*info), GFP_KERNEL);
	if (!info)
		return -ENOMEM;

	/* Constrain board-specific capabilities according to what
	 * this driver and the chip itself can actually do.
	 */
	c = &initdata->constraints;
	c->valid_modes_mask &= REGULATOR_MODE_NORMAL | REGULATOR_MODE_STANDBY;
	c->valid_ops_mask &= REGULATOR_CHANGE_VOLTAGE
				| REGULATOR_CHANGE_MODE
				| REGULATOR_CHANGE_STATUS;

	/* Pick the SMPS voltage table flags from the EPROM settings. */
	switch (id) {
	case TWL6032_REG_SMPS3:
		if (twl_get_smps_mult() & SMPS_MULTOFFSET_SMPS3)
			info->flags |= SMPS_EXTENDED_EN;
		if (twl_get_smps_offset() & SMPS_MULTOFFSET_SMPS3)
			info->flags |= SMPS_OFFSET_EN;
		break;
	case TWL6032_REG_SMPS4:
		if (twl_get_smps_mult() & SMPS_MULTOFFSET_SMPS4)
			info->flags |= SMPS_EXTENDED_EN;
		if (twl_get_smps_offset() & SMPS_MULTOFFSET_SMPS4)
			info->flags |= SMPS_OFFSET_EN;
		break;
	case TWL6032_REG_VIO:
		if (twl_get_smps_mult() & SMPS_MULTOFFSET_VIO)
			info->flags |= SMPS_EXTENDED_EN;
		if (twl_get_smps_offset() & SMPS_MULTOFFSET_VIO)
			info->flags |= SMPS_OFFSET_EN;
		break;
	}

	if (of_property_read_bool(np, "ti,retain-on-reset"))
		info->flags |= TWL_6030_WARM_RESET;

	config.dev = &pdev->dev;
	config.init_data = initdata;
	config.driver_data = info;
	config.of_node = np;

	rdev = devm_regulator_register(&pdev->dev, &info->desc, &config);
	if (IS_ERR(rdev)) {
		dev_err(&pdev->dev, "can't register %s, %ld\n",
			info->desc.name, PTR_ERR(rdev));
		return PTR_ERR(rdev);
	}
	platform_set_drvdata(pdev, rdev);

	/* NOTE: many regulators support short-circuit IRQs (presentable
	 * as REGULATOR_OVER_CURRENT notifications?) configured via:
	 *  - SC_CONFIG
	 *  - SC_DETECT1 (vintana2, vmmc1/2, vaux1/2/3/4)
	 *  - SC_DETECT2 (vusb, vdac, vio, vdd1/2, vpll2)
	 *  - IT_CONFIG
	 */
	return 0;
}
MODULE_ALIAS("platform:twl6030_reg");

static struct platform_driver twlreg_driver = {
	.probe		= twlreg_probe,
	/* NOTE: short name, to work around driver model truncation of
	 * "twl_regulator.12" (and friends) to "twl_regulator.1".
	 */
	.driver  = {
		.name  = "twl6030_reg",
		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
		.of_match_table = of_match_ptr(twl_of_match),
	},
};

/* Registered early via subsys_initcall() rather than module_init(). */
static int __init twlreg_init(void)
{
	return platform_driver_register(&twlreg_driver);
}
subsys_initcall(twlreg_init);

static void __exit twlreg_exit(void)
{
	platform_driver_unregister(&twlreg_driver);
}
module_exit(twlreg_exit)

MODULE_DESCRIPTION("TWL6030 regulator driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/regulator/twl6030-regulator.c |
// SPDX-License-Identifier: GPL-2.0+
//
// Copyright (c) 2012-2014 Samsung Electronics Co., Ltd
// http://www.samsung.com
#include <linux/bug.h>
#include <linux/err.h>
#include <linux/gpio/consumer.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/regmap.h>
#include <linux/platform_device.h>
#include <linux/regulator/driver.h>
#include <linux/regulator/machine.h>
#include <linux/regulator/of_regulator.h>
#include <linux/mfd/samsung/core.h>
#include <linux/mfd/samsung/s2mps11.h>
#include <linux/mfd/samsung/s2mps13.h>
#include <linux/mfd/samsung/s2mps14.h>
#include <linux/mfd/samsung/s2mps15.h>
#include <linux/mfd/samsung/s2mpu02.h>
/* The highest number of possible regulators for supported devices. */
#define S2MPS_REGULATOR_MAX		S2MPS13_REGULATOR_MAX

/* Driver-private state shared by all regulators of one PMIC instance. */
struct s2mps11_info {
	/* Cached ramp rates (uV/us); some register fields are shared by
	 * several bucks, hence the grouped names. */
	int ramp_delay2;
	int ramp_delay34;
	int ramp_delay5;
	int ramp_delay16;
	int ramp_delay7810;
	int ramp_delay9;

	enum sec_device_type dev_type;

	/*
	 * One bit for each S2MPS11/S2MPS13/S2MPS14/S2MPU02 regulator whether
	 * the suspend mode was enabled.
	 */
	DECLARE_BITMAP(suspend_state, S2MPS_REGULATOR_MAX);

	/*
	 * Array (size: number of regulators) with GPIO-s for external
	 * sleep control.
	 */
	struct gpio_desc **ext_control_gpiod;
};
/*
 * Convert a ramp rate in uV/us into the 2-bit register field value.
 * The supported hardware steps double from a 6250 uV/us base, so the
 * field is floor(log2(ramp_delay / 6250)) clamped to the 0..3 range.
 */
static int get_ramp_delay(int ramp_delay)
{
	unsigned char field = 0;
	int scaled = ramp_delay / 6250;

	while ((scaled >>= 1) != 0)
		field++;

	return (field > 3) ? 3 : field;
}
/*
 * Return the settling time (us) for a buck voltage transition, using the
 * per-buck ramp rate cached in s2mps11_info and falling back to the
 * descriptor's default ramp rate when none was configured.
 */
static int s2mps11_regulator_set_voltage_time_sel(struct regulator_dev *rdev,
				   unsigned int old_selector,
				   unsigned int new_selector)
{
	struct s2mps11_info *s2mps11 = rdev_get_drvdata(rdev);
	int rdev_id = rdev_get_id(rdev);
	unsigned int ramp_delay = 0;
	int old_volt, new_volt;

	switch (rdev_id) {
	case S2MPS11_BUCK2:
		ramp_delay = s2mps11->ramp_delay2;
		break;
	case S2MPS11_BUCK3:
	case S2MPS11_BUCK4:
		ramp_delay = s2mps11->ramp_delay34;
		break;
	case S2MPS11_BUCK5:
		ramp_delay = s2mps11->ramp_delay5;
		break;
	case S2MPS11_BUCK6:
	case S2MPS11_BUCK1:
		ramp_delay = s2mps11->ramp_delay16;
		break;
	case S2MPS11_BUCK7:
	case S2MPS11_BUCK8:
	case S2MPS11_BUCK10:
		ramp_delay = s2mps11->ramp_delay7810;
		break;
	case S2MPS11_BUCK9:
		ramp_delay = s2mps11->ramp_delay9;
	}

	/* No per-buck rate configured: use the descriptor default. */
	if (ramp_delay == 0)
		ramp_delay = rdev->desc->ramp_delay;

	old_volt = rdev->desc->min_uV + (rdev->desc->uV_step * old_selector);
	new_volt = rdev->desc->min_uV + (rdev->desc->uV_step * new_selector);

	return DIV_ROUND_UP(abs(new_volt - old_volt), ramp_delay);
}
/*
 * Configure the hardware ramp rate for a buck converter.
 *
 * Several bucks share one 2-bit ramp field (1/6, 3/4, 7/8/10), so the
 * cached per-group rate is raised to the largest request and the shared
 * field reflects that maximum.  Bucks 2, 3, 4 and 6 also have a ramp
 * enable bit; a zero ramp_delay disables ramp control for those.
 */
static int s2mps11_set_ramp_delay(struct regulator_dev *rdev, int ramp_delay)
{
	struct s2mps11_info *s2mps11 = rdev_get_drvdata(rdev);
	unsigned int ramp_val, ramp_shift, ramp_reg = S2MPS11_REG_RAMP_BUCK;
	unsigned int ramp_enable = 1, enable_shift = 0;
	int rdev_id = rdev_get_id(rdev);
	int ret;

	switch (rdev_id) {
	case S2MPS11_BUCK1:
		/* shared field: keep the larger of the two requests */
		if (ramp_delay > s2mps11->ramp_delay16)
			s2mps11->ramp_delay16 = ramp_delay;
		else
			ramp_delay = s2mps11->ramp_delay16;

		ramp_shift = S2MPS11_BUCK16_RAMP_SHIFT;
		break;
	case S2MPS11_BUCK2:
		enable_shift = S2MPS11_BUCK2_RAMP_EN_SHIFT;
		if (!ramp_delay) {
			ramp_enable = 0;
			break;
		}

		s2mps11->ramp_delay2 = ramp_delay;
		ramp_shift = S2MPS11_BUCK2_RAMP_SHIFT;
		ramp_reg = S2MPS11_REG_RAMP;
		break;
	case S2MPS11_BUCK3:
		enable_shift = S2MPS11_BUCK3_RAMP_EN_SHIFT;
		if (!ramp_delay) {
			ramp_enable = 0;
			break;
		}

		if (ramp_delay > s2mps11->ramp_delay34)
			s2mps11->ramp_delay34 = ramp_delay;
		else
			ramp_delay = s2mps11->ramp_delay34;

		ramp_shift = S2MPS11_BUCK34_RAMP_SHIFT;
		ramp_reg = S2MPS11_REG_RAMP;
		break;
	case S2MPS11_BUCK4:
		enable_shift = S2MPS11_BUCK4_RAMP_EN_SHIFT;
		if (!ramp_delay) {
			ramp_enable = 0;
			break;
		}

		if (ramp_delay > s2mps11->ramp_delay34)
			s2mps11->ramp_delay34 = ramp_delay;
		else
			ramp_delay = s2mps11->ramp_delay34;

		ramp_shift = S2MPS11_BUCK34_RAMP_SHIFT;
		ramp_reg = S2MPS11_REG_RAMP;
		break;
	case S2MPS11_BUCK5:
		s2mps11->ramp_delay5 = ramp_delay;
		ramp_shift = S2MPS11_BUCK5_RAMP_SHIFT;
		break;
	case S2MPS11_BUCK6:
		enable_shift = S2MPS11_BUCK6_RAMP_EN_SHIFT;
		if (!ramp_delay) {
			ramp_enable = 0;
			break;
		}

		if (ramp_delay > s2mps11->ramp_delay16)
			s2mps11->ramp_delay16 = ramp_delay;
		else
			ramp_delay = s2mps11->ramp_delay16;

		ramp_shift = S2MPS11_BUCK16_RAMP_SHIFT;
		break;
	case S2MPS11_BUCK7:
	case S2MPS11_BUCK8:
	case S2MPS11_BUCK10:
		if (ramp_delay > s2mps11->ramp_delay7810)
			s2mps11->ramp_delay7810 = ramp_delay;
		else
			ramp_delay = s2mps11->ramp_delay7810;

		ramp_shift = S2MPS11_BUCK7810_RAMP_SHIFT;
		break;
	case S2MPS11_BUCK9:
		s2mps11->ramp_delay9 = ramp_delay;
		ramp_shift = S2MPS11_BUCK9_RAMP_SHIFT;
		break;
	default:
		return 0;
	}

	if (!ramp_enable)
		goto ramp_disable;

	/* Ramp delay can be enabled/disabled only for buck[2346] */
	if ((rdev_id >= S2MPS11_BUCK2 && rdev_id <= S2MPS11_BUCK4) ||
	    rdev_id == S2MPS11_BUCK6) {
		ret = regmap_update_bits(rdev->regmap, S2MPS11_REG_RAMP,
					 1 << enable_shift, 1 << enable_shift);
		if (ret) {
			dev_err(&rdev->dev, "failed to enable ramp rate\n");
			return ret;
		}
	}

	ramp_val = get_ramp_delay(ramp_delay);

	return regmap_update_bits(rdev->regmap, ramp_reg, 0x3 << ramp_shift,
				  ramp_val << ramp_shift);

ramp_disable:
	return regmap_update_bits(rdev->regmap, S2MPS11_REG_RAMP,
				  1 << enable_shift, 0);
}
/*
 * Enable a regulator, selecting the enable-field value according to the
 * chip variant and runtime state: suspend-mode, external GPIO sleep
 * control (S2MPS13/14 only), or the plain enable mask.
 */
static int s2mps11_regulator_enable(struct regulator_dev *rdev)
{
	struct s2mps11_info *s2mps11 = rdev_get_drvdata(rdev);
	int rdev_id = rdev_get_id(rdev);
	unsigned int val;

	switch (s2mps11->dev_type) {
	case S2MPS11X:
		if (test_bit(rdev_id, s2mps11->suspend_state))
			val = S2MPS14_ENABLE_SUSPEND;
		else
			val = rdev->desc->enable_mask;
		break;
	case S2MPS13X:
	case S2MPS14X:
		if (test_bit(rdev_id, s2mps11->suspend_state))
			val = S2MPS14_ENABLE_SUSPEND;
		else if (s2mps11->ext_control_gpiod[rdev_id])
			val = S2MPS14_ENABLE_EXT_CONTROL;
		else
			val = rdev->desc->enable_mask;
		break;
	case S2MPU02:
		if (test_bit(rdev_id, s2mps11->suspend_state))
			val = S2MPU02_ENABLE_SUSPEND;
		else
			val = rdev->desc->enable_mask;
		break;
	default:
		return -EINVAL;
	}

	return regmap_update_bits(rdev->regmap, rdev->desc->enable_reg,
				  rdev->desc->enable_mask, val);
}
/*
 * Arrange for a regulator to be turned off by the PMIC during system
 * suspend.  Regulators that must stay on (or have no suspend mode) are
 * listed per chip variant and skipped.
 */
static int s2mps11_regulator_set_suspend_disable(struct regulator_dev *rdev)
{
	int ret;
	unsigned int val, state;
	struct s2mps11_info *s2mps11 = rdev_get_drvdata(rdev);
	int rdev_id = rdev_get_id(rdev);

	/* Below LDO should be always on or does not support suspend mode. */
	switch (s2mps11->dev_type) {
	case S2MPS11X:
		switch (rdev_id) {
		case S2MPS11_LDO2:
		case S2MPS11_LDO36:
		case S2MPS11_LDO37:
		case S2MPS11_LDO38:
			return 0;
		default:
			state = S2MPS14_ENABLE_SUSPEND;
			break;
		}
		break;
	case S2MPS13X:
	case S2MPS14X:
		switch (rdev_id) {
		case S2MPS14_LDO3:
			return 0;
		default:
			state = S2MPS14_ENABLE_SUSPEND;
			break;
		}
		break;
	case S2MPU02:
		switch (rdev_id) {
		case S2MPU02_LDO13:
		case S2MPU02_LDO14:
		case S2MPU02_LDO15:
		case S2MPU02_LDO17:
		case S2MPU02_BUCK7:
			state = S2MPU02_DISABLE_SUSPEND;
			break;
		default:
			state = S2MPU02_ENABLE_SUSPEND;
			break;
		}
		break;
	default:
		return -EINVAL;
	}

	ret = regmap_read(rdev->regmap, rdev->desc->enable_reg, &val);
	if (ret < 0)
		return ret;

	set_bit(rdev_id, s2mps11->suspend_state);
	/*
	 * Don't enable suspend mode if regulator is already disabled because
	 * this would effectively for a short time turn on the regulator after
	 * resuming.
	 * However we still want to toggle the suspend_state bit for regulator
	 * in case if it got enabled before suspending the system.
	 */
	if (!(val & rdev->desc->enable_mask))
		return 0;

	return regmap_update_bits(rdev->regmap, rdev->desc->enable_reg,
				  rdev->desc->enable_mask, state);
}
/* Operations for S2MPS11 LDOs (regmap-backed linear voltage control). */
static const struct regulator_ops s2mps11_ldo_ops = {
	.list_voltage		= regulator_list_voltage_linear,
	.map_voltage		= regulator_map_voltage_linear,
	.is_enabled		= regulator_is_enabled_regmap,
	.enable			= s2mps11_regulator_enable,
	.disable		= regulator_disable_regmap,
	.get_voltage_sel	= regulator_get_voltage_sel_regmap,
	.set_voltage_sel	= regulator_set_voltage_sel_regmap,
	.set_voltage_time_sel	= regulator_set_voltage_time_sel,
	.set_suspend_disable	= s2mps11_regulator_set_suspend_disable,
};

/* Operations for S2MPS11 bucks: adds ramp-rate control to the LDO set. */
static const struct regulator_ops s2mps11_buck_ops = {
	.list_voltage		= regulator_list_voltage_linear,
	.map_voltage		= regulator_map_voltage_linear,
	.is_enabled		= regulator_is_enabled_regmap,
	.enable			= s2mps11_regulator_enable,
	.disable		= regulator_disable_regmap,
	.get_voltage_sel	= regulator_get_voltage_sel_regmap,
	.set_voltage_sel	= regulator_set_voltage_sel_regmap,
	.set_voltage_time_sel	= s2mps11_regulator_set_voltage_time_sel,
	.set_ramp_delay		= s2mps11_set_ramp_delay,
	.set_suspend_disable	= s2mps11_regulator_set_suspend_disable,
};
/* Descriptor builders for the S2MPS11 regulator table below. */
#define regulator_desc_s2mps11_ldo(num, step) {		\
	.name		= "LDO"#num,			\
	.id		= S2MPS11_LDO##num,		\
	.ops		= &s2mps11_ldo_ops,		\
	.type		= REGULATOR_VOLTAGE,		\
	.owner		= THIS_MODULE,			\
	.ramp_delay	= RAMP_DELAY_12_MVUS,		\
	.min_uV		= MIN_800_MV,			\
	.uV_step	= step,				\
	.n_voltages	= S2MPS11_LDO_N_VOLTAGES,	\
	.vsel_reg	= S2MPS11_REG_L1CTRL + num - 1,	\
	.vsel_mask	= S2MPS11_LDO_VSEL_MASK,	\
	.enable_reg	= S2MPS11_REG_L1CTRL + num - 1,	\
	.enable_mask	= S2MPS11_ENABLE_MASK		\
}

#define regulator_desc_s2mps11_buck1_4(num) {			\
	.name		= "BUCK"#num,				\
	.id		= S2MPS11_BUCK##num,			\
	.ops		= &s2mps11_buck_ops,			\
	.type		= REGULATOR_VOLTAGE,			\
	.owner		= THIS_MODULE,				\
	.min_uV		= MIN_650_MV,				\
	.uV_step	= STEP_6_25_MV,				\
	.linear_min_sel	= 8,					\
	.n_voltages	= S2MPS11_BUCK12346_N_VOLTAGES,		\
	.ramp_delay	= S2MPS11_RAMP_DELAY,			\
	.vsel_reg	= S2MPS11_REG_B1CTRL2 + (num - 1) * 2,	\
	.vsel_mask	= S2MPS11_BUCK_VSEL_MASK,		\
	.enable_reg	= S2MPS11_REG_B1CTRL1 + (num - 1) * 2,	\
	.enable_mask	= S2MPS11_ENABLE_MASK			\
}

#define regulator_desc_s2mps11_buck5 {				\
	.name		= "BUCK5",				\
	.id		= S2MPS11_BUCK5,			\
	.ops		= &s2mps11_buck_ops,			\
	.type		= REGULATOR_VOLTAGE,			\
	.owner		= THIS_MODULE,				\
	.min_uV		= MIN_650_MV,				\
	.uV_step	= STEP_6_25_MV,				\
	.linear_min_sel	= 8,					\
	.n_voltages	= S2MPS11_BUCK5_N_VOLTAGES,		\
	.ramp_delay	= S2MPS11_RAMP_DELAY,			\
	.vsel_reg	= S2MPS11_REG_B5CTRL2,			\
	.vsel_mask	= S2MPS11_BUCK_VSEL_MASK,		\
	.enable_reg	= S2MPS11_REG_B5CTRL1,			\
	.enable_mask	= S2MPS11_ENABLE_MASK			\
}

#define regulator_desc_s2mps11_buck67810(num, min, step, min_sel, voltages) {	\
	.name		= "BUCK"#num,				\
	.id		= S2MPS11_BUCK##num,			\
	.ops		= &s2mps11_buck_ops,			\
	.type		= REGULATOR_VOLTAGE,			\
	.owner		= THIS_MODULE,				\
	.min_uV		= min,					\
	.uV_step	= step,					\
	.linear_min_sel	= min_sel,				\
	.n_voltages	= voltages,				\
	.ramp_delay	= S2MPS11_RAMP_DELAY,			\
	.vsel_reg	= S2MPS11_REG_B6CTRL2 + (num - 6) * 2,	\
	.vsel_mask	= S2MPS11_BUCK_VSEL_MASK,		\
	.enable_reg	= S2MPS11_REG_B6CTRL1 + (num - 6) * 2,	\
	.enable_mask	= S2MPS11_ENABLE_MASK			\
}

#define regulator_desc_s2mps11_buck9 {				\
	.name		= "BUCK9",				\
	.id		= S2MPS11_BUCK9,			\
	.ops		= &s2mps11_buck_ops,			\
	.type		= REGULATOR_VOLTAGE,			\
	.owner		= THIS_MODULE,				\
	.min_uV		= MIN_3000_MV,				\
	.uV_step	= STEP_25_MV,				\
	.n_voltages	= S2MPS11_BUCK9_N_VOLTAGES,		\
	.ramp_delay	= S2MPS11_RAMP_DELAY,			\
	.vsel_reg	= S2MPS11_REG_B9CTRL2,			\
	.vsel_mask	= S2MPS11_BUCK9_VSEL_MASK,		\
	.enable_reg	= S2MPS11_REG_B9CTRL1,			\
	.enable_mask	= S2MPS11_ENABLE_MASK			\
}
/* Descriptor table for every S2MPS11 regulator (38 LDOs + 10 bucks). */
static const struct regulator_desc s2mps11_regulators[] = {
	regulator_desc_s2mps11_ldo(1, STEP_25_MV),
	regulator_desc_s2mps11_ldo(2, STEP_50_MV),
	regulator_desc_s2mps11_ldo(3, STEP_50_MV),
	regulator_desc_s2mps11_ldo(4, STEP_50_MV),
	regulator_desc_s2mps11_ldo(5, STEP_50_MV),
	regulator_desc_s2mps11_ldo(6, STEP_25_MV),
	regulator_desc_s2mps11_ldo(7, STEP_50_MV),
	regulator_desc_s2mps11_ldo(8, STEP_50_MV),
	regulator_desc_s2mps11_ldo(9, STEP_50_MV),
	regulator_desc_s2mps11_ldo(10, STEP_50_MV),
	regulator_desc_s2mps11_ldo(11, STEP_25_MV),
	regulator_desc_s2mps11_ldo(12, STEP_50_MV),
	regulator_desc_s2mps11_ldo(13, STEP_50_MV),
	regulator_desc_s2mps11_ldo(14, STEP_50_MV),
	regulator_desc_s2mps11_ldo(15, STEP_50_MV),
	regulator_desc_s2mps11_ldo(16, STEP_50_MV),
	regulator_desc_s2mps11_ldo(17, STEP_50_MV),
	regulator_desc_s2mps11_ldo(18, STEP_50_MV),
	regulator_desc_s2mps11_ldo(19, STEP_50_MV),
	regulator_desc_s2mps11_ldo(20, STEP_50_MV),
	regulator_desc_s2mps11_ldo(21, STEP_50_MV),
	regulator_desc_s2mps11_ldo(22, STEP_25_MV),
	regulator_desc_s2mps11_ldo(23, STEP_25_MV),
	regulator_desc_s2mps11_ldo(24, STEP_50_MV),
	regulator_desc_s2mps11_ldo(25, STEP_50_MV),
	regulator_desc_s2mps11_ldo(26, STEP_50_MV),
	regulator_desc_s2mps11_ldo(27, STEP_25_MV),
	regulator_desc_s2mps11_ldo(28, STEP_50_MV),
	regulator_desc_s2mps11_ldo(29, STEP_50_MV),
	regulator_desc_s2mps11_ldo(30, STEP_50_MV),
	regulator_desc_s2mps11_ldo(31, STEP_50_MV),
	regulator_desc_s2mps11_ldo(32, STEP_50_MV),
	regulator_desc_s2mps11_ldo(33, STEP_50_MV),
	regulator_desc_s2mps11_ldo(34, STEP_50_MV),
	regulator_desc_s2mps11_ldo(35, STEP_25_MV),
	regulator_desc_s2mps11_ldo(36, STEP_50_MV),
	regulator_desc_s2mps11_ldo(37, STEP_50_MV),
	regulator_desc_s2mps11_ldo(38, STEP_50_MV),
	regulator_desc_s2mps11_buck1_4(1),
	regulator_desc_s2mps11_buck1_4(2),
	regulator_desc_s2mps11_buck1_4(3),
	regulator_desc_s2mps11_buck1_4(4),
	regulator_desc_s2mps11_buck5,
	regulator_desc_s2mps11_buck67810(6, MIN_650_MV, STEP_6_25_MV, 8,
					 S2MPS11_BUCK12346_N_VOLTAGES),
	regulator_desc_s2mps11_buck67810(7, MIN_750_MV, STEP_12_5_MV, 0,
					 S2MPS11_BUCK7810_N_VOLTAGES),
	regulator_desc_s2mps11_buck67810(8, MIN_750_MV, STEP_12_5_MV, 0,
					 S2MPS11_BUCK7810_N_VOLTAGES),
	regulator_desc_s2mps11_buck9,
	regulator_desc_s2mps11_buck67810(10, MIN_750_MV, STEP_12_5_MV, 0,
					 S2MPS11_BUCK7810_N_VOLTAGES),
};
/*
 * s2mps14_reg_ops is shared by the S2MPS13 descriptors below; it is
 * forward-declared here and defined after the S2MPS13 tables.
 */
static const struct regulator_ops s2mps14_reg_ops;
/*
 * S2MPS13 LDO descriptor: voltage selection and enable live in the same
 * per-LDO control register (L1CTRL + num - 1).
 */
#define regulator_desc_s2mps13_ldo(num, min, step, min_sel) { \
.name = "LDO"#num, \
.id = S2MPS13_LDO##num, \
.ops = &s2mps14_reg_ops, \
.type = REGULATOR_VOLTAGE, \
.owner = THIS_MODULE, \
.min_uV = min, \
.uV_step = step, \
.linear_min_sel = min_sel, \
.n_voltages = S2MPS14_LDO_N_VOLTAGES, \
.vsel_reg = S2MPS13_REG_L1CTRL + num - 1, \
.vsel_mask = S2MPS14_LDO_VSEL_MASK, \
.enable_reg = S2MPS13_REG_L1CTRL + num - 1, \
.enable_mask = S2MPS14_ENABLE_MASK \
}
/*
 * S2MPS13 BUCK1..6: each buck has a CTRL/OUT register pair spaced two
 * registers apart, hence the (num - 1) * 2 offsets.
 */
#define regulator_desc_s2mps13_buck(num, min, step, min_sel) { \
.name = "BUCK"#num, \
.id = S2MPS13_BUCK##num, \
.ops = &s2mps14_reg_ops, \
.type = REGULATOR_VOLTAGE, \
.owner = THIS_MODULE, \
.min_uV = min, \
.uV_step = step, \
.linear_min_sel = min_sel, \
.n_voltages = S2MPS14_BUCK_N_VOLTAGES, \
.ramp_delay = S2MPS13_BUCK_RAMP_DELAY, \
.vsel_reg = S2MPS13_REG_B1OUT + (num - 1) * 2, \
.vsel_mask = S2MPS14_BUCK_VSEL_MASK, \
.enable_reg = S2MPS13_REG_B1CTRL + (num - 1) * 2, \
.enable_mask = S2MPS14_ENABLE_MASK \
}
/*
 * S2MPS13 BUCK7: register layout differs from BUCK1..6 — the output
 * register uses a (num) * 2 - 1 offset while enable keeps the
 * (num - 1) * 2 stride.
 */
#define regulator_desc_s2mps13_buck7(num, min, step, min_sel) { \
.name = "BUCK"#num, \
.id = S2MPS13_BUCK##num, \
.ops = &s2mps14_reg_ops, \
.type = REGULATOR_VOLTAGE, \
.owner = THIS_MODULE, \
.min_uV = min, \
.uV_step = step, \
.linear_min_sel = min_sel, \
.n_voltages = S2MPS14_BUCK_N_VOLTAGES, \
.ramp_delay = S2MPS13_BUCK_RAMP_DELAY, \
.vsel_reg = S2MPS13_REG_B1OUT + (num) * 2 - 1, \
.vsel_mask = S2MPS14_BUCK_VSEL_MASK, \
.enable_reg = S2MPS13_REG_B1CTRL + (num - 1) * 2, \
.enable_mask = S2MPS14_ENABLE_MASK \
}
/*
 * S2MPS13 BUCK8..10: both output and enable registers use the
 * (num) * 2 - 1 offset.
 */
#define regulator_desc_s2mps13_buck8_10(num, min, step, min_sel) { \
.name = "BUCK"#num, \
.id = S2MPS13_BUCK##num, \
.ops = &s2mps14_reg_ops, \
.type = REGULATOR_VOLTAGE, \
.owner = THIS_MODULE, \
.min_uV = min, \
.uV_step = step, \
.linear_min_sel = min_sel, \
.n_voltages = S2MPS14_BUCK_N_VOLTAGES, \
.ramp_delay = S2MPS13_BUCK_RAMP_DELAY, \
.vsel_reg = S2MPS13_REG_B1OUT + (num) * 2 - 1, \
.vsel_mask = S2MPS14_BUCK_VSEL_MASK, \
.enable_reg = S2MPS13_REG_B1CTRL + (num) * 2 - 1, \
.enable_mask = S2MPS14_ENABLE_MASK \
}
/*
 * Full S2MPS13 regulator table. Per-LDO minimum voltage, step size and
 * starting selector differ between LDO voltage groups.
 */
static const struct regulator_desc s2mps13_regulators[] = {
regulator_desc_s2mps13_ldo(1, MIN_800_MV, STEP_12_5_MV, 0x00),
regulator_desc_s2mps13_ldo(2, MIN_1400_MV, STEP_50_MV, 0x0C),
regulator_desc_s2mps13_ldo(3, MIN_1000_MV, STEP_25_MV, 0x08),
regulator_desc_s2mps13_ldo(4, MIN_800_MV, STEP_12_5_MV, 0x00),
regulator_desc_s2mps13_ldo(5, MIN_800_MV, STEP_12_5_MV, 0x00),
regulator_desc_s2mps13_ldo(6, MIN_800_MV, STEP_12_5_MV, 0x00),
regulator_desc_s2mps13_ldo(7, MIN_1000_MV, STEP_25_MV, 0x08),
regulator_desc_s2mps13_ldo(8, MIN_1000_MV, STEP_25_MV, 0x08),
regulator_desc_s2mps13_ldo(9, MIN_1000_MV, STEP_25_MV, 0x08),
regulator_desc_s2mps13_ldo(10, MIN_1400_MV, STEP_50_MV, 0x0C),
regulator_desc_s2mps13_ldo(11, MIN_800_MV, STEP_25_MV, 0x10),
regulator_desc_s2mps13_ldo(12, MIN_800_MV, STEP_25_MV, 0x10),
regulator_desc_s2mps13_ldo(13, MIN_800_MV, STEP_25_MV, 0x10),
regulator_desc_s2mps13_ldo(14, MIN_800_MV, STEP_12_5_MV, 0x00),
regulator_desc_s2mps13_ldo(15, MIN_800_MV, STEP_12_5_MV, 0x00),
regulator_desc_s2mps13_ldo(16, MIN_1400_MV, STEP_50_MV, 0x0C),
regulator_desc_s2mps13_ldo(17, MIN_1400_MV, STEP_50_MV, 0x0C),
regulator_desc_s2mps13_ldo(18, MIN_1000_MV, STEP_25_MV, 0x08),
regulator_desc_s2mps13_ldo(19, MIN_1000_MV, STEP_25_MV, 0x08),
regulator_desc_s2mps13_ldo(20, MIN_1400_MV, STEP_50_MV, 0x0C),
regulator_desc_s2mps13_ldo(21, MIN_1000_MV, STEP_25_MV, 0x08),
regulator_desc_s2mps13_ldo(22, MIN_1000_MV, STEP_25_MV, 0x08),
regulator_desc_s2mps13_ldo(23, MIN_800_MV, STEP_12_5_MV, 0x00),
regulator_desc_s2mps13_ldo(24, MIN_800_MV, STEP_12_5_MV, 0x00),
regulator_desc_s2mps13_ldo(25, MIN_1400_MV, STEP_50_MV, 0x0C),
regulator_desc_s2mps13_ldo(26, MIN_1400_MV, STEP_50_MV, 0x0C),
regulator_desc_s2mps13_ldo(27, MIN_1400_MV, STEP_50_MV, 0x0C),
regulator_desc_s2mps13_ldo(28, MIN_1000_MV, STEP_25_MV, 0x08),
regulator_desc_s2mps13_ldo(29, MIN_1400_MV, STEP_50_MV, 0x0C),
regulator_desc_s2mps13_ldo(30, MIN_1400_MV, STEP_50_MV, 0x0C),
regulator_desc_s2mps13_ldo(31, MIN_1000_MV, STEP_25_MV, 0x08),
regulator_desc_s2mps13_ldo(32, MIN_1000_MV, STEP_25_MV, 0x08),
regulator_desc_s2mps13_ldo(33, MIN_1400_MV, STEP_50_MV, 0x0C),
regulator_desc_s2mps13_ldo(34, MIN_1000_MV, STEP_25_MV, 0x08),
regulator_desc_s2mps13_ldo(35, MIN_1400_MV, STEP_50_MV, 0x0C),
regulator_desc_s2mps13_ldo(36, MIN_800_MV, STEP_12_5_MV, 0x00),
regulator_desc_s2mps13_ldo(37, MIN_1000_MV, STEP_25_MV, 0x08),
regulator_desc_s2mps13_ldo(38, MIN_1400_MV, STEP_50_MV, 0x0C),
regulator_desc_s2mps13_ldo(39, MIN_1000_MV, STEP_25_MV, 0x08),
regulator_desc_s2mps13_ldo(40, MIN_1400_MV, STEP_50_MV, 0x0C),
regulator_desc_s2mps13_buck(1, MIN_500_MV, STEP_6_25_MV, 0x10),
regulator_desc_s2mps13_buck(2, MIN_500_MV, STEP_6_25_MV, 0x10),
regulator_desc_s2mps13_buck(3, MIN_500_MV, STEP_6_25_MV, 0x10),
regulator_desc_s2mps13_buck(4, MIN_500_MV, STEP_6_25_MV, 0x10),
regulator_desc_s2mps13_buck(5, MIN_500_MV, STEP_6_25_MV, 0x10),
regulator_desc_s2mps13_buck(6, MIN_500_MV, STEP_6_25_MV, 0x10),
regulator_desc_s2mps13_buck7(7, MIN_500_MV, STEP_6_25_MV, 0x10),
regulator_desc_s2mps13_buck8_10(8, MIN_1000_MV, STEP_12_5_MV, 0x20),
regulator_desc_s2mps13_buck8_10(9, MIN_1000_MV, STEP_12_5_MV, 0x20),
regulator_desc_s2mps13_buck8_10(10, MIN_500_MV, STEP_6_25_MV, 0x10),
};
/*
 * Ops shared by S2MPS13 and S2MPS14 regulators: linear voltage maps over
 * regmap, with the driver's suspend-aware enable/disable helpers.
 */
static const struct regulator_ops s2mps14_reg_ops = {
.list_voltage = regulator_list_voltage_linear,
.map_voltage = regulator_map_voltage_linear,
.is_enabled = regulator_is_enabled_regmap,
.enable = s2mps11_regulator_enable,
.disable = regulator_disable_regmap,
.get_voltage_sel = regulator_get_voltage_sel_regmap,
.set_voltage_sel = regulator_set_voltage_sel_regmap,
.set_voltage_time_sel = regulator_set_voltage_time_sel,
.set_suspend_disable = s2mps11_regulator_set_suspend_disable,
};
/*
 * S2MPS14 LDO descriptor: voltage and enable share one control register
 * (L1CTRL + num - 1).
 */
#define regulator_desc_s2mps14_ldo(num, min, step) { \
.name = "LDO"#num, \
.id = S2MPS14_LDO##num, \
.ops = &s2mps14_reg_ops, \
.type = REGULATOR_VOLTAGE, \
.owner = THIS_MODULE, \
.min_uV = min, \
.uV_step = step, \
.n_voltages = S2MPS14_LDO_N_VOLTAGES, \
.vsel_reg = S2MPS14_REG_L1CTRL + num - 1, \
.vsel_mask = S2MPS14_LDO_VSEL_MASK, \
.enable_reg = S2MPS14_REG_L1CTRL + num - 1, \
.enable_mask = S2MPS14_ENABLE_MASK \
}
/*
 * S2MPS14 BUCK descriptor: CTRL1/CTRL2 register pairs spaced two
 * registers apart per buck.
 */
#define regulator_desc_s2mps14_buck(num, min, step, min_sel) { \
.name = "BUCK"#num, \
.id = S2MPS14_BUCK##num, \
.ops = &s2mps14_reg_ops, \
.type = REGULATOR_VOLTAGE, \
.owner = THIS_MODULE, \
.min_uV = min, \
.uV_step = step, \
.n_voltages = S2MPS14_BUCK_N_VOLTAGES, \
.linear_min_sel = min_sel, \
.ramp_delay = S2MPS14_BUCK_RAMP_DELAY, \
.vsel_reg = S2MPS14_REG_B1CTRL2 + (num - 1) * 2, \
.vsel_mask = S2MPS14_BUCK_VSEL_MASK, \
.enable_reg = S2MPS14_REG_B1CTRL1 + (num - 1) * 2, \
.enable_mask = S2MPS14_ENABLE_MASK \
}
/* Full S2MPS14 regulator table: 25 LDOs and 5 bucks. */
static const struct regulator_desc s2mps14_regulators[] = {
regulator_desc_s2mps14_ldo(1, MIN_800_MV, STEP_12_5_MV),
regulator_desc_s2mps14_ldo(2, MIN_800_MV, STEP_12_5_MV),
regulator_desc_s2mps14_ldo(3, MIN_800_MV, STEP_25_MV),
regulator_desc_s2mps14_ldo(4, MIN_800_MV, STEP_25_MV),
regulator_desc_s2mps14_ldo(5, MIN_800_MV, STEP_12_5_MV),
regulator_desc_s2mps14_ldo(6, MIN_800_MV, STEP_12_5_MV),
regulator_desc_s2mps14_ldo(7, MIN_800_MV, STEP_25_MV),
regulator_desc_s2mps14_ldo(8, MIN_1800_MV, STEP_25_MV),
regulator_desc_s2mps14_ldo(9, MIN_800_MV, STEP_12_5_MV),
regulator_desc_s2mps14_ldo(10, MIN_800_MV, STEP_12_5_MV),
regulator_desc_s2mps14_ldo(11, MIN_800_MV, STEP_25_MV),
regulator_desc_s2mps14_ldo(12, MIN_1800_MV, STEP_25_MV),
regulator_desc_s2mps14_ldo(13, MIN_1800_MV, STEP_25_MV),
regulator_desc_s2mps14_ldo(14, MIN_1800_MV, STEP_25_MV),
regulator_desc_s2mps14_ldo(15, MIN_1800_MV, STEP_25_MV),
regulator_desc_s2mps14_ldo(16, MIN_1800_MV, STEP_25_MV),
regulator_desc_s2mps14_ldo(17, MIN_1800_MV, STEP_25_MV),
regulator_desc_s2mps14_ldo(18, MIN_1800_MV, STEP_25_MV),
regulator_desc_s2mps14_ldo(19, MIN_800_MV, STEP_25_MV),
regulator_desc_s2mps14_ldo(20, MIN_800_MV, STEP_25_MV),
regulator_desc_s2mps14_ldo(21, MIN_800_MV, STEP_25_MV),
regulator_desc_s2mps14_ldo(22, MIN_800_MV, STEP_12_5_MV),
regulator_desc_s2mps14_ldo(23, MIN_800_MV, STEP_25_MV),
regulator_desc_s2mps14_ldo(24, MIN_1800_MV, STEP_25_MV),
regulator_desc_s2mps14_ldo(25, MIN_1800_MV, STEP_25_MV),
regulator_desc_s2mps14_buck(1, MIN_600_MV, STEP_6_25_MV,
S2MPS14_BUCK1235_START_SEL),
regulator_desc_s2mps14_buck(2, MIN_600_MV, STEP_6_25_MV,
S2MPS14_BUCK1235_START_SEL),
regulator_desc_s2mps14_buck(3, MIN_600_MV, STEP_6_25_MV,
S2MPS14_BUCK1235_START_SEL),
regulator_desc_s2mps14_buck(4, MIN_1400_MV, STEP_12_5_MV,
S2MPS14_BUCK4_START_SEL),
regulator_desc_s2mps14_buck(5, MIN_600_MV, STEP_6_25_MV,
S2MPS14_BUCK1235_START_SEL),
};
/* S2MPS15 LDO ops: linear-range voltage map, plain regmap enable/disable. */
static const struct regulator_ops s2mps15_reg_ldo_ops = {
.list_voltage = regulator_list_voltage_linear_range,
.map_voltage = regulator_map_voltage_linear_range,
.is_enabled = regulator_is_enabled_regmap,
.enable = regulator_enable_regmap,
.disable = regulator_disable_regmap,
.get_voltage_sel = regulator_get_voltage_sel_regmap,
.set_voltage_sel = regulator_set_voltage_sel_regmap,
};
/* S2MPS15 buck ops: as LDO ops, plus ramp-time accounting for DVS. */
static const struct regulator_ops s2mps15_reg_buck_ops = {
.list_voltage = regulator_list_voltage_linear_range,
.map_voltage = regulator_map_voltage_linear_range,
.is_enabled = regulator_is_enabled_regmap,
.enable = regulator_enable_regmap,
.disable = regulator_disable_regmap,
.get_voltage_sel = regulator_get_voltage_sel_regmap,
.set_voltage_sel = regulator_set_voltage_sel_regmap,
.set_voltage_time_sel = regulator_set_voltage_time_sel,
};
/* S2MPS15 LDO descriptor: voltage and enable in one register per LDO. */
#define regulator_desc_s2mps15_ldo(num, range) { \
.name = "LDO"#num, \
.id = S2MPS15_LDO##num, \
.ops = &s2mps15_reg_ldo_ops, \
.type = REGULATOR_VOLTAGE, \
.owner = THIS_MODULE, \
.linear_ranges = range, \
.n_linear_ranges = ARRAY_SIZE(range), \
.n_voltages = S2MPS15_LDO_N_VOLTAGES, \
.vsel_reg = S2MPS15_REG_L1CTRL + num - 1, \
.vsel_mask = S2MPS15_LDO_VSEL_MASK, \
.enable_reg = S2MPS15_REG_L1CTRL + num - 1, \
.enable_mask = S2MPS15_ENABLE_MASK \
}
/* S2MPS15 buck descriptor: CTRL1/CTRL2 pair per buck, fixed ramp delay. */
#define regulator_desc_s2mps15_buck(num, range) { \
.name = "BUCK"#num, \
.id = S2MPS15_BUCK##num, \
.ops = &s2mps15_reg_buck_ops, \
.type = REGULATOR_VOLTAGE, \
.owner = THIS_MODULE, \
.linear_ranges = range, \
.n_linear_ranges = ARRAY_SIZE(range), \
.ramp_delay = 12500, \
.n_voltages = S2MPS15_BUCK_N_VOLTAGES, \
.vsel_reg = S2MPS15_REG_B1CTRL2 + ((num - 1) * 2), \
.vsel_mask = S2MPS15_BUCK_VSEL_MASK, \
.enable_reg = S2MPS15_REG_B1CTRL1 + ((num - 1) * 2), \
.enable_mask = S2MPS15_ENABLE_MASK \
}
/* voltage range for s2mps15 LDO 3, 5, 15, 16, 18, 20, 23 and 27 */
static const struct linear_range s2mps15_ldo_voltage_ranges1[] = {
REGULATOR_LINEAR_RANGE(1000000, 0xc, 0x38, 25000),
};
/* voltage range for s2mps15 LDO 2, 6, 14, 17, 19, 21, 24 and 25 */
static const struct linear_range s2mps15_ldo_voltage_ranges2[] = {
REGULATOR_LINEAR_RANGE(1800000, 0x0, 0x3f, 25000),
};
/* voltage range for s2mps15 LDO 4, 11, 12, 13, 22 and 26 */
static const struct linear_range s2mps15_ldo_voltage_ranges3[] = {
REGULATOR_LINEAR_RANGE(700000, 0x0, 0x34, 12500),
};
/* voltage range for s2mps15 LDO 7, 8, 9 and 10 */
static const struct linear_range s2mps15_ldo_voltage_ranges4[] = {
REGULATOR_LINEAR_RANGE(700000, 0x10, 0x20, 25000),
};
/* voltage range for s2mps15 LDO 1 */
static const struct linear_range s2mps15_ldo_voltage_ranges5[] = {
REGULATOR_LINEAR_RANGE(500000, 0x0, 0x20, 12500),
};
/* voltage range for s2mps15 BUCK 1, 2, 3, 4, 5, 6 and 7 */
static const struct linear_range s2mps15_buck_voltage_ranges1[] = {
REGULATOR_LINEAR_RANGE(500000, 0x20, 0xc0, 6250),
};
/* voltage range for s2mps15 BUCK 8, 9 and 10 */
static const struct linear_range s2mps15_buck_voltage_ranges2[] = {
REGULATOR_LINEAR_RANGE(1000000, 0x20, 0x78, 12500),
};
/* Full S2MPS15 regulator table: 27 LDOs and 10 bucks. */
static const struct regulator_desc s2mps15_regulators[] = {
regulator_desc_s2mps15_ldo(1, s2mps15_ldo_voltage_ranges5),
regulator_desc_s2mps15_ldo(2, s2mps15_ldo_voltage_ranges2),
regulator_desc_s2mps15_ldo(3, s2mps15_ldo_voltage_ranges1),
regulator_desc_s2mps15_ldo(4, s2mps15_ldo_voltage_ranges3),
regulator_desc_s2mps15_ldo(5, s2mps15_ldo_voltage_ranges1),
regulator_desc_s2mps15_ldo(6, s2mps15_ldo_voltage_ranges2),
regulator_desc_s2mps15_ldo(7, s2mps15_ldo_voltage_ranges4),
regulator_desc_s2mps15_ldo(8, s2mps15_ldo_voltage_ranges4),
regulator_desc_s2mps15_ldo(9, s2mps15_ldo_voltage_ranges4),
regulator_desc_s2mps15_ldo(10, s2mps15_ldo_voltage_ranges4),
regulator_desc_s2mps15_ldo(11, s2mps15_ldo_voltage_ranges3),
regulator_desc_s2mps15_ldo(12, s2mps15_ldo_voltage_ranges3),
regulator_desc_s2mps15_ldo(13, s2mps15_ldo_voltage_ranges3),
regulator_desc_s2mps15_ldo(14, s2mps15_ldo_voltage_ranges2),
regulator_desc_s2mps15_ldo(15, s2mps15_ldo_voltage_ranges1),
regulator_desc_s2mps15_ldo(16, s2mps15_ldo_voltage_ranges1),
regulator_desc_s2mps15_ldo(17, s2mps15_ldo_voltage_ranges2),
regulator_desc_s2mps15_ldo(18, s2mps15_ldo_voltage_ranges1),
regulator_desc_s2mps15_ldo(19, s2mps15_ldo_voltage_ranges2),
regulator_desc_s2mps15_ldo(20, s2mps15_ldo_voltage_ranges1),
regulator_desc_s2mps15_ldo(21, s2mps15_ldo_voltage_ranges2),
regulator_desc_s2mps15_ldo(22, s2mps15_ldo_voltage_ranges3),
regulator_desc_s2mps15_ldo(23, s2mps15_ldo_voltage_ranges1),
regulator_desc_s2mps15_ldo(24, s2mps15_ldo_voltage_ranges2),
regulator_desc_s2mps15_ldo(25, s2mps15_ldo_voltage_ranges2),
regulator_desc_s2mps15_ldo(26, s2mps15_ldo_voltage_ranges3),
regulator_desc_s2mps15_ldo(27, s2mps15_ldo_voltage_ranges1),
regulator_desc_s2mps15_buck(1, s2mps15_buck_voltage_ranges1),
regulator_desc_s2mps15_buck(2, s2mps15_buck_voltage_ranges1),
regulator_desc_s2mps15_buck(3, s2mps15_buck_voltage_ranges1),
regulator_desc_s2mps15_buck(4, s2mps15_buck_voltage_ranges1),
regulator_desc_s2mps15_buck(5, s2mps15_buck_voltage_ranges1),
regulator_desc_s2mps15_buck(6, s2mps15_buck_voltage_ranges1),
regulator_desc_s2mps15_buck(7, s2mps15_buck_voltage_ranges1),
regulator_desc_s2mps15_buck(8, s2mps15_buck_voltage_ranges2),
regulator_desc_s2mps15_buck(9, s2mps15_buck_voltage_ranges2),
regulator_desc_s2mps15_buck(10, s2mps15_buck_voltage_ranges2),
};
/*
 * Switch one regulator's enable field to external (GPIO) control instead
 * of register control. Returns regmap_update_bits()'s result.
 */
static int s2mps14_pmic_enable_ext_control(struct s2mps11_info *s2mps11,
struct regulator_dev *rdev)
{
return regmap_update_bits(rdev->regmap, rdev->desc->enable_reg,
rdev->desc->enable_mask, S2MPS14_ENABLE_EXT_CONTROL);
}
/*
 * Look up optional "samsung,ext-control" GPIOs from DT for the regulators
 * that support external on/off control (only S2MPS14 LDO10..LDO12).
 * A missing property (-ENOENT) is normal; any other lookup error is
 * logged and that regulator simply falls back to register control
 * (gpio[reg] left NULL) — errors are deliberately not propagated.
 */
static void s2mps14_pmic_dt_parse_ext_control_gpio(struct platform_device *pdev,
struct of_regulator_match *rdata, struct s2mps11_info *s2mps11)
{
struct gpio_desc **gpio = s2mps11->ext_control_gpiod;
unsigned int i;
unsigned int valid_regulators[3] = { S2MPS14_LDO10, S2MPS14_LDO11,
S2MPS14_LDO12 };
for (i = 0; i < ARRAY_SIZE(valid_regulators); i++) {
unsigned int reg = valid_regulators[i];
/* Skip regulators that were not described in DT at all. */
if (!rdata[reg].init_data || !rdata[reg].of_node)
continue;
gpio[reg] = devm_fwnode_gpiod_get(&pdev->dev,
of_fwnode_handle(rdata[reg].of_node),
"samsung,ext-control",
GPIOD_OUT_HIGH | GPIOD_FLAGS_BIT_NONEXCLUSIVE,
"s2mps11-regulator");
if (PTR_ERR(gpio[reg]) == -ENOENT)
gpio[reg] = NULL;
else if (IS_ERR(gpio[reg])) {
dev_err(&pdev->dev, "Failed to get control GPIO for %d/%s\n",
reg, rdata[reg].name);
gpio[reg] = NULL;
continue;
}
if (gpio[reg])
dev_dbg(&pdev->dev, "Using GPIO for ext-control over %d/%s\n",
reg, rdata[reg].name);
}
}
/*
 * Parse the parent MFD's "regulators" DT sub-node, filling rdata[] with
 * per-regulator init data; additionally resolve ext-control GPIOs on
 * S2MPS14. Returns 0 on success or -EINVAL if the sub-node is missing.
 */
static int s2mps11_pmic_dt_parse(struct platform_device *pdev,
struct of_regulator_match *rdata, struct s2mps11_info *s2mps11,
unsigned int rdev_num)
{
struct device_node *reg_np;
reg_np = of_get_child_by_name(pdev->dev.parent->of_node, "regulators");
if (!reg_np) {
dev_err(&pdev->dev, "could not find regulators sub-node\n");
return -EINVAL;
}
of_regulator_match(&pdev->dev, reg_np, rdata, rdev_num);
if (s2mps11->dev_type == S2MPS14X)
s2mps14_pmic_dt_parse_ext_control_gpio(pdev, rdata, s2mps11);
of_node_put(reg_np);
return 0;
}
/*
 * Program the ramp rate for S2MPU02 BUCK1..4 (all share the RAMP1
 * register, each buck in its own 2-bit field). Other regulators have no
 * configurable ramp and are silently accepted (return 0).
 * get_ramp_delay() is the shared helper defined earlier in this file;
 * it presumably maps a uV/us rate onto the 2-bit field encoding — its
 * body is outside this excerpt.
 */
static int s2mpu02_set_ramp_delay(struct regulator_dev *rdev, int ramp_delay)
{
unsigned int ramp_val, ramp_shift, ramp_reg;
int rdev_id = rdev_get_id(rdev);
switch (rdev_id) {
case S2MPU02_BUCK1:
ramp_shift = S2MPU02_BUCK1_RAMP_SHIFT;
break;
case S2MPU02_BUCK2:
ramp_shift = S2MPU02_BUCK2_RAMP_SHIFT;
break;
case S2MPU02_BUCK3:
ramp_shift = S2MPU02_BUCK3_RAMP_SHIFT;
break;
case S2MPU02_BUCK4:
ramp_shift = S2MPU02_BUCK4_RAMP_SHIFT;
break;
default:
return 0;
}
ramp_reg = S2MPU02_REG_RAMP1;
ramp_val = get_ramp_delay(ramp_delay);
return regmap_update_bits(rdev->regmap, ramp_reg,
S2MPU02_BUCK1234_RAMP_MASK << ramp_shift,
ramp_val << ramp_shift);
}
/* S2MPU02 LDO ops: linear voltage map over regmap, suspend-aware enable. */
static const struct regulator_ops s2mpu02_ldo_ops = {
.list_voltage = regulator_list_voltage_linear,
.map_voltage = regulator_map_voltage_linear,
.is_enabled = regulator_is_enabled_regmap,
.enable = s2mps11_regulator_enable,
.disable = regulator_disable_regmap,
.get_voltage_sel = regulator_get_voltage_sel_regmap,
.set_voltage_sel = regulator_set_voltage_sel_regmap,
.set_voltage_time_sel = regulator_set_voltage_time_sel,
.set_suspend_disable = s2mps11_regulator_set_suspend_disable,
};
/* S2MPU02 buck ops: as LDO ops, plus programmable ramp (BUCK1..4 only). */
static const struct regulator_ops s2mpu02_buck_ops = {
.list_voltage = regulator_list_voltage_linear,
.map_voltage = regulator_map_voltage_linear,
.is_enabled = regulator_is_enabled_regmap,
.enable = s2mps11_regulator_enable,
.disable = regulator_disable_regmap,
.get_voltage_sel = regulator_get_voltage_sel_regmap,
.set_voltage_sel = regulator_set_voltage_sel_regmap,
.set_voltage_time_sel = regulator_set_voltage_time_sel,
.set_suspend_disable = s2mps11_regulator_set_suspend_disable,
.set_ramp_delay = s2mpu02_set_ramp_delay,
};
/* S2MPU02 LDO1: voltage group 1 (900 mV min, 12.5 mV step), own register. */
#define regulator_desc_s2mpu02_ldo1(num) { \
.name = "LDO"#num, \
.id = S2MPU02_LDO##num, \
.ops = &s2mpu02_ldo_ops, \
.type = REGULATOR_VOLTAGE, \
.owner = THIS_MODULE, \
.min_uV = S2MPU02_LDO_MIN_900MV, \
.uV_step = S2MPU02_LDO_STEP_12_5MV, \
.linear_min_sel = S2MPU02_LDO_GROUP1_START_SEL, \
.n_voltages = S2MPU02_LDO_N_VOLTAGES, \
.vsel_reg = S2MPU02_REG_L1CTRL, \
.vsel_mask = S2MPU02_LDO_VSEL_MASK, \
.enable_reg = S2MPU02_REG_L1CTRL, \
.enable_mask = S2MPU02_ENABLE_MASK \
}
/* S2MPU02 LDO2: voltage group 2 (1050 mV min, 25 mV step), own register. */
#define regulator_desc_s2mpu02_ldo2(num) { \
.name = "LDO"#num, \
.id = S2MPU02_LDO##num, \
.ops = &s2mpu02_ldo_ops, \
.type = REGULATOR_VOLTAGE, \
.owner = THIS_MODULE, \
.min_uV = S2MPU02_LDO_MIN_1050MV, \
.uV_step = S2MPU02_LDO_STEP_25MV, \
.linear_min_sel = S2MPU02_LDO_GROUP2_START_SEL, \
.n_voltages = S2MPU02_LDO_N_VOLTAGES, \
.vsel_reg = S2MPU02_REG_L2CTRL1, \
.vsel_mask = S2MPU02_LDO_VSEL_MASK, \
.enable_reg = S2MPU02_REG_L2CTRL1, \
.enable_mask = S2MPU02_ENABLE_MASK \
}
/*
 * S2MPU02 LDO3..28 descriptors: registers run consecutively from L3CTRL,
 * hence the num - 3 offset; ldo3/4/5 variants select the voltage group.
 */
#define regulator_desc_s2mpu02_ldo3(num) { \
.name = "LDO"#num, \
.id = S2MPU02_LDO##num, \
.ops = &s2mpu02_ldo_ops, \
.type = REGULATOR_VOLTAGE, \
.owner = THIS_MODULE, \
.min_uV = S2MPU02_LDO_MIN_900MV, \
.uV_step = S2MPU02_LDO_STEP_12_5MV, \
.linear_min_sel = S2MPU02_LDO_GROUP1_START_SEL, \
.n_voltages = S2MPU02_LDO_N_VOLTAGES, \
.vsel_reg = S2MPU02_REG_L3CTRL + num - 3, \
.vsel_mask = S2MPU02_LDO_VSEL_MASK, \
.enable_reg = S2MPU02_REG_L3CTRL + num - 3, \
.enable_mask = S2MPU02_ENABLE_MASK \
}
/* Same register layout as ldo3, but voltage group 2 (1050 mV / 25 mV). */
#define regulator_desc_s2mpu02_ldo4(num) { \
.name = "LDO"#num, \
.id = S2MPU02_LDO##num, \
.ops = &s2mpu02_ldo_ops, \
.type = REGULATOR_VOLTAGE, \
.owner = THIS_MODULE, \
.min_uV = S2MPU02_LDO_MIN_1050MV, \
.uV_step = S2MPU02_LDO_STEP_25MV, \
.linear_min_sel = S2MPU02_LDO_GROUP2_START_SEL, \
.n_voltages = S2MPU02_LDO_N_VOLTAGES, \
.vsel_reg = S2MPU02_REG_L3CTRL + num - 3, \
.vsel_mask = S2MPU02_LDO_VSEL_MASK, \
.enable_reg = S2MPU02_REG_L3CTRL + num - 3, \
.enable_mask = S2MPU02_ENABLE_MASK \
}
/* Same register layout as ldo3, but voltage group 3 (1600 mV / 50 mV). */
#define regulator_desc_s2mpu02_ldo5(num) { \
.name = "LDO"#num, \
.id = S2MPU02_LDO##num, \
.ops = &s2mpu02_ldo_ops, \
.type = REGULATOR_VOLTAGE, \
.owner = THIS_MODULE, \
.min_uV = S2MPU02_LDO_MIN_1600MV, \
.uV_step = S2MPU02_LDO_STEP_50MV, \
.linear_min_sel = S2MPU02_LDO_GROUP3_START_SEL, \
.n_voltages = S2MPU02_LDO_N_VOLTAGES, \
.vsel_reg = S2MPU02_REG_L3CTRL + num - 3, \
.vsel_mask = S2MPU02_LDO_VSEL_MASK, \
.enable_reg = S2MPU02_REG_L3CTRL + num - 3, \
.enable_mask = S2MPU02_ENABLE_MASK \
}
/* S2MPU02 BUCK1..4: programmable ramp (s2mpu02_buck_ops), paired CTRL regs. */
#define regulator_desc_s2mpu02_buck1234(num) { \
.name = "BUCK"#num, \
.id = S2MPU02_BUCK##num, \
.ops = &s2mpu02_buck_ops, \
.type = REGULATOR_VOLTAGE, \
.owner = THIS_MODULE, \
.min_uV = S2MPU02_BUCK1234_MIN_600MV, \
.uV_step = S2MPU02_BUCK1234_STEP_6_25MV, \
.n_voltages = S2MPU02_BUCK_N_VOLTAGES, \
.linear_min_sel = S2MPU02_BUCK1234_START_SEL, \
.ramp_delay = S2MPU02_BUCK_RAMP_DELAY, \
.vsel_reg = S2MPU02_REG_B1CTRL2 + (num - 1) * 2, \
.vsel_mask = S2MPU02_BUCK_VSEL_MASK, \
.enable_reg = S2MPU02_REG_B1CTRL1 + (num - 1) * 2, \
.enable_mask = S2MPU02_ENABLE_MASK \
}
/* S2MPU02 BUCK5..7: no programmable ramp, so they use the LDO ops. */
#define regulator_desc_s2mpu02_buck5(num) { \
.name = "BUCK"#num, \
.id = S2MPU02_BUCK##num, \
.ops = &s2mpu02_ldo_ops, \
.type = REGULATOR_VOLTAGE, \
.owner = THIS_MODULE, \
.min_uV = S2MPU02_BUCK5_MIN_1081_25MV, \
.uV_step = S2MPU02_BUCK5_STEP_6_25MV, \
.n_voltages = S2MPU02_BUCK_N_VOLTAGES, \
.linear_min_sel = S2MPU02_BUCK5_START_SEL, \
.ramp_delay = S2MPU02_BUCK_RAMP_DELAY, \
.vsel_reg = S2MPU02_REG_B5CTRL2, \
.vsel_mask = S2MPU02_BUCK_VSEL_MASK, \
.enable_reg = S2MPU02_REG_B5CTRL1, \
.enable_mask = S2MPU02_ENABLE_MASK \
}
#define regulator_desc_s2mpu02_buck6(num) { \
.name = "BUCK"#num, \
.id = S2MPU02_BUCK##num, \
.ops = &s2mpu02_ldo_ops, \
.type = REGULATOR_VOLTAGE, \
.owner = THIS_MODULE, \
.min_uV = S2MPU02_BUCK6_MIN_1700MV, \
.uV_step = S2MPU02_BUCK6_STEP_2_50MV, \
.n_voltages = S2MPU02_BUCK_N_VOLTAGES, \
.linear_min_sel = S2MPU02_BUCK6_START_SEL, \
.ramp_delay = S2MPU02_BUCK_RAMP_DELAY, \
.vsel_reg = S2MPU02_REG_B6CTRL2, \
.vsel_mask = S2MPU02_BUCK_VSEL_MASK, \
.enable_reg = S2MPU02_REG_B6CTRL1, \
.enable_mask = S2MPU02_ENABLE_MASK \
}
#define regulator_desc_s2mpu02_buck7(num) { \
.name = "BUCK"#num, \
.id = S2MPU02_BUCK##num, \
.ops = &s2mpu02_ldo_ops, \
.type = REGULATOR_VOLTAGE, \
.owner = THIS_MODULE, \
.min_uV = S2MPU02_BUCK7_MIN_900MV, \
.uV_step = S2MPU02_BUCK7_STEP_6_25MV, \
.n_voltages = S2MPU02_BUCK_N_VOLTAGES, \
.linear_min_sel = S2MPU02_BUCK7_START_SEL, \
.ramp_delay = S2MPU02_BUCK_RAMP_DELAY, \
.vsel_reg = S2MPU02_REG_B7CTRL2, \
.vsel_mask = S2MPU02_BUCK_VSEL_MASK, \
.enable_reg = S2MPU02_REG_B7CTRL1, \
.enable_mask = S2MPU02_ENABLE_MASK \
}
/*
 * Full S2MPU02 regulator table: 28 LDOs (the ldoN macro picks the
 * voltage group per LDO) and 7 bucks.
 */
static const struct regulator_desc s2mpu02_regulators[] = {
regulator_desc_s2mpu02_ldo1(1),
regulator_desc_s2mpu02_ldo2(2),
regulator_desc_s2mpu02_ldo4(3),
regulator_desc_s2mpu02_ldo5(4),
regulator_desc_s2mpu02_ldo4(5),
regulator_desc_s2mpu02_ldo3(6),
regulator_desc_s2mpu02_ldo3(7),
regulator_desc_s2mpu02_ldo4(8),
regulator_desc_s2mpu02_ldo5(9),
regulator_desc_s2mpu02_ldo3(10),
regulator_desc_s2mpu02_ldo4(11),
regulator_desc_s2mpu02_ldo5(12),
regulator_desc_s2mpu02_ldo5(13),
regulator_desc_s2mpu02_ldo5(14),
regulator_desc_s2mpu02_ldo5(15),
regulator_desc_s2mpu02_ldo5(16),
regulator_desc_s2mpu02_ldo4(17),
regulator_desc_s2mpu02_ldo5(18),
regulator_desc_s2mpu02_ldo3(19),
regulator_desc_s2mpu02_ldo4(20),
regulator_desc_s2mpu02_ldo5(21),
regulator_desc_s2mpu02_ldo5(22),
regulator_desc_s2mpu02_ldo5(23),
regulator_desc_s2mpu02_ldo4(24),
regulator_desc_s2mpu02_ldo5(25),
regulator_desc_s2mpu02_ldo4(26),
regulator_desc_s2mpu02_ldo5(27),
regulator_desc_s2mpu02_ldo5(28),
regulator_desc_s2mpu02_buck1234(1),
regulator_desc_s2mpu02_buck1234(2),
regulator_desc_s2mpu02_buck1234(3),
regulator_desc_s2mpu02_buck1234(4),
regulator_desc_s2mpu02_buck5(5),
regulator_desc_s2mpu02_buck6(6),
regulator_desc_s2mpu02_buck7(7),
};
/*
 * Probe one S2MPS11/13/14/15 or S2MPU02 regulator platform device:
 * select the descriptor table for the chip variant (from the platform
 * device id), parse the parent's DT "regulators" node, then register
 * every regulator with the core. On S2MPS14, regulators with an
 * ext-control GPIO are handed to the core as ena_gpiod and switched to
 * external control in hardware.
 *
 * Fix vs. previous revision: the devm_regulator_register() call
 * contained a mojibake token "®ulators[i]" (an HTML-entity-corrupted
 * "&regulators[i]") which does not compile; restored the correct
 * address-of expression.
 *
 * Returns 0 on success or a negative errno.
 */
static int s2mps11_pmic_probe(struct platform_device *pdev)
{
struct sec_pmic_dev *iodev = dev_get_drvdata(pdev->dev.parent);
struct of_regulator_match *rdata = NULL;
struct regulator_config config = { };
struct s2mps11_info *s2mps11;
unsigned int rdev_num = 0;
int i, ret = 0;
const struct regulator_desc *regulators;
s2mps11 = devm_kzalloc(&pdev->dev, sizeof(struct s2mps11_info),
GFP_KERNEL);
if (!s2mps11)
return -ENOMEM;
s2mps11->dev_type = platform_get_device_id(pdev)->driver_data;
/* Pick the per-variant descriptor table; sizes are compile-checked. */
switch (s2mps11->dev_type) {
case S2MPS11X:
rdev_num = ARRAY_SIZE(s2mps11_regulators);
regulators = s2mps11_regulators;
BUILD_BUG_ON(S2MPS_REGULATOR_MAX < ARRAY_SIZE(s2mps11_regulators));
break;
case S2MPS13X:
rdev_num = ARRAY_SIZE(s2mps13_regulators);
regulators = s2mps13_regulators;
BUILD_BUG_ON(S2MPS_REGULATOR_MAX < ARRAY_SIZE(s2mps13_regulators));
break;
case S2MPS14X:
rdev_num = ARRAY_SIZE(s2mps14_regulators);
regulators = s2mps14_regulators;
BUILD_BUG_ON(S2MPS_REGULATOR_MAX < ARRAY_SIZE(s2mps14_regulators));
break;
case S2MPS15X:
rdev_num = ARRAY_SIZE(s2mps15_regulators);
regulators = s2mps15_regulators;
BUILD_BUG_ON(S2MPS_REGULATOR_MAX < ARRAY_SIZE(s2mps15_regulators));
break;
case S2MPU02:
rdev_num = ARRAY_SIZE(s2mpu02_regulators);
regulators = s2mpu02_regulators;
BUILD_BUG_ON(S2MPS_REGULATOR_MAX < ARRAY_SIZE(s2mpu02_regulators));
break;
default:
dev_err(&pdev->dev, "Invalid device type: %u\n",
s2mps11->dev_type);
return -EINVAL;
}
s2mps11->ext_control_gpiod = devm_kcalloc(&pdev->dev, rdev_num,
sizeof(*s2mps11->ext_control_gpiod), GFP_KERNEL);
if (!s2mps11->ext_control_gpiod)
return -ENOMEM;
/* rdata is only needed during probe; freed on every exit path below. */
rdata = kcalloc(rdev_num, sizeof(*rdata), GFP_KERNEL);
if (!rdata)
return -ENOMEM;
for (i = 0; i < rdev_num; i++)
rdata[i].name = regulators[i].name;
ret = s2mps11_pmic_dt_parse(pdev, rdata, s2mps11, rdev_num);
if (ret)
goto out;
platform_set_drvdata(pdev, s2mps11);
config.dev = &pdev->dev;
config.regmap = iodev->regmap_pmic;
config.driver_data = s2mps11;
for (i = 0; i < rdev_num; i++) {
struct regulator_dev *regulator;
config.init_data = rdata[i].init_data;
config.of_node = rdata[i].of_node;
config.ena_gpiod = s2mps11->ext_control_gpiod[i];
/*
 * Hand the GPIO descriptor management over to the regulator
 * core, remove it from devres management.
 */
if (config.ena_gpiod)
devm_gpiod_unhinge(&pdev->dev, config.ena_gpiod);
regulator = devm_regulator_register(&pdev->dev,
&regulators[i], &config);
if (IS_ERR(regulator)) {
ret = PTR_ERR(regulator);
dev_err(&pdev->dev, "regulator init failed for %d\n",
i);
goto out;
}
/* GPIO-controlled regulators must also be switched in hardware. */
if (config.ena_gpiod) {
ret = s2mps14_pmic_enable_ext_control(s2mps11,
regulator);
if (ret < 0) {
dev_err(&pdev->dev,
"failed to enable GPIO control over %s: %d\n",
regulator->desc->name, ret);
goto out;
}
}
}
out:
kfree(rdata);
return ret;
}
/* Chip-variant match table; driver_data carries the dev_type enum value. */
static const struct platform_device_id s2mps11_pmic_id[] = {
{ "s2mps11-regulator", S2MPS11X},
{ "s2mps13-regulator", S2MPS13X},
{ "s2mps14-regulator", S2MPS14X},
{ "s2mps15-regulator", S2MPS15X},
{ "s2mpu02-regulator", S2MPU02},
{ },
};
MODULE_DEVICE_TABLE(platform, s2mps11_pmic_id);
/*
 * Platform driver registration. Matching is done via the id table (the
 * device is instantiated by the parent sec-core MFD), so no
 * of_match_table is needed here.
 */
static struct platform_driver s2mps11_pmic_driver = {
.driver = {
.name = "s2mps11-pmic",
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
},
.probe = s2mps11_pmic_probe,
.id_table = s2mps11_pmic_id,
};
module_platform_driver(s2mps11_pmic_driver);
/* Module information */
MODULE_AUTHOR("Sangbeom Kim <[email protected]>");
MODULE_DESCRIPTION("Samsung S2MPS11/S2MPS14/S2MPS15/S2MPU02 Regulator Driver")<br>;
MODULE_LICENSE("GPL");
| linux-master | drivers/regulator/s2mps11.c |
// SPDX-License-Identifier: GPL-2.0+
//
// Copyright (c) 2013 Samsung Electronics Co., Ltd
// http://www.samsung.com
#include <linux/bug.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/regmap.h>
#include <linux/platform_device.h>
#include <linux/regulator/driver.h>
#include <linux/regulator/machine.h>
#include <linux/regulator/of_regulator.h>
#include <linux/mfd/samsung/core.h>
#include <linux/mfd/samsung/s2mpa01.h>
/*
 * Cached ramp-delay settings (uV/us), one per hardware ramp-rate field.
 * Bucks that share a register field also share a cached value (2 & 4,
 * 1 & 6, 8/9/10), as enforced in s2mpa01_set_ramp_delay().
 */
struct s2mpa01_info {
int ramp_delay24;	/* shared by BUCK2 and BUCK4 */
int ramp_delay3;
int ramp_delay5;
int ramp_delay16;	/* shared by BUCK1 and BUCK6 */
int ramp_delay7;
int ramp_delay8910;	/* shared by BUCK8, BUCK9 and BUCK10 */
};
/*
 * Encode a ramp delay (uV/us) into the 2-bit hardware field value:
 * the number of times the base rate of 6250 uV/us doubles to reach the
 * requested rate, capped at 3 (i.e. 6250/12500/25000/50000 -> 0..3).
 */
static int get_ramp_delay(int ramp_delay)
{
unsigned char sel = 0;
int steps;
/* Count halvings of (ramp_delay / 6250) until it reaches zero. */
for (steps = ramp_delay / 6250; (steps >>= 1) != 0; )
sel++;
return (sel > 3) ? 3 : sel;
}
/*
 * Return the time (us) needed to slew between two voltage selectors,
 * using the cached per-buck ramp rate (uV/us) or, if none was ever set,
 * the descriptor's default ramp_delay. Rounds up.
 */
static int s2mpa01_regulator_set_voltage_time_sel(struct regulator_dev *rdev,
unsigned int old_selector,
unsigned int new_selector)
{
struct s2mpa01_info *s2mpa01 = rdev_get_drvdata(rdev);
unsigned int ramp_delay = 0;
int old_volt, new_volt;
/* Bucks sharing a ramp-rate register field share a cached value. */
switch (rdev_get_id(rdev)) {
case S2MPA01_BUCK2:
case S2MPA01_BUCK4:
ramp_delay = s2mpa01->ramp_delay24;
break;
case S2MPA01_BUCK3:
ramp_delay = s2mpa01->ramp_delay3;
break;
case S2MPA01_BUCK5:
ramp_delay = s2mpa01->ramp_delay5;
break;
case S2MPA01_BUCK1:
case S2MPA01_BUCK6:
ramp_delay = s2mpa01->ramp_delay16;
break;
case S2MPA01_BUCK7:
ramp_delay = s2mpa01->ramp_delay7;
break;
case S2MPA01_BUCK8:
case S2MPA01_BUCK9:
case S2MPA01_BUCK10:
ramp_delay = s2mpa01->ramp_delay8910;
break;
}
if (ramp_delay == 0)
ramp_delay = rdev->desc->ramp_delay;
old_volt = rdev->desc->min_uV + (rdev->desc->uV_step * old_selector);
new_volt = rdev->desc->min_uV + (rdev->desc->uV_step * new_selector);
return DIV_ROUND_UP(abs(new_volt - old_volt), ramp_delay);
}
/*
 * Program the ramp rate for an S2MPA01 buck. Several bucks share a
 * hardware ramp field (2&4, 1&6, 8/9/10); for those, the largest rate
 * requested so far wins and the cached value is kept in s2mpa01_info.
 * Only BUCK1..4 have a ramp enable bit (in RAMP1); a ramp_delay of 0 on
 * those disables ramping instead of programming a rate. Non-buck ids
 * are silently accepted (return 0).
 */
static int s2mpa01_set_ramp_delay(struct regulator_dev *rdev, int ramp_delay)
{
struct s2mpa01_info *s2mpa01 = rdev_get_drvdata(rdev);
unsigned int ramp_val, ramp_shift, ramp_reg = S2MPA01_REG_RAMP2;
unsigned int ramp_enable = 1, enable_shift = 0;
int ret;
switch (rdev_get_id(rdev)) {
case S2MPA01_BUCK1:
enable_shift = S2MPA01_BUCK1_RAMP_EN_SHIFT;
if (!ramp_delay) {
ramp_enable = 0;
break;
}
/* Field shared with BUCK6: keep/propagate the larger rate. */
if (ramp_delay > s2mpa01->ramp_delay16)
s2mpa01->ramp_delay16 = ramp_delay;
else
ramp_delay = s2mpa01->ramp_delay16;
ramp_shift = S2MPA01_BUCK16_RAMP_SHIFT;
break;
case S2MPA01_BUCK2:
enable_shift = S2MPA01_BUCK2_RAMP_EN_SHIFT;
if (!ramp_delay) {
ramp_enable = 0;
break;
}
/* Field shared with BUCK4. */
if (ramp_delay > s2mpa01->ramp_delay24)
s2mpa01->ramp_delay24 = ramp_delay;
else
ramp_delay = s2mpa01->ramp_delay24;
ramp_shift = S2MPA01_BUCK24_RAMP_SHIFT;
ramp_reg = S2MPA01_REG_RAMP1;
break;
case S2MPA01_BUCK3:
enable_shift = S2MPA01_BUCK3_RAMP_EN_SHIFT;
if (!ramp_delay) {
ramp_enable = 0;
break;
}
s2mpa01->ramp_delay3 = ramp_delay;
ramp_shift = S2MPA01_BUCK3_RAMP_SHIFT;
ramp_reg = S2MPA01_REG_RAMP1;
break;
case S2MPA01_BUCK4:
enable_shift = S2MPA01_BUCK4_RAMP_EN_SHIFT;
if (!ramp_delay) {
ramp_enable = 0;
break;
}
/* Field shared with BUCK2. */
if (ramp_delay > s2mpa01->ramp_delay24)
s2mpa01->ramp_delay24 = ramp_delay;
else
ramp_delay = s2mpa01->ramp_delay24;
ramp_shift = S2MPA01_BUCK24_RAMP_SHIFT;
ramp_reg = S2MPA01_REG_RAMP1;
break;
case S2MPA01_BUCK5:
s2mpa01->ramp_delay5 = ramp_delay;
ramp_shift = S2MPA01_BUCK5_RAMP_SHIFT;
break;
case S2MPA01_BUCK6:
/* Field shared with BUCK1. */
if (ramp_delay > s2mpa01->ramp_delay16)
s2mpa01->ramp_delay16 = ramp_delay;
else
ramp_delay = s2mpa01->ramp_delay16;
ramp_shift = S2MPA01_BUCK16_RAMP_SHIFT;
break;
case S2MPA01_BUCK7:
s2mpa01->ramp_delay7 = ramp_delay;
ramp_shift = S2MPA01_BUCK7_RAMP_SHIFT;
break;
case S2MPA01_BUCK8:
case S2MPA01_BUCK9:
case S2MPA01_BUCK10:
/* One field for all three bucks. */
if (ramp_delay > s2mpa01->ramp_delay8910)
s2mpa01->ramp_delay8910 = ramp_delay;
else
ramp_delay = s2mpa01->ramp_delay8910;
ramp_shift = S2MPA01_BUCK8910_RAMP_SHIFT;
break;
default:
return 0;
}
if (!ramp_enable)
goto ramp_disable;
/* Ramp delay can be enabled/disabled only for buck[1234] */
if (rdev_get_id(rdev) >= S2MPA01_BUCK1 &&
rdev_get_id(rdev) <= S2MPA01_BUCK4) {
ret = regmap_update_bits(rdev->regmap, S2MPA01_REG_RAMP1,
1 << enable_shift, 1 << enable_shift);
if (ret) {
dev_err(&rdev->dev, "failed to enable ramp rate\n");
return ret;
}
}
ramp_val = get_ramp_delay(ramp_delay);
return regmap_update_bits(rdev->regmap, ramp_reg, 0x3 << ramp_shift,
ramp_val << ramp_shift);
ramp_disable:
/* Only reached for BUCK1..4, where enable_shift is always set. */
return regmap_update_bits(rdev->regmap, S2MPA01_REG_RAMP1,
1 << enable_shift, 0);
}
/* S2MPA01 LDO ops: linear voltage map over regmap, generic ramp timing. */
static const struct regulator_ops s2mpa01_ldo_ops = {
.list_voltage = regulator_list_voltage_linear,
.map_voltage = regulator_map_voltage_linear,
.is_enabled = regulator_is_enabled_regmap,
.enable = regulator_enable_regmap,
.disable = regulator_disable_regmap,
.get_voltage_sel = regulator_get_voltage_sel_regmap,
.set_voltage_sel = regulator_set_voltage_sel_regmap,
.set_voltage_time_sel = regulator_set_voltage_time_sel,
};
/* S2MPA01 buck ops: cached-rate ramp timing plus programmable ramp rate. */
static const struct regulator_ops s2mpa01_buck_ops = {
.list_voltage = regulator_list_voltage_linear,
.map_voltage = regulator_map_voltage_linear,
.is_enabled = regulator_is_enabled_regmap,
.enable = regulator_enable_regmap,
.disable = regulator_disable_regmap,
.get_voltage_sel = regulator_get_voltage_sel_regmap,
.set_voltage_sel = regulator_set_voltage_sel_regmap,
.set_voltage_time_sel = s2mpa01_regulator_set_voltage_time_sel,
.set_ramp_delay = s2mpa01_set_ramp_delay,
};
/*
 * Descriptor for LDO<num>; voltage step varies per LDO (25mV or 50mV),
 * control registers are laid out contiguously from S2MPA01_REG_L1CTRL.
 */
#define regulator_desc_ldo(num, step) {			\
	.name		= "LDO"#num,			\
	.of_match	= of_match_ptr("LDO"#num),	\
	.regulators_node = of_match_ptr("regulators"),	\
	.id		= S2MPA01_LDO##num,		\
	.ops		= &s2mpa01_ldo_ops,		\
	.type		= REGULATOR_VOLTAGE,		\
	.owner		= THIS_MODULE,			\
	.min_uV		= MIN_800_MV,			\
	.uV_step	= step,				\
	.n_voltages	= S2MPA01_LDO_N_VOLTAGES,	\
	.vsel_reg	= S2MPA01_REG_L1CTRL + num - 1,	\
	.vsel_mask	= S2MPA01_LDO_VSEL_MASK,	\
	.enable_reg	= S2MPA01_REG_L1CTRL + num - 1,	\
	.enable_mask	= S2MPA01_ENABLE_MASK		\
}

/*
 * Descriptor for BUCK1-BUCK4; each buck occupies two registers
 * (CTRL1 for enable, CTRL2 for voltage selection).
 */
#define regulator_desc_buck1_4(num) {				\
	.name		= "BUCK"#num,				\
	.of_match	= of_match_ptr("BUCK"#num),		\
	.regulators_node = of_match_ptr("regulators"),		\
	.id		= S2MPA01_BUCK##num,			\
	.ops		= &s2mpa01_buck_ops,			\
	.type		= REGULATOR_VOLTAGE,			\
	.owner		= THIS_MODULE,				\
	.min_uV		= MIN_600_MV,				\
	.uV_step	= STEP_6_25_MV,				\
	.n_voltages	= S2MPA01_BUCK_N_VOLTAGES,		\
	.ramp_delay	= S2MPA01_RAMP_DELAY,			\
	.vsel_reg	= S2MPA01_REG_B1CTRL2 + (num - 1) * 2,	\
	.vsel_mask	= S2MPA01_BUCK_VSEL_MASK,		\
	.enable_reg	= S2MPA01_REG_B1CTRL1 + (num - 1) * 2,	\
	.enable_mask	= S2MPA01_ENABLE_MASK			\
}

/* BUCK5 has its own register pair and a higher minimum voltage (800mV) */
#define regulator_desc_buck5 {					\
	.name		= "BUCK5",				\
	.of_match	= of_match_ptr("BUCK5"),		\
	.regulators_node = of_match_ptr("regulators"),		\
	.id		= S2MPA01_BUCK5,			\
	.ops		= &s2mpa01_buck_ops,			\
	.type		= REGULATOR_VOLTAGE,			\
	.owner		= THIS_MODULE,				\
	.min_uV		= MIN_800_MV,				\
	.uV_step	= STEP_6_25_MV,				\
	.n_voltages	= S2MPA01_BUCK_N_VOLTAGES,		\
	.ramp_delay	= S2MPA01_RAMP_DELAY,			\
	.vsel_reg	= S2MPA01_REG_B5CTRL2,			\
	.vsel_mask	= S2MPA01_BUCK_VSEL_MASK,		\
	.enable_reg	= S2MPA01_REG_B5CTRL1,			\
	.enable_mask	= S2MPA01_ENABLE_MASK			\
}

/*
 * Descriptor for BUCK6-BUCK10; minimum voltage and step differ per buck
 * and are passed in by the caller. Registers start at B6CTRL1/B6CTRL2.
 */
#define regulator_desc_buck6_10(num, min, step) {		\
	.name		= "BUCK"#num,				\
	.of_match	= of_match_ptr("BUCK"#num),		\
	.regulators_node = of_match_ptr("regulators"),		\
	.id		= S2MPA01_BUCK##num,			\
	.ops		= &s2mpa01_buck_ops,			\
	.type		= REGULATOR_VOLTAGE,			\
	.owner		= THIS_MODULE,				\
	.min_uV		= min,					\
	.uV_step	= step,					\
	.n_voltages	= S2MPA01_BUCK_N_VOLTAGES,		\
	.ramp_delay	= S2MPA01_RAMP_DELAY,			\
	.vsel_reg	= S2MPA01_REG_B6CTRL2 + (num - 6) * 2,	\
	.vsel_mask	= S2MPA01_BUCK_VSEL_MASK,		\
	.enable_reg	= S2MPA01_REG_B6CTRL1 + (num - 6) * 2,	\
	.enable_mask	= S2MPA01_ENABLE_MASK			\
}
/* Descriptors for all 26 LDOs and 10 bucks of the S2MPA01 */
static const struct regulator_desc regulators[] = {
	regulator_desc_ldo(1, STEP_25_MV),
	regulator_desc_ldo(2, STEP_50_MV),
	regulator_desc_ldo(3, STEP_50_MV),
	regulator_desc_ldo(4, STEP_50_MV),
	regulator_desc_ldo(5, STEP_25_MV),
	regulator_desc_ldo(6, STEP_25_MV),
	regulator_desc_ldo(7, STEP_50_MV),
	regulator_desc_ldo(8, STEP_50_MV),
	regulator_desc_ldo(9, STEP_50_MV),
	regulator_desc_ldo(10, STEP_50_MV),
	regulator_desc_ldo(11, STEP_50_MV),
	regulator_desc_ldo(12, STEP_50_MV),
	regulator_desc_ldo(13, STEP_50_MV),
	regulator_desc_ldo(14, STEP_50_MV),
	regulator_desc_ldo(15, STEP_50_MV),
	regulator_desc_ldo(16, STEP_50_MV),
	regulator_desc_ldo(17, STEP_50_MV),
	regulator_desc_ldo(18, STEP_50_MV),
	regulator_desc_ldo(19, STEP_50_MV),
	regulator_desc_ldo(20, STEP_50_MV),
	regulator_desc_ldo(21, STEP_50_MV),
	regulator_desc_ldo(22, STEP_50_MV),
	regulator_desc_ldo(23, STEP_50_MV),
	regulator_desc_ldo(24, STEP_50_MV),
	regulator_desc_ldo(25, STEP_50_MV),
	regulator_desc_ldo(26, STEP_25_MV),
	regulator_desc_buck1_4(1),
	regulator_desc_buck1_4(2),
	regulator_desc_buck1_4(3),
	regulator_desc_buck1_4(4),
	regulator_desc_buck5,
	regulator_desc_buck6_10(6, MIN_600_MV, STEP_6_25_MV),
	regulator_desc_buck6_10(7, MIN_600_MV, STEP_6_25_MV),
	regulator_desc_buck6_10(8, MIN_800_MV, STEP_12_5_MV),
	regulator_desc_buck6_10(9, MIN_1500_MV, STEP_12_5_MV),
	regulator_desc_buck6_10(10, MIN_1000_MV, STEP_12_5_MV),
};
static int s2mpa01_pmic_probe(struct platform_device *pdev)
{
struct sec_pmic_dev *iodev = dev_get_drvdata(pdev->dev.parent);
struct regulator_config config = { };
struct s2mpa01_info *s2mpa01;
int i;
s2mpa01 = devm_kzalloc(&pdev->dev, sizeof(*s2mpa01), GFP_KERNEL);
if (!s2mpa01)
return -ENOMEM;
config.dev = iodev->dev;
config.regmap = iodev->regmap_pmic;
config.driver_data = s2mpa01;
for (i = 0; i < S2MPA01_REGULATOR_MAX; i++) {
struct regulator_dev *rdev;
rdev = devm_regulator_register(&pdev->dev,
®ulators[i], &config);
if (IS_ERR(rdev)) {
dev_err(&pdev->dev, "regulator init failed for %d\n",
i);
return PTR_ERR(rdev);
}
}
return 0;
}
/* Platform id table; the parent sec-core MFD instantiates "s2mpa01-pmic" */
static const struct platform_device_id s2mpa01_pmic_id[] = {
	{ "s2mpa01-pmic", 0},
	{ },
};
MODULE_DEVICE_TABLE(platform, s2mpa01_pmic_id);

static struct platform_driver s2mpa01_pmic_driver = {
	.driver = {
		.name = "s2mpa01-pmic",
		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
	},
	.probe = s2mpa01_pmic_probe,
	.id_table = s2mpa01_pmic_id,
};

module_platform_driver(s2mpa01_pmic_driver);

/* Module information */
MODULE_AUTHOR("Sangbeom Kim <[email protected]>");
MODULE_AUTHOR("Sachin Kamat <[email protected]>");
MODULE_DESCRIPTION("Samsung S2MPA01 Regulator Driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/regulator/s2mpa01.c |
// SPDX-License-Identifier: GPL-2.0
//
// Copyright (c) 2022 Collabora Ltd.
// Author: AngeloGioacchino Del Regno <[email protected]>
//
// Based on mt6323-regulator.c,
// Copyright (c) 2016 MediaTek Inc.
//
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/mfd/mt6397/core.h>
#include <linux/mfd/mt6332/registers.h>
#include <linux/regulator/driver.h>
#include <linux/regulator/machine.h>
#include <linux/regulator/mt6332-regulator.h>
#include <linux/regulator/of_regulator.h>
/* LDO mode selector values written into modeset_mask */
#define MT6332_LDO_MODE_NORMAL	0
#define MT6332_LDO_MODE_LP	1

/*
 * MT6332 regulators information
 *
 * @desc: standard fields of regulator description.
 * @qi: Mask for query enable signal status of regulators
 * @vselon_reg: Register sections for hardware control mode of bucks
 * @vselctrl_reg: Register for controlling the buck control mode.
 * @vselctrl_mask: Mask for query buck's voltage control mode.
 * @modeset_reg: Register for controlling the LDO low-power mode.
 * @modeset_mask: Mask for the LDO low-power mode bit(s).
 * @status_reg: Register for regulator enable status where qi unavailable
 * @status_mask: Mask for querying regulator enable status
 */
struct mt6332_regulator_info {
	struct regulator_desc desc;
	u32 qi;
	u32 vselon_reg;
	u32 vselctrl_reg;
	u32 vselctrl_mask;
	u32 modeset_reg;
	u32 modeset_mask;
	u32 status_reg;
	u32 status_mask;
};
/* Buck with linear voltage range; enable status queried via QI bit 13 */
#define MT6332_BUCK(match, vreg, min, max, step, volt_ranges, enreg,	\
		    vosel, vosel_mask, voselon, vosel_ctrl)		\
[MT6332_ID_##vreg] = {							\
	.desc = {							\
		.name = #vreg,						\
		.of_match = of_match_ptr(match),			\
		.ops = &mt6332_buck_volt_range_ops,			\
		.type = REGULATOR_VOLTAGE,				\
		.id = MT6332_ID_##vreg,					\
		.owner = THIS_MODULE,					\
		.n_voltages = (max - min)/step + 1,			\
		.linear_ranges = volt_ranges,				\
		.n_linear_ranges = ARRAY_SIZE(volt_ranges),		\
		.vsel_reg = vosel,					\
		.vsel_mask = vosel_mask,				\
		.enable_reg = enreg,					\
		.enable_mask = BIT(0),					\
	},								\
	.qi = BIT(13),							\
	.vselon_reg = voselon,						\
	.vselctrl_reg = vosel_ctrl,					\
	.vselctrl_mask = BIT(1),					\
	.status_mask = 0,						\
}

/* LDO with linear voltage range and low-power mode control (modeset) */
#define MT6332_LDO_LINEAR(match, vreg, min, max, step, volt_ranges,	\
			  enreg, vosel, vosel_mask, voselon,		\
			  vosel_ctrl, _modeset_reg, _modeset_mask)	\
[MT6332_ID_##vreg] = {							\
	.desc = {							\
		.name = #vreg,						\
		.of_match = of_match_ptr(match),			\
		.ops = &mt6332_ldo_volt_range_ops,			\
		.type = REGULATOR_VOLTAGE,				\
		.id = MT6332_ID_##vreg,					\
		.owner = THIS_MODULE,					\
		.n_voltages = (max - min)/step + 1,			\
		.linear_ranges = volt_ranges,				\
		.n_linear_ranges = ARRAY_SIZE(volt_ranges),		\
		.vsel_reg = vosel,					\
		.vsel_mask = vosel_mask,				\
		.enable_reg = enreg,					\
		.enable_mask = BIT(0),					\
	},								\
	.qi = BIT(15),							\
	.vselon_reg = voselon,						\
	.vselctrl_reg = vosel_ctrl,					\
	.vselctrl_mask = BIT(1),					\
	.modeset_reg = _modeset_reg,					\
	.modeset_mask = _modeset_mask,					\
	.status_mask = 0,						\
}

/* Always-on LDO: voltage table only, no enable/disable control */
#define MT6332_LDO_AO(match, vreg, ldo_volt_table, vosel, vosel_mask)	\
[MT6332_ID_##vreg] = {							\
	.desc = {							\
		.name = #vreg,						\
		.of_match = of_match_ptr(match),			\
		.ops = &mt6332_volt_table_ao_ops,			\
		.type = REGULATOR_VOLTAGE,				\
		.id = MT6332_ID_##vreg,					\
		.owner = THIS_MODULE,					\
		.n_voltages = ARRAY_SIZE(ldo_volt_table),		\
		.volt_table = ldo_volt_table,				\
		.vsel_reg = vosel,					\
		.vsel_mask = vosel_mask,				\
	},								\
}

/* Table-based LDO with enable bit and low-power mode control */
#define MT6332_LDO(match, vreg, ldo_volt_table, enreg, enbit, vosel,	\
		   vosel_mask, _modeset_reg, _modeset_mask)		\
[MT6332_ID_##vreg] = {							\
	.desc = {							\
		.name = #vreg,						\
		.of_match = of_match_ptr(match),			\
		.ops = &mt6332_volt_table_ops,				\
		.type = REGULATOR_VOLTAGE,				\
		.id = MT6332_ID_##vreg,					\
		.owner = THIS_MODULE,					\
		.n_voltages = ARRAY_SIZE(ldo_volt_table),		\
		.volt_table = ldo_volt_table,				\
		.vsel_reg = vosel,					\
		.vsel_mask = vosel_mask,				\
		.enable_reg = enreg,					\
		.enable_mask = BIT(enbit),				\
	},								\
	.qi = BIT(15),							\
	.modeset_reg = _modeset_reg,					\
	.modeset_mask = _modeset_mask,					\
	.status_mask = 0,						\
}

/* Fixed-voltage regulator; enable status read from MT6332_EN_STATUS0 */
#define MT6332_REG_FIXED(match, vreg, enreg, enbit, qibit, volt, stbit)	\
[MT6332_ID_##vreg] = {							\
	.desc = {							\
		.name = #vreg,						\
		.of_match = of_match_ptr(match),			\
		.ops = &mt6332_volt_fixed_ops,				\
		.type = REGULATOR_VOLTAGE,				\
		.id = MT6332_ID_##vreg,					\
		.owner = THIS_MODULE,					\
		.n_voltages = 1,					\
		.enable_reg = enreg,					\
		.enable_mask = BIT(enbit),				\
		.min_uV = volt,						\
	},								\
	.qi = BIT(qibit),						\
	.status_reg = MT6332_EN_STATUS0,				\
	.status_mask = BIT(stbit),					\
}
/* VSBST boost: 3.5V base, 31.25mV steps, 128 selectors */
static const struct linear_range boost_volt_range[] = {
	REGULATOR_LINEAR_RANGE(3500000, 0, 0x7f, 31250),
};

/* Core bucks / VSRAM: 0.7V base, 6.25mV steps, 128 selectors */
static const struct linear_range buck_volt_range[] = {
	REGULATOR_LINEAR_RANGE(700000, 0, 0x7f, 6250),
};

/* VPA buck: 0.5V base, 50mV steps, 64 selectors */
static const struct linear_range buck_pa_volt_range[] = {
	REGULATOR_LINEAR_RANGE(500000, 0, 0x3f, 50000),
};

/* RF bucks: 1.05V base, 9.375mV steps, 128 selectors */
static const struct linear_range buck_rf_volt_range[] = {
	REGULATOR_LINEAR_RANGE(1050000, 0, 0x7f, 9375),
};

/* VAUXB32 table; the 0 entry marks an unusable selector — presumably
 * reserved in hardware; TODO confirm against the MT6332 datasheet. */
static const unsigned int ldo_volt_table1[] = {
	2800000, 3000000, 0, 3200000
};

/* VDIG18 table; the top value is intentionally repeated (clamped) */
static const unsigned int ldo_volt_table2[] = {
	1200000, 1300000, 1400000, 1500000, 1600000, 1700000, 1800000, 1800000,
};
/*
 * mt6332_get_status() - report whether a regulator is actually on.
 *
 * Regulators with a QI (query) bit read it from their enable register;
 * the others fall back to the dedicated status register/mask.
 * Returns REGULATOR_STATUS_ON/OFF, or a negative regmap error.
 *
 * Fix: the third regmap_read() argument had been mangled into the
 * "registered trademark" glyph ("®val" — an HTML "&reg;" entity that
 * swallowed the "&reg" of "&regval"); restored to "&regval".
 */
static int mt6332_get_status(struct regulator_dev *rdev)
{
	struct mt6332_regulator_info *info = rdev_get_drvdata(rdev);
	u32 reg, en_mask, regval;
	int ret;

	if (info->qi > 0) {
		reg = info->desc.enable_reg;
		en_mask = info->qi;
	} else {
		reg = info->status_reg;
		en_mask = info->status_mask;
	}

	ret = regmap_read(rdev->regmap, reg, &regval);
	if (ret != 0) {
		dev_err(&rdev->dev, "Failed to get enable reg: %d\n", ret);
		return ret;
	}

	return (regval & en_mask) ? REGULATOR_STATUS_ON : REGULATOR_STATUS_OFF;
}
/*
 * mt6332_ldo_set_mode() - switch an LDO between normal and low-power mode.
 *
 * Maps REGULATOR_MODE_NORMAL/STANDBY to the hardware mode value and
 * writes it into the modeset field; any other mode is rejected with
 * -EINVAL.
 */
static int mt6332_ldo_set_mode(struct regulator_dev *rdev, unsigned int mode)
{
	struct mt6332_regulator_info *info = rdev_get_drvdata(rdev);
	unsigned int shift = ffs(info->modeset_mask) - 1;
	int sel;

	if (mode == REGULATOR_MODE_NORMAL)
		sel = MT6332_LDO_MODE_NORMAL;
	else if (mode == REGULATOR_MODE_STANDBY)
		sel = MT6332_LDO_MODE_LP;
	else
		return -EINVAL;

	return regmap_update_bits(rdev->regmap, info->modeset_reg,
				  info->modeset_mask, sel << shift);
}
/*
 * mt6332_ldo_get_mode() - read back the LDO operating mode.
 *
 * Extracts the mode field from the modeset register; bit 0 set means
 * low-power (STANDBY), otherwise NORMAL. A failed regmap read is
 * propagated through the unsigned return, matching the original.
 */
static unsigned int mt6332_ldo_get_mode(struct regulator_dev *rdev)
{
	struct mt6332_regulator_info *info = rdev_get_drvdata(rdev);
	unsigned int regval;
	int ret;

	ret = regmap_read(rdev->regmap, info->modeset_reg, &regval);
	if (ret < 0)
		return ret;

	regval = (regval & info->modeset_mask) >> (ffs(info->modeset_mask) - 1);

	if (regval & BIT(0))
		return REGULATOR_MODE_STANDBY;

	return REGULATOR_MODE_NORMAL;
}
/* Bucks: linear-range voltage mapping, regmap enable, QI/status readback */
static const struct regulator_ops mt6332_buck_volt_range_ops = {
	.list_voltage = regulator_list_voltage_linear_range,
	.map_voltage = regulator_map_voltage_linear_range,
	.set_voltage_sel = regulator_set_voltage_sel_regmap,
	.get_voltage_sel = regulator_get_voltage_sel_regmap,
	.set_voltage_time_sel = regulator_set_voltage_time_sel,
	.enable = regulator_enable_regmap,
	.disable = regulator_disable_regmap,
	.is_enabled = regulator_is_enabled_regmap,
	.get_status = mt6332_get_status,
};

/* Linear-range LDOs: as the bucks, plus low-power mode control */
static const struct regulator_ops mt6332_ldo_volt_range_ops = {
	.list_voltage = regulator_list_voltage_linear_range,
	.map_voltage = regulator_map_voltage_linear_range,
	.set_voltage_sel = regulator_set_voltage_sel_regmap,
	.get_voltage_sel = regulator_get_voltage_sel_regmap,
	.set_voltage_time_sel = regulator_set_voltage_time_sel,
	.enable = regulator_enable_regmap,
	.disable = regulator_disable_regmap,
	.is_enabled = regulator_is_enabled_regmap,
	.get_status = mt6332_get_status,
	.set_mode = mt6332_ldo_set_mode,
	.get_mode = mt6332_ldo_get_mode,
};

/* Table-based LDOs with enable control and low-power mode */
static const struct regulator_ops mt6332_volt_table_ops = {
	.list_voltage = regulator_list_voltage_table,
	.map_voltage = regulator_map_voltage_iterate,
	.set_voltage_sel = regulator_set_voltage_sel_regmap,
	.get_voltage_sel = regulator_get_voltage_sel_regmap,
	.set_voltage_time_sel = regulator_set_voltage_time_sel,
	.enable = regulator_enable_regmap,
	.disable = regulator_disable_regmap,
	.is_enabled = regulator_is_enabled_regmap,
	.get_status = mt6332_get_status,
	.set_mode = mt6332_ldo_set_mode,
	.get_mode = mt6332_ldo_get_mode,
};

/* Always-on table-based LDOs: voltage control only, no enable ops */
static const struct regulator_ops mt6332_volt_table_ao_ops = {
	.list_voltage = regulator_list_voltage_table,
	.map_voltage = regulator_map_voltage_iterate,
	.set_voltage_sel = regulator_set_voltage_sel_regmap,
	.get_voltage_sel = regulator_get_voltage_sel_regmap,
	.set_voltage_time_sel = regulator_set_voltage_time_sel,
};

/* Fixed-voltage regulators: enable control and status readback only */
static const struct regulator_ops mt6332_volt_fixed_ops = {
	.list_voltage = regulator_list_voltage_linear,
	.enable = regulator_enable_regmap,
	.disable = regulator_disable_regmap,
	.is_enabled = regulator_is_enabled_regmap,
	.get_status = mt6332_get_status,
};
/* The array is indexed by id(MT6332_ID_XXX) */
static struct mt6332_regulator_info mt6332_regulators[] = {
	MT6332_BUCK("buck-vdram", VDRAM, 700000, 1493750, 6250, buck_volt_range,
		    MT6332_EN_STATUS0, MT6332_VDRAM_CON11, GENMASK(6, 0),
		    MT6332_VDRAM_CON12, MT6332_VDRAM_CON7),
	MT6332_BUCK("buck-vdvfs2", VDVFS2, 700000, 1312500, 6250, buck_volt_range,
		    MT6332_VDVFS2_CON9, MT6332_VDVFS2_CON11, GENMASK(6, 0),
		    MT6332_VDVFS2_CON12, MT6332_VDVFS2_CON7),
	MT6332_BUCK("buck-vpa", VPA, 500000, 3400000, 50000, buck_pa_volt_range,
		    MT6332_VPA_CON9, MT6332_VPA_CON11, GENMASK(5, 0),
		    MT6332_VPA_CON12, MT6332_VPA_CON7),
	MT6332_BUCK("buck-vrf18a", VRF1, 1050000, 2240625, 9375, buck_rf_volt_range,
		    MT6332_VRF1_CON9, MT6332_VRF1_CON11, GENMASK(6, 0),
		    MT6332_VRF1_CON12, MT6332_VRF1_CON7),
	MT6332_BUCK("buck-vrf18b", VRF2, 1050000, 2240625, 9375, buck_rf_volt_range,
		    MT6332_VRF2_CON9, MT6332_VRF2_CON11, GENMASK(6, 0),
		    MT6332_VRF2_CON12, MT6332_VRF2_CON7),
	MT6332_BUCK("buck-vsbst", VSBST, 3500000, 7468750, 31250, boost_volt_range,
		    MT6332_VSBST_CON8, MT6332_VSBST_CON12, GENMASK(6, 0),
		    MT6332_VSBST_CON13, MT6332_VSBST_CON8),
	MT6332_LDO("ldo-vauxb32", VAUXB32, ldo_volt_table1, MT6332_LDO_CON1, 10,
		   MT6332_LDO_CON9, GENMASK(6, 5), MT6332_LDO_CON1, GENMASK(1, 0)),
	MT6332_REG_FIXED("ldo-vbif28", VBIF28, MT6332_LDO_CON2, 10, 0, 2800000, 1),
	MT6332_REG_FIXED("ldo-vusb33", VUSB33, MT6332_LDO_CON3, 10, 0, 3300000, 2),
	MT6332_LDO_LINEAR("ldo-vsram", VSRAM_DVFS2, 700000, 1493750, 6250, buck_volt_range,
			  MT6332_EN_STATUS0, MT6332_LDO_CON8, GENMASK(15, 9),
			  MT6332_VDVFS2_CON23, MT6332_VDVFS2_CON22,
			  MT6332_LDO_CON5, GENMASK(1, 0)),
	MT6332_LDO_AO("ldo-vdig18", VDIG18, ldo_volt_table2, MT6332_LDO_CON12, GENMASK(11, 9)),
};
/*
 * mt6332_set_buck_vosel_reg() - pick the active voltage-selection register.
 *
 * For each buck that exposes a control-mode register, read it and — when
 * the hardware-control bit is set — redirect the descriptor's vsel_reg to
 * the "on" voltage register, so the core programs the register that the
 * hardware actually honors. Returns 0 on success, -EIO on a read failure.
 *
 * Fix: the third regmap_read() argument had been mangled into the
 * "registered trademark" glyph ("®val" — an HTML "&reg;" entity that
 * swallowed the "&reg" of "&regval"); restored to "&regval".
 */
static int mt6332_set_buck_vosel_reg(struct platform_device *pdev)
{
	struct mt6397_chip *mt6332 = dev_get_drvdata(pdev->dev.parent);
	int i;
	u32 regval;

	for (i = 0; i < MT6332_ID_VREG_MAX; i++) {
		if (mt6332_regulators[i].vselctrl_reg) {
			if (regmap_read(mt6332->regmap,
					mt6332_regulators[i].vselctrl_reg,
					&regval) < 0) {
				dev_err(&pdev->dev,
					"Failed to read buck ctrl\n");
				return -EIO;
			}

			if (regval & mt6332_regulators[i].vselctrl_mask) {
				mt6332_regulators[i].desc.vsel_reg =
					mt6332_regulators[i].vselon_reg;
			}
		}
	}

	return 0;
}
/*
 * mt6332_regulator_probe() - register all MT6332 regulators.
 *
 * First selects the active buck voltage registers, then reads the chip
 * revision (refusing the unsupported E1 revision, ID 0x10, which has a
 * different voltage table), and finally registers every descriptor with
 * the regulator core using the parent MFD's regmap.
 *
 * Fix: the third regmap_read() argument had been mangled into the
 * "registered trademark" glyph ("®_value" — an HTML "&reg;" entity that
 * swallowed the "&reg" of "&reg_value"); restored to "&reg_value".
 */
static int mt6332_regulator_probe(struct platform_device *pdev)
{
	struct mt6397_chip *mt6332 = dev_get_drvdata(pdev->dev.parent);
	struct regulator_config config = {};
	struct regulator_dev *rdev;
	int i;
	u32 reg_value;

	/* Query buck controller to select activated voltage register part */
	if (mt6332_set_buck_vosel_reg(pdev))
		return -EIO;

	/* Read PMIC chip revision to update constraints and voltage table */
	if (regmap_read(mt6332->regmap, MT6332_HWCID, &reg_value) < 0) {
		dev_err(&pdev->dev, "Failed to read Chip ID\n");
		return -EIO;
	}
	reg_value &= GENMASK(7, 0);

	dev_info(&pdev->dev, "Chip ID = 0x%x\n", reg_value);

	/*
	 * ChipID 0x10 is "MT6332 E1", has a different voltage table and
	 * it's currently not supported in this driver. Upon detection of
	 * this ID, refuse to register the regulators, as we will wrongly
	 * interpret the VSEL for this revision, potentially overvolting
	 * some device.
	 */
	if (reg_value == 0x10) {
		dev_err(&pdev->dev, "Chip version not supported. Bailing out.\n");
		return -EINVAL;
	}

	for (i = 0; i < MT6332_ID_VREG_MAX; i++) {
		config.dev = &pdev->dev;
		config.driver_data = &mt6332_regulators[i];
		config.regmap = mt6332->regmap;

		rdev = devm_regulator_register(&pdev->dev,
					       &mt6332_regulators[i].desc,
					       &config);
		if (IS_ERR(rdev)) {
			dev_err(&pdev->dev, "failed to register %s\n",
				mt6332_regulators[i].desc.name);
			return PTR_ERR(rdev);
		}
	}

	return 0;
}
/* Platform id table; the mt6397 MFD core instantiates "mt6332-regulator" */
static const struct platform_device_id mt6332_platform_ids[] = {
	{"mt6332-regulator", 0},
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(platform, mt6332_platform_ids);

static struct platform_driver mt6332_regulator_driver = {
	.driver = {
		.name = "mt6332-regulator",
		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
	},
	.probe = mt6332_regulator_probe,
	.id_table = mt6332_platform_ids,
};

module_platform_driver(mt6332_regulator_driver);

MODULE_AUTHOR("AngeloGioacchino Del Regno <[email protected]>")
MODULE_DESCRIPTION("Regulator Driver for MediaTek MT6332 PMIC");
MODULE_LICENSE("GPL");
| linux-master | drivers/regulator/mt6332-regulator.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* tps62360.c -- TI tps62360
*
* Driver for processor core supply tps62360, tps62361B, tps62362 and tps62363.
*
* Copyright (c) 2012, NVIDIA Corporation.
*
* Author: Laxman Dewangan <[email protected]>
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/err.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/regulator/of_regulator.h>
#include <linux/platform_device.h>
#include <linux/regulator/driver.h>
#include <linux/regulator/machine.h>
#include <linux/regulator/tps62360.h>
#include <linux/gpio/consumer.h>
#include <linux/i2c.h>
#include <linux/slab.h>
#include <linux/regmap.h>
/* Register definitions */
#define REG_VSET0		0
#define REG_VSET1		1
#define REG_VSET2		2
#define REG_VSET3		3
#define REG_CONTROL		4
#define REG_TEMP		5
#define REG_RAMPCTRL		6
#define REG_CHIPID		8

/* Bit 7 of a VSET register forces PWM (FAST) mode */
#define FORCE_PWM_ENABLE	BIT(7)

enum chips {TPS62360, TPS62361, TPS62362, TPS62363};

/* 62360/62362: 770mV base, 64 steps; 62361/62363: 500mV base, 128 steps */
#define TPS62360_BASE_VOLTAGE	770000
#define TPS62360_N_VOLTAGES	64
#define TPS62361_BASE_VOLTAGE	500000
#define TPS62361_N_VOLTAGES	128

/* tps 62360 chip information */
struct tps62360_chip {
	struct device *dev;
	struct regulator_desc desc;
	struct regulator_dev *rdev;
	struct regmap *regmap;
	struct gpio_desc *vsel0_gpio;	/* selects bit 0 of the VSET id */
	struct gpio_desc *vsel1_gpio;	/* selects bit 1 of the VSET id */
	u8 voltage_reg_mask;		/* 0x3F or 0x7F depending on chip */
	bool en_internal_pulldn;
	bool en_discharge;
	bool valid_gpios;		/* true when both VSEL GPIOs exist */
	int lru_index[4];		/* VSET ids, most recently used first */
	int curr_vset_vsel[4];		/* selector cached per VSET register */
	int curr_vset_id;		/* VSET register currently selected */
};
/*
 * find_voltage_set_register() - pick a VSET register for a requested selector.
 *
 * The four VSET registers form a small LRU cache of voltage configurations.
 * If one of them already holds @req_vsel, it is promoted to most recently
 * used and true is returned: the caller only needs to steer the VSEL GPIOs.
 * Otherwise the least recently used register is recycled and false is
 * returned, telling the caller to reprogram that register before switching
 * the GPIOs to it.
 */
static bool find_voltage_set_register(struct tps62360_chip *tps,
		int req_vsel, int *vset_reg_id)
{
	bool hit = false;
	int slot = tps->lru_index[3];	/* default: evict the LRU entry */
	int pos = 3;
	int idx;

	for (idx = 0; idx < 4; ++idx) {
		int reg = tps->lru_index[idx];

		if (tps->curr_vset_vsel[reg] == req_vsel) {
			slot = reg;
			pos = idx;
			hit = true;
			break;
		}
	}

	/* Promote the chosen register to the front (most recently used). */
	for (idx = pos; idx > 0; idx--)
		tps->lru_index[idx] = tps->lru_index[idx - 1];
	tps->lru_index[0] = slot;

	*vset_reg_id = slot;
	return hit;
}
/*
 * Read the voltage selector from the currently active VSET register.
 * Returns the selector (masked to the chip's selector width) or a
 * negative regmap error.
 */
static int tps62360_dcdc_get_voltage_sel(struct regulator_dev *dev)
{
	struct tps62360_chip *tps = rdev_get_drvdata(dev);
	int vsel;
	unsigned int data;
	int ret;

	ret = regmap_read(tps->regmap, REG_VSET0 + tps->curr_vset_id, &data);
	if (ret < 0) {
		dev_err(tps->dev, "%s(): register %d read failed with err %d\n",
			__func__, REG_VSET0 + tps->curr_vset_id, ret);
		return ret;
	}

	/* Strip the FORCE_PWM bit (and any reserved bits) from the raw value */
	vsel = (int)data & tps->voltage_reg_mask;
	return vsel;
}
/*
 * Program a new voltage selector. With valid VSEL GPIOs the LRU cache of
 * VSET registers is consulted first; a cached register is simply selected
 * via the GPIOs, otherwise the LRU register is rewritten. Without GPIOs
 * the single active VSET register is updated directly.
 */
static int tps62360_dcdc_set_voltage_sel(struct regulator_dev *dev,
		unsigned selector)
{
	struct tps62360_chip *tps = rdev_get_drvdata(dev);
	int ret;
	bool found = false;
	int new_vset_id = tps->curr_vset_id;

	/*
	 * If gpios are available to select the VSET register then least
	 * recently used register for new configuration.
	 */
	if (tps->valid_gpios)
		found = find_voltage_set_register(tps, selector, &new_vset_id);

	if (!found) {
		ret = regmap_update_bits(tps->regmap, REG_VSET0 + new_vset_id,
				tps->voltage_reg_mask, selector);
		if (ret < 0) {
			dev_err(tps->dev,
				"%s(): register %d update failed with err %d\n",
				__func__, REG_VSET0 + new_vset_id, ret);
			return ret;
		}
		tps->curr_vset_id = new_vset_id;
		tps->curr_vset_vsel[new_vset_id] = selector;
	}

	/* Select proper VSET register vio gpios */
	if (tps->valid_gpios) {
		gpiod_set_value_cansleep(tps->vsel0_gpio, new_vset_id & 0x1);
		gpiod_set_value_cansleep(tps->vsel1_gpio,
					(new_vset_id >> 1) & 0x1);
	}
	return 0;
}
/*
 * Set the regulator operating mode: FAST forces PWM, NORMAL allows the
 * chip's automatic mode. Without VSEL GPIOs only the active VSET register
 * is touched; with GPIOs all four VSET registers are updated because any
 * of them may be selected later.
 */
static int tps62360_set_mode(struct regulator_dev *rdev, unsigned int mode)
{
	struct tps62360_chip *tps = rdev_get_drvdata(rdev);
	int i;
	int val;
	int ret;

	/* Enable force PWM mode in FAST mode only. */
	switch (mode) {
	case REGULATOR_MODE_FAST:
		val = FORCE_PWM_ENABLE;
		break;

	case REGULATOR_MODE_NORMAL:
		val = 0;
		break;

	default:
		return -EINVAL;
	}

	if (!tps->valid_gpios) {
		ret = regmap_update_bits(tps->regmap,
			REG_VSET0 + tps->curr_vset_id, FORCE_PWM_ENABLE, val);
		if (ret < 0)
			dev_err(tps->dev,
				"%s(): register %d update failed with err %d\n",
				__func__, REG_VSET0 + tps->curr_vset_id, ret);
		return ret;
	}

	/* If gpios are valid then all register set need to be control */
	for (i = 0; i < 4; ++i) {
		ret = regmap_update_bits(tps->regmap,
			REG_VSET0 + i, FORCE_PWM_ENABLE, val);
		if (ret < 0) {
			dev_err(tps->dev,
				"%s(): register %d update failed with err %d\n",
				__func__, REG_VSET0 + i, ret);
			return ret;
		}
	}

	/* ret holds the (successful) result of the last update, i.e. 0 */
	return ret;
}
/*
 * Report the current mode from the active VSET register: FAST when the
 * FORCE_PWM bit is set, NORMAL otherwise.
 * NOTE(review): on a regmap read failure the negative errno is returned
 * through the unsigned return type — callers see a large positive value;
 * confirm whether the regulator core tolerates this.
 */
static unsigned int tps62360_get_mode(struct regulator_dev *rdev)
{
	struct tps62360_chip *tps = rdev_get_drvdata(rdev);
	unsigned int data;
	int ret;

	ret = regmap_read(tps->regmap, REG_VSET0 + tps->curr_vset_id, &data);
	if (ret < 0) {
		dev_err(tps->dev, "%s(): register %d read failed with err %d\n",
			__func__, REG_VSET0 + tps->curr_vset_id, ret);
		return ret;
	}

	return (data & FORCE_PWM_ENABLE) ?
				REGULATOR_MODE_FAST : REGULATOR_MODE_NORMAL;
}
/* DCDC operations: linear voltage map with VSET-cache-aware selectors */
static const struct regulator_ops tps62360_dcdc_ops = {
	.get_voltage_sel	= tps62360_dcdc_get_voltage_sel,
	.set_voltage_sel	= tps62360_dcdc_set_voltage_sel,
	.list_voltage		= regulator_list_voltage_linear,
	.map_voltage		= regulator_map_voltage_linear,
	.set_voltage_time_sel	= regulator_set_voltage_time_sel,
	.set_mode		= tps62360_set_mode,
	.get_mode		= tps62360_get_mode,
};
/*
 * One-time chip initialization at probe: program the internal pull-down
 * option, disable the output discharge path, and derive the regulator
 * ramp delay from the RAMPCTRL register. (The pdata argument is currently
 * unused; configuration is taken from fields already copied into tps.)
 * Returns 0 on success or a negative regmap error.
 */
static int tps62360_init_dcdc(struct tps62360_chip *tps,
		struct tps62360_regulator_platform_data *pdata)
{
	int ret;
	unsigned int ramp_ctrl;

	/* Initialize internal pull up/down control */
	if (tps->en_internal_pulldn)
		ret = regmap_write(tps->regmap, REG_CONTROL, 0xE0);
	else
		ret = regmap_write(tps->regmap, REG_CONTROL, 0x0);
	if (ret < 0) {
		dev_err(tps->dev,
			"%s(): register %d write failed with err %d\n",
			__func__, REG_CONTROL, ret);
		return ret;
	}

	/* Reset output discharge path to reduce power consumption */
	ret = regmap_update_bits(tps->regmap, REG_RAMPCTRL, BIT(2), 0);
	if (ret < 0) {
		dev_err(tps->dev,
			"%s(): register %d update failed with err %d\n",
			__func__, REG_RAMPCTRL, ret);
		return ret;
	}

	/* Get ramp value from ramp control register */
	ret = regmap_read(tps->regmap, REG_RAMPCTRL, &ramp_ctrl);
	if (ret < 0) {
		dev_err(tps->dev,
			"%s(): register %d read failed with err %d\n",
			__func__, REG_RAMPCTRL, ret);
		return ret;
	}
	ramp_ctrl = (ramp_ctrl >> 5) & 0x7;

	/* ramp mV/us = 32/(2^ramp_ctrl) */
	tps->desc.ramp_delay = DIV_ROUND_UP(32000, BIT(ramp_ctrl));
	return ret;
}
/* 8-bit registers 0x00-0x08, cached so mode/selector reads avoid I2C */
static const struct regmap_config tps62360_regmap_config = {
	.reg_bits		= 8,
	.val_bits		= 8,
	.max_register		= REG_CHIPID,
	.cache_type		= REGCACHE_RBTREE,
};
/*
 * Build platform data from device-tree properties. Returns a devres-
 * allocated pdata on success or NULL when allocation or the regulator
 * init-data lookup fails.
 */
static struct tps62360_regulator_platform_data *
	of_get_tps62360_platform_data(struct device *dev,
				      const struct regulator_desc *desc)
{
	struct tps62360_regulator_platform_data *pdata;
	struct device_node *np = dev->of_node;

	pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
	if (!pdata)
		return NULL;

	pdata->reg_init_data = of_get_regulator_init_data(dev, dev->of_node,
							  desc);
	if (!pdata->reg_init_data) {
		dev_err(dev, "Not able to get OF regulator init data\n");
		return NULL;
	}

	pdata->vsel0_def_state = of_property_read_bool(np, "ti,vsel0-state-high");
	pdata->vsel1_def_state = of_property_read_bool(np, "ti,vsel1-state-high");
	pdata->en_internal_pulldn = of_property_read_bool(np, "ti,enable-pull-down");
	pdata->en_discharge = of_property_read_bool(np, "ti,enable-vout-discharge");

	return pdata;
}
#if defined(CONFIG_OF)
/* Device-tree match table; .data carries the enum chips variant */
static const struct of_device_id tps62360_of_match[] = {
	 { .compatible = "ti,tps62360", .data = (void *)TPS62360},
	 { .compatible = "ti,tps62361", .data = (void *)TPS62361},
	 { .compatible = "ti,tps62362", .data = (void *)TPS62362},
	 { .compatible = "ti,tps62363", .data = (void *)TPS62363},
	{},
};
MODULE_DEVICE_TABLE(of, tps62360_of_match);
#endif
/*
 * tps62360_probe() - set up the chip and register its single regulator.
 *
 * Determines the chip variant from the OF match or the i2c id table,
 * obtains platform data (from DT when none was provided), configures the
 * optional VSEL GPIOs and the VSET-register LRU cache, initializes the
 * hardware and registers the regulator. All resources are devres-managed.
 *
 * Fix: the registration-failure message dereferenced id->name, but 'id'
 * is NULL when the device was matched via device tree (the OF branch
 * never uses it) — print client->name instead to avoid a NULL pointer
 * dereference on that error path.
 */
static int tps62360_probe(struct i2c_client *client)
{
	const struct i2c_device_id *id = i2c_client_get_device_id(client);
	struct regulator_config config = { };
	struct tps62360_regulator_platform_data *pdata;
	struct regulator_dev *rdev;
	struct tps62360_chip *tps;
	int ret;
	int i;
	int chip_id;
	int gpio_flags;

	pdata = dev_get_platdata(&client->dev);

	tps = devm_kzalloc(&client->dev, sizeof(*tps), GFP_KERNEL);
	if (!tps)
		return -ENOMEM;

	tps->desc.name = client->name;
	tps->desc.id = 0;
	tps->desc.ops = &tps62360_dcdc_ops;
	tps->desc.type = REGULATOR_VOLTAGE;
	tps->desc.owner = THIS_MODULE;
	tps->desc.uV_step = 10000;

	if (client->dev.of_node) {
		const struct of_device_id *match;

		match = of_match_device(of_match_ptr(tps62360_of_match),
				&client->dev);
		if (!match) {
			dev_err(&client->dev, "Error: No device match found\n");
			return -ENODEV;
		}
		chip_id = (int)(long)match->data;
		if (!pdata)
			pdata = of_get_tps62360_platform_data(&client->dev,
							      &tps->desc);
	} else if (id) {
		chip_id = id->driver_data;
	} else {
		dev_err(&client->dev, "No device tree match or id table match found\n");
		return -ENODEV;
	}

	if (!pdata) {
		dev_err(&client->dev, "%s(): Platform data not found\n",
						__func__);
		return -EIO;
	}

	tps->en_discharge = pdata->en_discharge;
	tps->en_internal_pulldn = pdata->en_internal_pulldn;
	tps->dev = &client->dev;

	switch (chip_id) {
	case TPS62360:
	case TPS62362:
		tps->desc.min_uV = TPS62360_BASE_VOLTAGE;
		tps->voltage_reg_mask = 0x3F;
		tps->desc.n_voltages = TPS62360_N_VOLTAGES;
		break;

	case TPS62361:
	case TPS62363:
		tps->desc.min_uV = TPS62361_BASE_VOLTAGE;
		tps->voltage_reg_mask = 0x7F;
		tps->desc.n_voltages = TPS62361_N_VOLTAGES;
		break;

	default:
		return -ENODEV;
	}

	tps->regmap = devm_regmap_init_i2c(client, &tps62360_regmap_config);
	if (IS_ERR(tps->regmap)) {
		ret = PTR_ERR(tps->regmap);
		dev_err(&client->dev,
			"%s(): regmap allocation failed with err %d\n",
			__func__, ret);
		return ret;
	}
	i2c_set_clientdata(client, tps);

	tps->curr_vset_id = (pdata->vsel1_def_state & 1) * 2 +
				(pdata->vsel0_def_state & 1);
	tps->lru_index[0] = tps->curr_vset_id;
	tps->valid_gpios = false;

	gpio_flags = (pdata->vsel0_def_state) ?
			GPIOD_OUT_HIGH : GPIOD_OUT_LOW;
	tps->vsel0_gpio = devm_gpiod_get_optional(&client->dev, "vsel0", gpio_flags);
	if (IS_ERR(tps->vsel0_gpio)) {
		dev_err(&client->dev,
			"%s(): Could not obtain vsel0 GPIO: %ld\n",
			__func__, PTR_ERR(tps->vsel0_gpio));
		return PTR_ERR(tps->vsel0_gpio);
	}

	gpio_flags = (pdata->vsel1_def_state) ?
			GPIOD_OUT_HIGH : GPIOD_OUT_LOW;
	tps->vsel1_gpio = devm_gpiod_get_optional(&client->dev, "vsel1", gpio_flags);
	if (IS_ERR(tps->vsel1_gpio)) {
		dev_err(&client->dev,
			"%s(): Could not obtain vsel1 GPIO: %ld\n",
			__func__, PTR_ERR(tps->vsel1_gpio));
		return PTR_ERR(tps->vsel1_gpio);
	}

	if (tps->vsel0_gpio != NULL && tps->vsel1_gpio != NULL) {
		tps->valid_gpios = true;

		/*
		 * Initialize the lru index with vset_reg id
		 * The index 0 will be most recently used and
		 * set with the tps->curr_vset_id
		 */
		for (i = 0; i < 4; ++i)
			tps->lru_index[i] = i;
		tps->lru_index[0] = tps->curr_vset_id;
		tps->lru_index[tps->curr_vset_id] = 0;
	}

	ret = tps62360_init_dcdc(tps, pdata);
	if (ret < 0) {
		dev_err(tps->dev, "%s(): Init failed with err = %d\n",
			__func__, ret);
		return ret;
	}

	config.dev = &client->dev;
	config.init_data = pdata->reg_init_data;
	config.driver_data = tps;
	config.of_node = client->dev.of_node;

	/* Register the regulators */
	rdev = devm_regulator_register(&client->dev, &tps->desc, &config);
	if (IS_ERR(rdev)) {
		/* 'id' may be NULL (OF match), so report via client->name */
		dev_err(tps->dev,
			"%s(): regulator register failed for %s\n",
			__func__, client->name);
		return PTR_ERR(rdev);
	}

	tps->rdev = rdev;
	return 0;
}
/*
 * On shutdown, optionally enable the output discharge path (RAMPCTRL
 * bit 2) so the rail drains quickly once power is removed. No-op unless
 * platform data requested discharge.
 */
static void tps62360_shutdown(struct i2c_client *client)
{
	struct tps62360_chip *tps = i2c_get_clientdata(client);
	int st;

	if (!tps->en_discharge)
		return;

	/* Configure the output discharge path */
	st = regmap_update_bits(tps->regmap, REG_RAMPCTRL, BIT(2), BIT(2));
	if (st < 0)
		dev_err(tps->dev,
			"%s(): register %d update failed with err %d\n",
			__func__, REG_RAMPCTRL, st);
}
/* I2C id table; driver_data carries the enum chips variant */
static const struct i2c_device_id tps62360_id[] = {
	{.name = "tps62360", .driver_data = TPS62360},
	{.name = "tps62361", .driver_data = TPS62361},
	{.name = "tps62362", .driver_data = TPS62362},
	{.name = "tps62363", .driver_data = TPS62363},
	{},
};

MODULE_DEVICE_TABLE(i2c, tps62360_id);

static struct i2c_driver tps62360_i2c_driver = {
	.driver = {
		.name = "tps62360",
		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
		.of_match_table = of_match_ptr(tps62360_of_match),
	},
	.probe = tps62360_probe,
	.shutdown = tps62360_shutdown,
	.id_table = tps62360_id,
};

/* Registered at subsys_initcall time so consumers can find the supply early */
static int __init tps62360_init(void)
{
	return i2c_add_driver(&tps62360_i2c_driver);
}
subsys_initcall(tps62360_init);

static void __exit tps62360_cleanup(void)
{
	i2c_del_driver(&tps62360_i2c_driver);
}
module_exit(tps62360_cleanup);

MODULE_AUTHOR("Laxman Dewangan <[email protected]>");
MODULE_DESCRIPTION("TPS6236x voltage regulator driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/regulator/tps62360-regulator.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* TI LP8788 MFD - ldo regulator driver
*
* Copyright 2012 Texas Instruments
*
* Author: Milo(Woogyom) Kim <[email protected]>
*/
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/regulator/driver.h>
#include <linux/gpio/consumer.h>
#include <linux/mfd/lp8788.h>
/* register address */
#define LP8788_EN_LDO_A 0x0D /* DLDO 1 ~ 8 */
#define LP8788_EN_LDO_B 0x0E /* DLDO 9 ~ 12, ALDO 1 ~ 4 */
#define LP8788_EN_LDO_C 0x0F /* ALDO 5 ~ 10 */
#define LP8788_EN_SEL 0x10
#define LP8788_DLDO1_VOUT 0x2E
#define LP8788_DLDO2_VOUT 0x2F
#define LP8788_DLDO3_VOUT 0x30
#define LP8788_DLDO4_VOUT 0x31
#define LP8788_DLDO5_VOUT 0x32
#define LP8788_DLDO6_VOUT 0x33
#define LP8788_DLDO7_VOUT 0x34
#define LP8788_DLDO8_VOUT 0x35
#define LP8788_DLDO9_VOUT 0x36
#define LP8788_DLDO10_VOUT 0x37
#define LP8788_DLDO11_VOUT 0x38
#define LP8788_DLDO12_VOUT 0x39
#define LP8788_ALDO1_VOUT 0x3A
#define LP8788_ALDO2_VOUT 0x3B
#define LP8788_ALDO3_VOUT 0x3C
#define LP8788_ALDO4_VOUT 0x3D
#define LP8788_ALDO5_VOUT 0x3E
#define LP8788_ALDO6_VOUT 0x3F
#define LP8788_ALDO7_VOUT 0x40
#define LP8788_ALDO8_VOUT 0x41
#define LP8788_ALDO9_VOUT 0x42
#define LP8788_ALDO10_VOUT 0x43
#define LP8788_DLDO1_TIMESTEP 0x44
/* mask/shift bits */
#define LP8788_EN_DLDO1_M BIT(0) /* Addr 0Dh ~ 0Fh */
#define LP8788_EN_DLDO2_M BIT(1)
#define LP8788_EN_DLDO3_M BIT(2)
#define LP8788_EN_DLDO4_M BIT(3)
#define LP8788_EN_DLDO5_M BIT(4)
#define LP8788_EN_DLDO6_M BIT(5)
#define LP8788_EN_DLDO7_M BIT(6)
#define LP8788_EN_DLDO8_M BIT(7)
#define LP8788_EN_DLDO9_M BIT(0)
#define LP8788_EN_DLDO10_M BIT(1)
#define LP8788_EN_DLDO11_M BIT(2)
#define LP8788_EN_DLDO12_M BIT(3)
#define LP8788_EN_ALDO1_M BIT(4)
#define LP8788_EN_ALDO2_M BIT(5)
#define LP8788_EN_ALDO3_M BIT(6)
#define LP8788_EN_ALDO4_M BIT(7)
#define LP8788_EN_ALDO5_M BIT(0)
#define LP8788_EN_ALDO6_M BIT(1)
#define LP8788_EN_ALDO7_M BIT(2)
#define LP8788_EN_ALDO8_M BIT(3)
#define LP8788_EN_ALDO9_M BIT(4)
#define LP8788_EN_ALDO10_M BIT(5)
#define LP8788_EN_SEL_DLDO911_M BIT(0) /* Addr 10h */
#define LP8788_EN_SEL_DLDO7_M BIT(1)
#define LP8788_EN_SEL_ALDO7_M BIT(2)
#define LP8788_EN_SEL_ALDO5_M BIT(3)
#define LP8788_EN_SEL_ALDO234_M BIT(4)
#define LP8788_EN_SEL_ALDO1_M BIT(5)
#define LP8788_VOUT_5BIT_M 0x1F /* Addr 2Eh ~ 43h */
#define LP8788_VOUT_4BIT_M 0x0F
#define LP8788_VOUT_3BIT_M 0x07
#define LP8788_VOUT_1BIT_M 0x01
#define LP8788_STARTUP_TIME_M 0xF8 /* Addr 44h ~ 59h */
#define LP8788_STARTUP_TIME_S 3
#define ENABLE_TIME_USEC 32
/*
 * LDO identifiers.  The ordering matters: DLDO ids index lp8788_dldo_desc[],
 * ALDO ids (offset by ALDO1) index lp8788_aldo_desc[], and the raw enum
 * value is added to LP8788_DLDO1_TIMESTEP to locate each LDO's time-step
 * register in lp8788_ldo_enable_time().
 */
enum lp8788_ldo_id {
	DLDO1,
	DLDO2,
	DLDO3,
	DLDO4,
	DLDO5,
	DLDO6,
	DLDO7,
	DLDO8,
	DLDO9,
	DLDO10,
	DLDO11,
	DLDO12,
	ALDO1,
	ALDO2,
	ALDO3,
	ALDO4,
	ALDO5,
	ALDO6,
	ALDO7,
	ALDO8,
	ALDO9,
	ALDO10,
};
/*
 * Per-LDO driver state.
 * @lp:        parent LP8788 MFD handle (provides regmap and register I/O)
 * @desc:      regulator description in use for this LDO
 * @regulator: regulator device returned by devm_regulator_register()
 * @ena_gpiod: optional external enable GPIO; ownership is handed to the
 *             regulator core via config.ena_gpiod when present
 */
struct lp8788_ldo {
	struct lp8788 *lp;
	struct regulator_desc *desc;
	struct regulator_dev *regulator;
	struct gpio_desc *ena_gpiod;
};
/*
 * Voltage selector tables, indexed by the raw VOUT register field.
 * Trailing repeated entries clamp out-of-range selector values to the
 * highest supported voltage for that LDO.
 */
/* DLDO 1, 2, 3, 9 voltage table */
static const int lp8788_dldo1239_vtbl[] = {
	1800000, 1900000, 2000000, 2100000, 2200000, 2300000, 2400000, 2500000,
	2600000, 2700000, 2800000, 2900000, 3000000, 2850000, 2850000, 2850000,
	2850000, 2850000, 2850000, 2850000, 2850000, 2850000, 2850000, 2850000,
	2850000, 2850000, 2850000, 2850000, 2850000, 2850000, 2850000, 2850000,
};
/* DLDO 4 voltage table */
static const int lp8788_dldo4_vtbl[] = { 1800000, 3000000 };
/* DLDO 5, 7, 8 and ALDO 6 voltage table */
static const int lp8788_dldo578_aldo6_vtbl[] = {
	1800000, 1900000, 2000000, 2100000, 2200000, 2300000, 2400000, 2500000,
	2600000, 2700000, 2800000, 2900000, 3000000, 3000000, 3000000, 3000000,
};
/* DLDO 6 voltage table */
static const int lp8788_dldo6_vtbl[] = {
	3000000, 3100000, 3200000, 3300000, 3400000, 3500000, 3600000, 3600000,
};
/* DLDO 10, 11 voltage table */
static const int lp8788_dldo1011_vtbl[] = {
	1100000, 1150000, 1200000, 1250000, 1300000, 1350000, 1400000, 1450000,
	1500000, 1500000, 1500000, 1500000, 1500000, 1500000, 1500000, 1500000,
};
/* ALDO 1 voltage table */
static const int lp8788_aldo1_vtbl[] = { 1800000, 2850000 };
/* ALDO 7 voltage table */
static const int lp8788_aldo7_vtbl[] = {
	1200000, 1300000, 1400000, 1500000, 1600000, 1700000, 1800000, 1800000,
};
/*
 * Report the LDO soft-start delay in microseconds.
 *
 * Each LDO has a time-step register located at LP8788_DLDO1_TIMESTEP plus
 * its id; the upper bits hold a step count, and the delay is that count
 * multiplied by ENABLE_TIME_USEC.  Returns -EINVAL if the register read
 * fails.
 */
static int lp8788_ldo_enable_time(struct regulator_dev *rdev)
{
	struct lp8788_ldo *ldo = rdev_get_drvdata(rdev);
	u8 reg = LP8788_DLDO1_TIMESTEP + rdev_get_id(rdev);
	u8 step;

	if (lp8788_read_byte(ldo->lp, reg, &step))
		return -EINVAL;

	step = (step & LP8788_STARTUP_TIME_M) >> LP8788_STARTUP_TIME_S;

	return ENABLE_TIME_USEC * step;
}
/* Ops for LDOs with a selectable output voltage (table-driven VOUT field). */
static const struct regulator_ops lp8788_ldo_voltage_table_ops = {
	.list_voltage = regulator_list_voltage_table,
	.set_voltage_sel = regulator_set_voltage_sel_regmap,
	.get_voltage_sel = regulator_get_voltage_sel_regmap,
	.enable = regulator_enable_regmap,
	.disable = regulator_disable_regmap,
	.is_enabled = regulator_is_enabled_regmap,
	.enable_time = lp8788_ldo_enable_time,
};
/* Ops for fixed-voltage LDOs (single voltage given by desc.min_uV). */
static const struct regulator_ops lp8788_ldo_voltage_fixed_ops = {
	.list_voltage = regulator_list_voltage_linear,
	.enable = regulator_enable_regmap,
	.disable = regulator_disable_regmap,
	.is_enabled = regulator_is_enabled_regmap,
	.enable_time = lp8788_ldo_enable_time,
};
/*
 * Digital LDO descriptors, indexed by platform-device id (== DLDOn - 1).
 * Enable bits for DLDO1-8 live in LP8788_EN_LDO_A, DLDO9-12 in
 * LP8788_EN_LDO_B.  DLDO12 is fixed at 2.5 V.
 */
static const struct regulator_desc lp8788_dldo_desc[] = {
	{
		.name = "dldo1",
		.id = DLDO1,
		.ops = &lp8788_ldo_voltage_table_ops,
		.n_voltages = ARRAY_SIZE(lp8788_dldo1239_vtbl),
		.volt_table = lp8788_dldo1239_vtbl,
		.type = REGULATOR_VOLTAGE,
		.owner = THIS_MODULE,
		.vsel_reg = LP8788_DLDO1_VOUT,
		.vsel_mask = LP8788_VOUT_5BIT_M,
		.enable_reg = LP8788_EN_LDO_A,
		.enable_mask = LP8788_EN_DLDO1_M,
	},
	{
		.name = "dldo2",
		.id = DLDO2,
		.ops = &lp8788_ldo_voltage_table_ops,
		.n_voltages = ARRAY_SIZE(lp8788_dldo1239_vtbl),
		.volt_table = lp8788_dldo1239_vtbl,
		.type = REGULATOR_VOLTAGE,
		.owner = THIS_MODULE,
		.vsel_reg = LP8788_DLDO2_VOUT,
		.vsel_mask = LP8788_VOUT_5BIT_M,
		.enable_reg = LP8788_EN_LDO_A,
		.enable_mask = LP8788_EN_DLDO2_M,
	},
	{
		.name = "dldo3",
		.id = DLDO3,
		.ops = &lp8788_ldo_voltage_table_ops,
		.n_voltages = ARRAY_SIZE(lp8788_dldo1239_vtbl),
		.volt_table = lp8788_dldo1239_vtbl,
		.type = REGULATOR_VOLTAGE,
		.owner = THIS_MODULE,
		.vsel_reg = LP8788_DLDO3_VOUT,
		.vsel_mask = LP8788_VOUT_5BIT_M,
		.enable_reg = LP8788_EN_LDO_A,
		.enable_mask = LP8788_EN_DLDO3_M,
	},
	{
		.name = "dldo4",
		.id = DLDO4,
		.ops = &lp8788_ldo_voltage_table_ops,
		.n_voltages = ARRAY_SIZE(lp8788_dldo4_vtbl),
		.volt_table = lp8788_dldo4_vtbl,
		.type = REGULATOR_VOLTAGE,
		.owner = THIS_MODULE,
		.vsel_reg = LP8788_DLDO4_VOUT,
		.vsel_mask = LP8788_VOUT_1BIT_M,
		.enable_reg = LP8788_EN_LDO_A,
		.enable_mask = LP8788_EN_DLDO4_M,
	},
	{
		.name = "dldo5",
		.id = DLDO5,
		.ops = &lp8788_ldo_voltage_table_ops,
		.n_voltages = ARRAY_SIZE(lp8788_dldo578_aldo6_vtbl),
		.volt_table = lp8788_dldo578_aldo6_vtbl,
		.type = REGULATOR_VOLTAGE,
		.owner = THIS_MODULE,
		.vsel_reg = LP8788_DLDO5_VOUT,
		.vsel_mask = LP8788_VOUT_4BIT_M,
		.enable_reg = LP8788_EN_LDO_A,
		.enable_mask = LP8788_EN_DLDO5_M,
	},
	{
		.name = "dldo6",
		.id = DLDO6,
		.ops = &lp8788_ldo_voltage_table_ops,
		.n_voltages = ARRAY_SIZE(lp8788_dldo6_vtbl),
		.volt_table = lp8788_dldo6_vtbl,
		.type = REGULATOR_VOLTAGE,
		.owner = THIS_MODULE,
		.vsel_reg = LP8788_DLDO6_VOUT,
		.vsel_mask = LP8788_VOUT_3BIT_M,
		.enable_reg = LP8788_EN_LDO_A,
		.enable_mask = LP8788_EN_DLDO6_M,
	},
	{
		.name = "dldo7",
		.id = DLDO7,
		.ops = &lp8788_ldo_voltage_table_ops,
		.n_voltages = ARRAY_SIZE(lp8788_dldo578_aldo6_vtbl),
		.volt_table = lp8788_dldo578_aldo6_vtbl,
		.type = REGULATOR_VOLTAGE,
		.owner = THIS_MODULE,
		.vsel_reg = LP8788_DLDO7_VOUT,
		.vsel_mask = LP8788_VOUT_4BIT_M,
		.enable_reg = LP8788_EN_LDO_A,
		.enable_mask = LP8788_EN_DLDO7_M,
	},
	{
		.name = "dldo8",
		.id = DLDO8,
		.ops = &lp8788_ldo_voltage_table_ops,
		.n_voltages = ARRAY_SIZE(lp8788_dldo578_aldo6_vtbl),
		.volt_table = lp8788_dldo578_aldo6_vtbl,
		.type = REGULATOR_VOLTAGE,
		.owner = THIS_MODULE,
		.vsel_reg = LP8788_DLDO8_VOUT,
		.vsel_mask = LP8788_VOUT_4BIT_M,
		.enable_reg = LP8788_EN_LDO_A,
		.enable_mask = LP8788_EN_DLDO8_M,
	},
	{
		.name = "dldo9",
		.id = DLDO9,
		.ops = &lp8788_ldo_voltage_table_ops,
		.n_voltages = ARRAY_SIZE(lp8788_dldo1239_vtbl),
		.volt_table = lp8788_dldo1239_vtbl,
		.type = REGULATOR_VOLTAGE,
		.owner = THIS_MODULE,
		.vsel_reg = LP8788_DLDO9_VOUT,
		.vsel_mask = LP8788_VOUT_5BIT_M,
		.enable_reg = LP8788_EN_LDO_B,
		.enable_mask = LP8788_EN_DLDO9_M,
	},
	{
		.name = "dldo10",
		.id = DLDO10,
		.ops = &lp8788_ldo_voltage_table_ops,
		.n_voltages = ARRAY_SIZE(lp8788_dldo1011_vtbl),
		.volt_table = lp8788_dldo1011_vtbl,
		.type = REGULATOR_VOLTAGE,
		.owner = THIS_MODULE,
		.vsel_reg = LP8788_DLDO10_VOUT,
		.vsel_mask = LP8788_VOUT_4BIT_M,
		.enable_reg = LP8788_EN_LDO_B,
		.enable_mask = LP8788_EN_DLDO10_M,
	},
	{
		.name = "dldo11",
		.id = DLDO11,
		.ops = &lp8788_ldo_voltage_table_ops,
		.n_voltages = ARRAY_SIZE(lp8788_dldo1011_vtbl),
		.volt_table = lp8788_dldo1011_vtbl,
		.type = REGULATOR_VOLTAGE,
		.owner = THIS_MODULE,
		.vsel_reg = LP8788_DLDO11_VOUT,
		.vsel_mask = LP8788_VOUT_4BIT_M,
		.enable_reg = LP8788_EN_LDO_B,
		.enable_mask = LP8788_EN_DLDO11_M,
	},
	{
		.name = "dldo12",
		.id = DLDO12,
		.ops = &lp8788_ldo_voltage_fixed_ops,
		.n_voltages = 1,
		.type = REGULATOR_VOLTAGE,
		.owner = THIS_MODULE,
		.enable_reg = LP8788_EN_LDO_B,
		.enable_mask = LP8788_EN_DLDO12_M,
		.min_uV = 2500000,
	},
};
/*
 * Analog LDO descriptors, indexed by platform-device id (== ALDOn - 1).
 * Enable bits for ALDO1-4 live in LP8788_EN_LDO_B, ALDO5-10 in
 * LP8788_EN_LDO_C.  ALDO2-5 and ALDO8-10 are fixed-voltage.
 */
static const struct regulator_desc lp8788_aldo_desc[] = {
	{
		.name = "aldo1",
		.id = ALDO1,
		.ops = &lp8788_ldo_voltage_table_ops,
		.n_voltages = ARRAY_SIZE(lp8788_aldo1_vtbl),
		.volt_table = lp8788_aldo1_vtbl,
		.type = REGULATOR_VOLTAGE,
		.owner = THIS_MODULE,
		.vsel_reg = LP8788_ALDO1_VOUT,
		.vsel_mask = LP8788_VOUT_1BIT_M,
		.enable_reg = LP8788_EN_LDO_B,
		.enable_mask = LP8788_EN_ALDO1_M,
	},
	{
		.name = "aldo2",
		.id = ALDO2,
		.ops = &lp8788_ldo_voltage_fixed_ops,
		.n_voltages = 1,
		.type = REGULATOR_VOLTAGE,
		.owner = THIS_MODULE,
		.enable_reg = LP8788_EN_LDO_B,
		.enable_mask = LP8788_EN_ALDO2_M,
		.min_uV = 2850000,
	},
	{
		.name = "aldo3",
		.id = ALDO3,
		.ops = &lp8788_ldo_voltage_fixed_ops,
		.n_voltages = 1,
		.type = REGULATOR_VOLTAGE,
		.owner = THIS_MODULE,
		.enable_reg = LP8788_EN_LDO_B,
		.enable_mask = LP8788_EN_ALDO3_M,
		.min_uV = 2850000,
	},
	{
		.name = "aldo4",
		.id = ALDO4,
		.ops = &lp8788_ldo_voltage_fixed_ops,
		.n_voltages = 1,
		.type = REGULATOR_VOLTAGE,
		.owner = THIS_MODULE,
		.enable_reg = LP8788_EN_LDO_B,
		.enable_mask = LP8788_EN_ALDO4_M,
		.min_uV = 2850000,
	},
	{
		.name = "aldo5",
		.id = ALDO5,
		.ops = &lp8788_ldo_voltage_fixed_ops,
		.n_voltages = 1,
		.type = REGULATOR_VOLTAGE,
		.owner = THIS_MODULE,
		.enable_reg = LP8788_EN_LDO_C,
		.enable_mask = LP8788_EN_ALDO5_M,
		.min_uV = 2850000,
	},
	{
		.name = "aldo6",
		.id = ALDO6,
		.ops = &lp8788_ldo_voltage_table_ops,
		.n_voltages = ARRAY_SIZE(lp8788_dldo578_aldo6_vtbl),
		.volt_table = lp8788_dldo578_aldo6_vtbl,
		.type = REGULATOR_VOLTAGE,
		.owner = THIS_MODULE,
		.vsel_reg = LP8788_ALDO6_VOUT,
		.vsel_mask = LP8788_VOUT_4BIT_M,
		.enable_reg = LP8788_EN_LDO_C,
		.enable_mask = LP8788_EN_ALDO6_M,
	},
	{
		.name = "aldo7",
		.id = ALDO7,
		.ops = &lp8788_ldo_voltage_table_ops,
		.n_voltages = ARRAY_SIZE(lp8788_aldo7_vtbl),
		.volt_table = lp8788_aldo7_vtbl,
		.type = REGULATOR_VOLTAGE,
		.owner = THIS_MODULE,
		.vsel_reg = LP8788_ALDO7_VOUT,
		.vsel_mask = LP8788_VOUT_3BIT_M,
		.enable_reg = LP8788_EN_LDO_C,
		.enable_mask = LP8788_EN_ALDO7_M,
	},
	{
		.name = "aldo8",
		.id = ALDO8,
		.ops = &lp8788_ldo_voltage_fixed_ops,
		.n_voltages = 1,
		.type = REGULATOR_VOLTAGE,
		.owner = THIS_MODULE,
		.enable_reg = LP8788_EN_LDO_C,
		.enable_mask = LP8788_EN_ALDO8_M,
		.min_uV = 2500000,
	},
	{
		.name = "aldo9",
		.id = ALDO9,
		.ops = &lp8788_ldo_voltage_fixed_ops,
		.n_voltages = 1,
		.type = REGULATOR_VOLTAGE,
		.owner = THIS_MODULE,
		.enable_reg = LP8788_EN_LDO_C,
		.enable_mask = LP8788_EN_ALDO9_M,
		.min_uV = 2500000,
	},
	{
		.name = "aldo10",
		.id = ALDO10,
		.ops = &lp8788_ldo_voltage_fixed_ops,
		.n_voltages = 1,
		.type = REGULATOR_VOLTAGE,
		.owner = THIS_MODULE,
		.enable_reg = LP8788_EN_LDO_C,
		.enable_mask = LP8788_EN_ALDO10_M,
		.min_uV = 1100000,
	},
};
static int lp8788_config_ldo_enable_mode(struct platform_device *pdev,
struct lp8788_ldo *ldo,
enum lp8788_ldo_id id)
{
struct lp8788 *lp = ldo->lp;
enum lp8788_ext_ldo_en_id enable_id;
static const u8 en_mask[] = {
[EN_ALDO1] = LP8788_EN_SEL_ALDO1_M,
[EN_ALDO234] = LP8788_EN_SEL_ALDO234_M,
[EN_ALDO5] = LP8788_EN_SEL_ALDO5_M,
[EN_ALDO7] = LP8788_EN_SEL_ALDO7_M,
[EN_DLDO7] = LP8788_EN_SEL_DLDO7_M,
[EN_DLDO911] = LP8788_EN_SEL_DLDO911_M,
};
switch (id) {
case DLDO7:
enable_id = EN_DLDO7;
break;
case DLDO9:
case DLDO11:
enable_id = EN_DLDO911;
break;
case ALDO1:
enable_id = EN_ALDO1;
break;
case ALDO2 ... ALDO4:
enable_id = EN_ALDO234;
break;
case ALDO5:
enable_id = EN_ALDO5;
break;
case ALDO7:
enable_id = EN_ALDO7;
break;
default:
return 0;
}
/*
* Do not use devm* here: the regulator core takes over the
* lifecycle management of the GPIO descriptor.
* FIXME: check default mode for GPIO here: high or low?
*/
ldo->ena_gpiod = gpiod_get_index_optional(&pdev->dev,
"enable",
enable_id,
GPIOD_OUT_HIGH |
GPIOD_FLAGS_BIT_NONEXCLUSIVE);
if (IS_ERR(ldo->ena_gpiod))
return PTR_ERR(ldo->ena_gpiod);
/* if no GPIO for ldo pin, then set default enable mode */
if (!ldo->ena_gpiod)
goto set_default_ldo_enable_mode;
return 0;
set_default_ldo_enable_mode:
return lp8788_update_bits(lp, LP8788_EN_SEL, en_mask[enable_id], 0);
}
/*
 * Probe one digital LDO.  The platform-device id selects the entry in
 * lp8788_dldo_desc[]; optional platform data supplies init constraints.
 */
static int lp8788_dldo_probe(struct platform_device *pdev)
{
	struct lp8788 *lp = dev_get_drvdata(pdev->dev.parent);
	struct regulator_config cfg = { };
	struct regulator_dev *rdev;
	struct lp8788_ldo *ldo;
	int id = pdev->id;
	int ret;

	ldo = devm_kzalloc(&pdev->dev, sizeof(*ldo), GFP_KERNEL);
	if (!ldo)
		return -ENOMEM;

	ldo->lp = lp;

	ret = lp8788_config_ldo_enable_mode(pdev, ldo, id);
	if (ret)
		return ret;

	/* hand any external enable GPIO over to the regulator core */
	if (ldo->ena_gpiod)
		cfg.ena_gpiod = ldo->ena_gpiod;

	cfg.dev = pdev->dev.parent;
	cfg.init_data = lp->pdata ? lp->pdata->dldo_data[id] : NULL;
	cfg.driver_data = ldo;
	cfg.regmap = lp->regmap;

	rdev = devm_regulator_register(&pdev->dev, &lp8788_dldo_desc[id], &cfg);
	if (IS_ERR(rdev)) {
		ret = PTR_ERR(rdev);
		dev_err(&pdev->dev, "DLDO%d regulator register err = %d\n",
			id + 1, ret);
		return ret;
	}

	ldo->regulator = rdev;
	platform_set_drvdata(pdev, ldo);

	return 0;
}
/* Platform driver for the DLDO sub-devices created by the LP8788 MFD core. */
static struct platform_driver lp8788_dldo_driver = {
	.probe = lp8788_dldo_probe,
	.driver = {
		.name = LP8788_DEV_DLDO,
		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
	},
};
/*
 * Probe one analog LDO.  The platform-device id selects the entry in
 * lp8788_aldo_desc[]; the enable-mode helper needs the global LDO id,
 * hence the ALDO1 offset.
 */
static int lp8788_aldo_probe(struct platform_device *pdev)
{
	struct lp8788 *lp = dev_get_drvdata(pdev->dev.parent);
	struct regulator_config cfg = { };
	struct regulator_dev *rdev;
	struct lp8788_ldo *ldo;
	int id = pdev->id;
	int ret;

	ldo = devm_kzalloc(&pdev->dev, sizeof(*ldo), GFP_KERNEL);
	if (!ldo)
		return -ENOMEM;

	ldo->lp = lp;

	ret = lp8788_config_ldo_enable_mode(pdev, ldo, id + ALDO1);
	if (ret)
		return ret;

	/* hand any external enable GPIO over to the regulator core */
	if (ldo->ena_gpiod)
		cfg.ena_gpiod = ldo->ena_gpiod;

	cfg.dev = pdev->dev.parent;
	cfg.init_data = lp->pdata ? lp->pdata->aldo_data[id] : NULL;
	cfg.driver_data = ldo;
	cfg.regmap = lp->regmap;

	rdev = devm_regulator_register(&pdev->dev, &lp8788_aldo_desc[id], &cfg);
	if (IS_ERR(rdev)) {
		ret = PTR_ERR(rdev);
		dev_err(&pdev->dev, "ALDO%d regulator register err = %d\n",
			id + 1, ret);
		return ret;
	}

	ldo->regulator = rdev;
	platform_set_drvdata(pdev, ldo);

	return 0;
}
/* Platform driver for the ALDO sub-devices created by the LP8788 MFD core. */
static struct platform_driver lp8788_aldo_driver = {
	.probe = lp8788_aldo_probe,
	.driver = {
		.name = LP8788_DEV_ALDO,
		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
	},
};
/* Both drivers are registered/unregistered together. */
static struct platform_driver * const drivers[] = {
	&lp8788_dldo_driver,
	&lp8788_aldo_driver,
};
/*
 * Register both LDO platform drivers.  subsys_initcall (not module_init)
 * so the supplies are available early for dependent devices.
 */
static int __init lp8788_ldo_init(void)
{
	return platform_register_drivers(drivers, ARRAY_SIZE(drivers));
}
subsys_initcall(lp8788_ldo_init);
/* Unregister both LDO platform drivers on module removal. */
static void __exit lp8788_ldo_exit(void)
{
	platform_unregister_drivers(drivers, ARRAY_SIZE(drivers));
}
module_exit(lp8788_ldo_exit);
MODULE_DESCRIPTION("TI LP8788 LDO Driver");
MODULE_AUTHOR("Milo Kim");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:lp8788-dldo");
MODULE_ALIAS("platform:lp8788-aldo");
| linux-master | drivers/regulator/lp8788-ldo.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) ST-Ericsson SA 2010
*
* Authors: Bengt Jonsson <[email protected]>
*
* This file is based on drivers/regulator/ab8500.c
*
* AB8500 external regulators
*
* ab8500-ext supports the following regulators:
* - VextSupply3
*/
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/regulator/driver.h>
#include <linux/regulator/machine.h>
#include <linux/regulator/of_regulator.h>
#include <linux/mfd/abx500.h>
#include <linux/mfd/abx500/ab8500.h>
/* AB8500 external regulators */
/* Identifiers for the three AB8500 external supplies. */
enum ab8500_ext_regulator_id {
	AB8500_EXT_SUPPLY1,
	AB8500_EXT_SUPPLY2,
	AB8500_EXT_SUPPLY3,
	AB8500_NUM_EXT_REGULATORS,
};
/* Optional per-regulator configuration supplied via init_data->driver_data. */
struct ab8500_ext_regulator_cfg {
	bool hwreq; /* requires hw mode or high power mode */
};
/* supply for VextSupply3 */
static struct regulator_consumer_supply ab8500_ext_supply3_consumers[] = {
	/* SIM supply for 3 V SIM cards */
	REGULATOR_SUPPLY("vinvsim", "sim-detect.0"),
};
/*
 * AB8500 external regulators
 *
 * Static init_data for each supply; indexed by ab8500_ext_regulator_id
 * and paired 1:1 with ab8500_ext_regulator_info[] at probe time.
 */
static struct regulator_init_data ab8500_ext_regulators[] = {
	/* fixed Vbat supplies VSMPS1_EXT_1V8 */
	[AB8500_EXT_SUPPLY1] = {
		.constraints = {
			.name = "ab8500-ext-supply1",
			.min_uV = 1800000,
			.max_uV = 1800000,
			.initial_mode = REGULATOR_MODE_IDLE,
			.boot_on = 1,
			.always_on = 1,
		},
	},
	/* fixed Vbat supplies VSMPS2_EXT_1V36 and VSMPS5_EXT_1V15 */
	[AB8500_EXT_SUPPLY2] = {
		.constraints = {
			.name = "ab8500-ext-supply2",
			.min_uV = 1360000,
			.max_uV = 1360000,
		},
	},
	/* fixed Vbat supplies VSMPS3_EXT_3V4 and VSMPS4_EXT_3V4 */
	[AB8500_EXT_SUPPLY3] = {
		.constraints = {
			.name = "ab8500-ext-supply3",
			.min_uV = 3400000,
			.max_uV = 3400000,
			.valid_ops_mask = REGULATOR_CHANGE_STATUS,
			.boot_on = 1,
		},
		.num_consumer_supplies =
			ARRAY_SIZE(ab8500_ext_supply3_consumers),
		.consumer_supplies = ab8500_ext_supply3_consumers,
	},
};
/**
 * struct ab8500_ext_regulator_info - ab8500 regulator information
 * @dev: device pointer
 * @desc: regulator description
 * @cfg: regulator configuration (extension of regulator FW configuration)
 * @update_bank: bank to control on/off
 * @update_reg: register to control on/off
 * @update_mask: mask to enable/disable and set mode of regulator
 * @update_val: bits holding the regulator current mode
 * @update_val_hp: bits to set EN pin active (LPn pin deactive)
 *                 normally this means high power mode
 * @update_val_lp: bits to set EN pin active and LPn pin active
 *                 normally this means low power mode
 * @update_val_hw: bits to set regulator pins in HW control
 *                 SysClkReq pins and logic will choose mode
 */
struct ab8500_ext_regulator_info {
	struct device *dev;
	struct regulator_desc desc;
	struct ab8500_ext_regulator_cfg *cfg;
	u8 update_bank;
	u8 update_reg;
	u8 update_mask;
	u8 update_val;
	u8 update_val_hp;
	u8 update_val_lp;
	u8 update_val_hw;
};
/*
 * Enable the external supply by writing its enable/mode bits.
 * With a hwreq configuration the supply must come up in high power so
 * both the HW request and the SW request are satisfied.
 */
static int ab8500_ext_regulator_enable(struct regulator_dev *rdev)
{
	struct ab8500_ext_regulator_info *info = rdev_get_drvdata(rdev);
	u8 regval;
	int ret;

	if (!info) {
		dev_err(rdev_get_dev(rdev), "regulator info null pointer\n");
		return -EINVAL;
	}

	/*
	 * To satisfy both HW high power request and SW request, the regulator
	 * must be on in high power.
	 */
	regval = (info->cfg && info->cfg->hwreq) ? info->update_val_hp
						 : info->update_val;

	ret = abx500_mask_and_set_register_interruptible(info->dev,
			info->update_bank, info->update_reg,
			info->update_mask, regval);
	if (ret < 0) {
		dev_err(rdev_get_dev(rdev),
			"couldn't set enable bits for regulator\n");
		return ret;
	}

	dev_dbg(rdev_get_dev(rdev),
		"%s-enable (bank, reg, mask, value): 0x%02x, 0x%02x, 0x%02x, 0x%02x\n",
		info->desc.name, info->update_bank, info->update_reg,
		info->update_mask, regval);

	return 0;
}
/*
 * Disable the external supply.  With a hwreq configuration the pins are
 * handed back to HW (SysClkReq) control instead of being forced off.
 */
static int ab8500_ext_regulator_disable(struct regulator_dev *rdev)
{
	struct ab8500_ext_regulator_info *info = rdev_get_drvdata(rdev);
	u8 regval;
	int ret;

	if (!info) {
		dev_err(rdev_get_dev(rdev), "regulator info null pointer\n");
		return -EINVAL;
	}

	/*
	 * Set the regulator in HW request mode if configured
	 */
	regval = (info->cfg && info->cfg->hwreq) ? info->update_val_hw : 0;

	ret = abx500_mask_and_set_register_interruptible(info->dev,
			info->update_bank, info->update_reg,
			info->update_mask, regval);
	if (ret < 0) {
		dev_err(rdev_get_dev(rdev),
			"couldn't set disable bits for regulator\n");
		return ret;
	}

	dev_dbg(rdev_get_dev(rdev), "%s-disable (bank, reg, mask, value):"
		" 0x%02x, 0x%02x, 0x%02x, 0x%02x\n",
		info->desc.name, info->update_bank, info->update_reg,
		info->update_mask, regval);

	return 0;
}
/*
 * Report whether the supply is enabled: true when the masked control bits
 * match either the low-power or the high-power pattern.  Returns a
 * negative error code if the control register cannot be read.
 */
static int ab8500_ext_regulator_is_enabled(struct regulator_dev *rdev)
{
	struct ab8500_ext_regulator_info *info = rdev_get_drvdata(rdev);
	u8 regval;
	int ret;

	if (!info) {
		dev_err(rdev_get_dev(rdev), "regulator info null pointer\n");
		return -EINVAL;
	}

	ret = abx500_get_register_interruptible(info->dev,
		info->update_bank, info->update_reg, &regval);
	if (ret < 0) {
		dev_err(rdev_get_dev(rdev),
			"couldn't read 0x%x register\n", info->update_reg);
		return ret;
	}

	dev_dbg(rdev_get_dev(rdev), "%s-is_enabled (bank, reg, mask, value):"
		" 0x%02x, 0x%02x, 0x%02x, 0x%02x\n",
		info->desc.name, info->update_bank, info->update_reg,
		info->update_mask, regval);

	regval &= info->update_mask;

	return regval == info->update_val_lp || regval == info->update_val_hp;
}
/*
 * Set the operating mode (NORMAL -> high power bits, IDLE -> low power
 * bits).  The hardware register is only written when the regulator is
 * currently enabled and not under hwreq control; the requested mode is
 * always cached in update_val so a later enable applies it.
 *
 * Fix: ab8500_ext_regulator_is_enabled() returns a negative errno on a
 * failed register read; the previous code treated that non-zero value as
 * "enabled" and went on to write the mode register.  Propagate the error
 * instead of acting on an unknown hardware state.
 */
static int ab8500_ext_regulator_set_mode(struct regulator_dev *rdev,
					 unsigned int mode)
{
	struct ab8500_ext_regulator_info *info = rdev_get_drvdata(rdev);
	int enabled;
	int ret;
	u8 regval;

	if (info == NULL) {
		dev_err(rdev_get_dev(rdev), "regulator info null pointer\n");
		return -EINVAL;
	}

	switch (mode) {
	case REGULATOR_MODE_NORMAL:
		regval = info->update_val_hp;
		break;
	case REGULATOR_MODE_IDLE:
		regval = info->update_val_lp;
		break;
	default:
		return -EINVAL;
	}

	enabled = ab8500_ext_regulator_is_enabled(rdev);
	if (enabled < 0)
		return enabled;

	/* If regulator is enabled and info->cfg->hwreq is set, the regulator
	   must be on in high power, so we don't need to write the register with
	   the same value.
	 */
	if (enabled && !(info->cfg && info->cfg->hwreq)) {
		ret = abx500_mask_and_set_register_interruptible(info->dev,
					info->update_bank, info->update_reg,
					info->update_mask, regval);
		if (ret < 0) {
			dev_err(rdev_get_dev(rdev),
				"Could not set regulator mode.\n");
			return ret;
		}

		dev_dbg(rdev_get_dev(rdev),
			"%s-set_mode (bank, reg, mask, value): "
			"0x%x, 0x%x, 0x%x, 0x%x\n",
			info->desc.name, info->update_bank, info->update_reg,
			info->update_mask, regval);
	}

	info->update_val = regval;

	return 0;
}
/*
 * Report the cached operating mode derived from update_val.  Returns
 * -EINVAL (as an unsigned int, per the regulator_ops convention) when the
 * cached value matches neither power state.
 */
static unsigned int ab8500_ext_regulator_get_mode(struct regulator_dev *rdev)
{
	struct ab8500_ext_regulator_info *info = rdev_get_drvdata(rdev);

	if (!info) {
		dev_err(rdev_get_dev(rdev), "regulator info null pointer\n");
		return -EINVAL;
	}

	if (info->update_val == info->update_val_hp)
		return REGULATOR_MODE_NORMAL;
	if (info->update_val == info->update_val_lp)
		return REGULATOR_MODE_IDLE;

	return -EINVAL;
}
/*
 * "Set" the voltage of a fixed supply: succeed only when the request
 * matches the constraint window exactly, otherwise log and reject.
 */
static int ab8500_ext_set_voltage(struct regulator_dev *rdev, int min_uV,
				  int max_uV, unsigned *selector)
{
	struct regulation_constraints *constraints = rdev->constraints;

	if (!constraints) {
		dev_err(rdev_get_dev(rdev), "No regulator constraints\n");
		return -EINVAL;
	}

	if (constraints->min_uV == min_uV && constraints->max_uV == max_uV)
		return 0;

	dev_err(rdev_get_dev(rdev),
		"Requested min %duV max %duV != constrained min %duV max %duV\n",
		min_uV, max_uV,
		constraints->min_uV, constraints->max_uV);

	return -EINVAL;
}
/*
 * List the single voltage of a fixed supply, taken from the constraints
 * (valid only when min_uV is non-zero and equals max_uV).
 */
static int ab8500_ext_list_voltage(struct regulator_dev *rdev,
				   unsigned selector)
{
	struct regulation_constraints *constraints = rdev->constraints;

	if (!constraints) {
		dev_err(rdev_get_dev(rdev), "regulator constraints null pointer\n");
		return -EINVAL;
	}

	/* return the uV for the fixed regulators */
	if (constraints->min_uV && constraints->min_uV == constraints->max_uV)
		return constraints->min_uV;

	return -EINVAL;
}
/* Shared ops for all three external supplies. */
static const struct regulator_ops ab8500_ext_regulator_ops = {
	.enable = ab8500_ext_regulator_enable,
	.disable = ab8500_ext_regulator_disable,
	.is_enabled = ab8500_ext_regulator_is_enabled,
	.set_mode = ab8500_ext_regulator_set_mode,
	.get_mode = ab8500_ext_regulator_get_mode,
	.set_voltage = ab8500_ext_set_voltage,
	.list_voltage = ab8500_ext_list_voltage,
};
/*
 * Per-supply register data.  All three supplies share bank 0x04, register
 * 0x08, each occupying a 2-bit field selected by update_mask.  Mutable
 * (not const) because probe patches the SUPPLY3 values on AB8500 2.x,
 * where the LPn polarity is inverted.
 */
static struct ab8500_ext_regulator_info
		ab8500_ext_regulator_info[AB8500_NUM_EXT_REGULATORS] = {
	[AB8500_EXT_SUPPLY1] = {
		.desc = {
			.name = "VEXTSUPPLY1",
			.of_match = of_match_ptr("ab8500_ext1"),
			.ops = &ab8500_ext_regulator_ops,
			.type = REGULATOR_VOLTAGE,
			.id = AB8500_EXT_SUPPLY1,
			.owner = THIS_MODULE,
			.n_voltages = 1,
		},
		.update_bank = 0x04,
		.update_reg = 0x08,
		.update_mask = 0x03,
		.update_val = 0x01,
		.update_val_hp = 0x01,
		.update_val_lp = 0x03,
		.update_val_hw = 0x02,
	},
	[AB8500_EXT_SUPPLY2] = {
		.desc = {
			.name = "VEXTSUPPLY2",
			.of_match = of_match_ptr("ab8500_ext2"),
			.ops = &ab8500_ext_regulator_ops,
			.type = REGULATOR_VOLTAGE,
			.id = AB8500_EXT_SUPPLY2,
			.owner = THIS_MODULE,
			.n_voltages = 1,
		},
		.update_bank = 0x04,
		.update_reg = 0x08,
		.update_mask = 0x0c,
		.update_val = 0x04,
		.update_val_hp = 0x04,
		.update_val_lp = 0x0c,
		.update_val_hw = 0x08,
	},
	[AB8500_EXT_SUPPLY3] = {
		.desc = {
			.name = "VEXTSUPPLY3",
			.of_match = of_match_ptr("ab8500_ext3"),
			.ops = &ab8500_ext_regulator_ops,
			.type = REGULATOR_VOLTAGE,
			.id = AB8500_EXT_SUPPLY3,
			.owner = THIS_MODULE,
			.n_voltages = 1,
		},
		.update_bank = 0x04,
		.update_reg = 0x08,
		.update_mask = 0x30,
		.update_val = 0x10,
		.update_val_hp = 0x10,
		.update_val_lp = 0x30,
		.update_val_hw = 0x20,
	},
};
/*
 * Probe: patch the SUPPLY3 bit patterns on AB8500 2.x (inverted LPn pin),
 * then register all three external supplies with the regulator core.
 */
static int ab8500_ext_regulator_probe(struct platform_device *pdev)
{
	struct ab8500 *ab8500 = dev_get_drvdata(pdev->dev.parent);
	struct ab8500_ext_regulator_info *info;
	struct regulator_config config = { };
	struct regulator_dev *rdev;
	int i;

	if (!ab8500) {
		dev_err(&pdev->dev, "null mfd parent\n");
		return -EINVAL;
	}

	/* check for AB8500 2.x */
	if (is_ab8500_2p0_or_earlier(ab8500)) {
		/* VextSupply3LPn is inverted on AB8500 2.x */
		info = &ab8500_ext_regulator_info[AB8500_EXT_SUPPLY3];
		info->update_val = 0x30;
		info->update_val_hp = 0x30;
		info->update_val_lp = 0x10;
	}

	/* register all regulators */
	for (i = 0; i < ARRAY_SIZE(ab8500_ext_regulator_info); i++) {
		/* assign per-regulator data */
		info = &ab8500_ext_regulator_info[i];
		info->dev = &pdev->dev;
		info->cfg = (struct ab8500_ext_regulator_cfg *)
			ab8500_ext_regulators[i].driver_data;

		config.dev = &pdev->dev;
		config.driver_data = info;
		config.init_data = &ab8500_ext_regulators[i];

		/* register regulator with framework */
		rdev = devm_regulator_register(&pdev->dev, &info->desc,
					       &config);
		if (IS_ERR(rdev)) {
			dev_err(&pdev->dev, "failed to register regulator %s\n",
				info->desc.name);
			return PTR_ERR(rdev);
		}

		dev_dbg(&pdev->dev, "%s-probed\n", info->desc.name);
	}

	return 0;
}
/* Platform driver bound to the "ab8500-ext-regulator" MFD cell. */
static struct platform_driver ab8500_ext_regulator_driver = {
	.probe = ab8500_ext_regulator_probe,
	.driver = {
		.name = "ab8500-ext-regulator",
		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
	},
};
/*
 * Registered at subsys_initcall time so the supplies are available early
 * for dependent devices.
 */
static int __init ab8500_ext_regulator_init(void)
{
	int ret;
	ret = platform_driver_register(&ab8500_ext_regulator_driver);
	if (ret)
		pr_err("Failed to register ab8500 ext regulator: %d\n", ret);
	return ret;
}
subsys_initcall(ab8500_ext_regulator_init);
/* Unregister the driver on module removal. */
static void __exit ab8500_ext_regulator_exit(void)
{
	platform_driver_unregister(&ab8500_ext_regulator_driver);
}
module_exit(ab8500_ext_regulator_exit);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Bengt Jonsson <[email protected]>");
MODULE_DESCRIPTION("AB8500 external regulator driver");
MODULE_ALIAS("platform:ab8500-ext-regulator");
| linux-master | drivers/regulator/ab8500-ext.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* reg-virtual-consumer.c
*
* Copyright 2008 Wolfson Microelectronics PLC.
*
* Author: Mark Brown <[email protected]>
*/
#include <linux/err.h>
#include <linux/mutex.h>
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/of.h>
/*
 * Per-device state for the virtual test consumer.
 * @lock:      serializes sysfs writes that update constraints
 * @regulator: the supply under test
 * @enabled:   whether this consumer currently holds an enable reference
 * @min_uV/@max_uV: requested voltage window (0 means "no request")
 * @min_uA/@max_uA: requested current window (0 means "no request")
 * @mode:      last successfully requested operating mode
 */
struct virtual_consumer_data {
	struct mutex lock;
	struct regulator *regulator;
	bool enabled;
	int min_uV;
	int max_uV;
	int min_uA;
	int max_uA;
	unsigned int mode;
};
static void update_voltage_constraints(struct device *dev,
struct virtual_consumer_data *data)
{
int ret;
if (data->min_uV && data->max_uV
&& data->min_uV <= data->max_uV) {
dev_dbg(dev, "Requesting %d-%duV\n",
data->min_uV, data->max_uV);
ret = regulator_set_voltage(data->regulator,
data->min_uV, data->max_uV);
if (ret != 0) {
dev_err(dev,
"regulator_set_voltage() failed: %d\n", ret);
return;
}
}
if (data->min_uV && data->max_uV && !data->enabled) {
dev_dbg(dev, "Enabling regulator\n");
ret = regulator_enable(data->regulator);
if (ret == 0)
data->enabled = true;
else
dev_err(dev, "regulator_enable() failed: %d\n",
ret);
}
if (!(data->min_uV && data->max_uV) && data->enabled) {
dev_dbg(dev, "Disabling regulator\n");
ret = regulator_disable(data->regulator);
if (ret == 0)
data->enabled = false;
else
dev_err(dev, "regulator_disable() failed: %d\n",
ret);
}
}
/*
 * Apply the current-limit window to the supply.  Unlike the voltage
 * path, only max_uA gates the request/enable (min_uA may be 0), while
 * disable requires both bounds to be cleared.  Called with data->lock
 * held.
 */
static void update_current_limit_constraints(struct device *dev,
					     struct virtual_consumer_data *data)
{
	int ret;

	if (data->max_uA && data->min_uA <= data->max_uA) {
		dev_dbg(dev, "Requesting %d-%duA\n",
			data->min_uA, data->max_uA);
		ret = regulator_set_current_limit(data->regulator,
						  data->min_uA, data->max_uA);
		if (ret != 0) {
			dev_err(dev,
				"regulator_set_current_limit() failed: %d\n",
				ret);
			return;
		}
	}

	if (data->max_uA && !data->enabled) {
		dev_dbg(dev, "Enabling regulator\n");
		ret = regulator_enable(data->regulator);
		if (ret == 0)
			data->enabled = true;
		else
			dev_err(dev, "regulator_enable() failed: %d\n",
				ret);
	}

	if (!(data->min_uA && data->max_uA) && data->enabled) {
		dev_dbg(dev, "Disabling regulator\n");
		ret = regulator_disable(data->regulator);
		if (ret == 0)
			data->enabled = false;
		else
			dev_err(dev, "regulator_disable() failed: %d\n",
				ret);
	}
}
/*
 * sysfs accessors for the four constraint values.  Each store parses a
 * decimal integer and reapplies the constraints under the lock.
 * NOTE(review): a failed kstrtol() silently returns count (write appears
 * to succeed) — this looks deliberate for a debug-only driver, but
 * returning -EINVAL would be more conventional; confirm before changing.
 */
static ssize_t show_min_uV(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	struct virtual_consumer_data *data = dev_get_drvdata(dev);
	return sprintf(buf, "%d\n", data->min_uV);
}
static ssize_t set_min_uV(struct device *dev, struct device_attribute *attr,
			  const char *buf, size_t count)
{
	struct virtual_consumer_data *data = dev_get_drvdata(dev);
	long val;
	if (kstrtol(buf, 10, &val) != 0)
		return count;
	mutex_lock(&data->lock);
	data->min_uV = val;
	update_voltage_constraints(dev, data);
	mutex_unlock(&data->lock);
	return count;
}
static ssize_t show_max_uV(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	struct virtual_consumer_data *data = dev_get_drvdata(dev);
	return sprintf(buf, "%d\n", data->max_uV);
}
static ssize_t set_max_uV(struct device *dev, struct device_attribute *attr,
			  const char *buf, size_t count)
{
	struct virtual_consumer_data *data = dev_get_drvdata(dev);
	long val;
	if (kstrtol(buf, 10, &val) != 0)
		return count;
	mutex_lock(&data->lock);
	data->max_uV = val;
	update_voltage_constraints(dev, data);
	mutex_unlock(&data->lock);
	return count;
}
static ssize_t show_min_uA(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	struct virtual_consumer_data *data = dev_get_drvdata(dev);
	return sprintf(buf, "%d\n", data->min_uA);
}
static ssize_t set_min_uA(struct device *dev, struct device_attribute *attr,
			  const char *buf, size_t count)
{
	struct virtual_consumer_data *data = dev_get_drvdata(dev);
	long val;
	if (kstrtol(buf, 10, &val) != 0)
		return count;
	mutex_lock(&data->lock);
	data->min_uA = val;
	update_current_limit_constraints(dev, data);
	mutex_unlock(&data->lock);
	return count;
}
static ssize_t show_max_uA(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	struct virtual_consumer_data *data = dev_get_drvdata(dev);
	return sprintf(buf, "%d\n", data->max_uA);
}
static ssize_t set_max_uA(struct device *dev, struct device_attribute *attr,
			  const char *buf, size_t count)
{
	struct virtual_consumer_data *data = dev_get_drvdata(dev);
	long val;
	if (kstrtol(buf, 10, &val) != 0)
		return count;
	mutex_lock(&data->lock);
	data->max_uA = val;
	update_current_limit_constraints(dev, data);
	mutex_unlock(&data->lock);
	return count;
}
/*
 * sysfs accessors for the regulator operating mode.  show_mode prints
 * the cached mode; set_mode parses one of "fast"/"normal"/"idle"/
 * "standby" and forwards it with regulator_set_mode(), caching it only
 * on success.  Invalid input is logged and swallowed (returns count).
 */
static ssize_t show_mode(struct device *dev,
			 struct device_attribute *attr, char *buf)
{
	struct virtual_consumer_data *data = dev_get_drvdata(dev);
	switch (data->mode) {
	case REGULATOR_MODE_FAST:
		return sprintf(buf, "fast\n");
	case REGULATOR_MODE_NORMAL:
		return sprintf(buf, "normal\n");
	case REGULATOR_MODE_IDLE:
		return sprintf(buf, "idle\n");
	case REGULATOR_MODE_STANDBY:
		return sprintf(buf, "standby\n");
	default:
		return sprintf(buf, "unknown\n");
	}
}
static ssize_t set_mode(struct device *dev, struct device_attribute *attr,
			const char *buf, size_t count)
{
	struct virtual_consumer_data *data = dev_get_drvdata(dev);
	unsigned int mode;
	int ret;
	/*
	 * sysfs_streq() doesn't need the \n's, but we add them so the strings
	 * will be shared with show_mode(), above.
	 */
	if (sysfs_streq(buf, "fast\n"))
		mode = REGULATOR_MODE_FAST;
	else if (sysfs_streq(buf, "normal\n"))
		mode = REGULATOR_MODE_NORMAL;
	else if (sysfs_streq(buf, "idle\n"))
		mode = REGULATOR_MODE_IDLE;
	else if (sysfs_streq(buf, "standby\n"))
		mode = REGULATOR_MODE_STANDBY;
	else {
		dev_err(dev, "Configuring invalid mode\n");
		return count;
	}
	mutex_lock(&data->lock);
	ret = regulator_set_mode(data->regulator, mode);
	if (ret == 0)
		data->mode = mode;
	else
		dev_err(dev, "Failed to configure mode: %d\n", ret);
	mutex_unlock(&data->lock);
	return count;
}
/* Attribute group exposed under the platform device's sysfs directory. */
static DEVICE_ATTR(min_microvolts, 0664, show_min_uV, set_min_uV);
static DEVICE_ATTR(max_microvolts, 0664, show_max_uV, set_max_uV);
static DEVICE_ATTR(min_microamps, 0664, show_min_uA, set_min_uA);
static DEVICE_ATTR(max_microamps, 0664, show_max_uA, set_max_uA);
static DEVICE_ATTR(mode, 0664, show_mode, set_mode);
static struct attribute *regulator_virtual_attributes[] = {
	&dev_attr_min_microvolts.attr,
	&dev_attr_max_microvolts.attr,
	&dev_attr_min_microamps.attr,
	&dev_attr_max_microamps.attr,
	&dev_attr_mode.attr,
	NULL
};
static const struct attribute_group regulator_virtual_attr_group = {
	.attrs = regulator_virtual_attributes,
};
/* Devicetree match table (see the "default-supply" handling in probe). */
#ifdef CONFIG_OF
static const struct of_device_id regulator_virtual_consumer_of_match[] = {
	{ .compatible = "regulator-virtual-consumer" },
	{},
};
MODULE_DEVICE_TABLE(of, regulator_virtual_consumer_of_match);
#endif
static int regulator_virtual_probe(struct platform_device *pdev)
{
char *reg_id = dev_get_platdata(&pdev->dev);
struct virtual_consumer_data *drvdata;
static bool warned;
int ret;
if (!warned) {
warned = true;
pr_warn("**********************************************************\n");
pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
pr_warn("** **\n");
pr_warn("** regulator-virtual-consumer is only for testing and **\n");
pr_warn("** debugging. Do not use it in a production kernel. **\n");
pr_warn("** **\n");
pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
pr_warn("**********************************************************\n");
}
drvdata = devm_kzalloc(&pdev->dev, sizeof(struct virtual_consumer_data),
GFP_KERNEL);
if (drvdata == NULL)
return -ENOMEM;
/*
* This virtual consumer does not have any hardware-defined supply
* name, so just allow the regulator to be specified in a property
* named "default-supply" when we're being probed from devicetree.
*/
if (!reg_id && pdev->dev.of_node)
reg_id = "default";
mutex_init(&drvdata->lock);
drvdata->regulator = devm_regulator_get(&pdev->dev, reg_id);
if (IS_ERR(drvdata->regulator))
return dev_err_probe(&pdev->dev, PTR_ERR(drvdata->regulator),
"Failed to obtain supply '%s'\n",
reg_id);
ret = sysfs_create_group(&pdev->dev.kobj,
®ulator_virtual_attr_group);
if (ret != 0) {
dev_err(&pdev->dev,
"Failed to create attribute group: %d\n", ret);
return ret;
}
drvdata->mode = regulator_get_mode(drvdata->regulator);
platform_set_drvdata(pdev, drvdata);
return 0;
}
static int regulator_virtual_remove(struct platform_device *pdev)
{
struct virtual_consumer_data *drvdata = platform_get_drvdata(pdev);
sysfs_remove_group(&pdev->dev.kobj, ®ulator_virtual_attr_group);
if (drvdata->enabled)
regulator_disable(drvdata->regulator);
return 0;
}
/* Test-only platform driver; nothing depends on it, so async probe is fine. */
static struct platform_driver regulator_virtual_consumer_driver = {
	.probe = regulator_virtual_probe,
	.remove = regulator_virtual_remove,
	.driver = {
		.name = "reg-virt-consumer",
		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
		.of_match_table = of_match_ptr(regulator_virtual_consumer_of_match),
	},
};

module_platform_driver(regulator_virtual_consumer_driver);

MODULE_AUTHOR("Mark Brown <[email protected]>");
MODULE_DESCRIPTION("Virtual regulator consumer");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:reg-virt-consumer");
| linux-master | drivers/regulator/virtual.c |
// SPDX-License-Identifier: GPL-2.0
//
// Regulator driver for tps6594 PMIC
//
// Copyright (C) 2023 BayLibre Incorporated - https://www.baylibre.com/
#include <linux/device.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/regulator/driver.h>
#include <linux/regulator/machine.h>
#include <linux/regulator/of_regulator.h>
#include <linux/mfd/tps6594.h>
/* Number of single-phase bucks / LDOs and supported multi-phase combos. */
#define BUCK_NB 5
#define LDO_NB 4
#define MULTI_PHASE_NB 4
/* Error interrupts (OV/UV/SC/ILIM) requested per regulator. */
#define REGS_INT_NB 4

/* Identifiers for the single-phase rails, in datasheet order. */
enum tps6594_regulator_id {
	/* DCDC's */
	TPS6594_BUCK_1,
	TPS6594_BUCK_2,
	TPS6594_BUCK_3,
	TPS6594_BUCK_4,
	TPS6594_BUCK_5,

	/* LDOs */
	TPS6594_LDO_1,
	TPS6594_LDO_2,
	TPS6594_LDO_3,
	TPS6594_LDO_4,
};

/* Identifiers for the multi-phase buck combinations. */
enum tps6594_multi_regulator_id {
	/* Multi-phase DCDC's */
	TPS6594_BUCK_12,
	TPS6594_BUCK_34,
	TPS6594_BUCK_123,
	TPS6594_BUCK_1234,
};

/* One error interrupt: platform IRQ name, affected rail, logged text, event. */
struct tps6594_regulator_irq_type {
	const char *irq_name;
	const char *regulator_name;
	const char *event_name;
	unsigned long event;
};

/* Chip-level monitor interrupts (VCCA, VMON1/2) with no regulator_dev. */
static struct tps6594_regulator_irq_type tps6594_ext_regulator_irq_types[] = {
	{ TPS6594_IRQ_NAME_VCCA_OV, "VCCA", "overvoltage", REGULATOR_EVENT_OVER_VOLTAGE_WARN },
	{ TPS6594_IRQ_NAME_VCCA_UV, "VCCA", "undervoltage", REGULATOR_EVENT_UNDER_VOLTAGE },
	{ TPS6594_IRQ_NAME_VMON1_OV, "VMON1", "overvoltage", REGULATOR_EVENT_OVER_VOLTAGE_WARN },
	{ TPS6594_IRQ_NAME_VMON1_UV, "VMON1", "undervoltage", REGULATOR_EVENT_UNDER_VOLTAGE },
	{ TPS6594_IRQ_NAME_VMON1_RV, "VMON1", "residual voltage",
	  REGULATOR_EVENT_OVER_VOLTAGE_WARN },
	{ TPS6594_IRQ_NAME_VMON2_OV, "VMON2", "overvoltage", REGULATOR_EVENT_OVER_VOLTAGE_WARN },
	{ TPS6594_IRQ_NAME_VMON2_UV, "VMON2", "undervoltage", REGULATOR_EVENT_UNDER_VOLTAGE },
	{ TPS6594_IRQ_NAME_VMON2_RV, "VMON2", "residual voltage",
	  REGULATOR_EVENT_OVER_VOLTAGE_WARN },
};

/* Per-IRQ context handed to the threaded handler for a regulator IRQ. */
struct tps6594_regulator_irq_data {
	struct device *dev;
	struct tps6594_regulator_irq_type *type;
	struct regulator_dev *rdev;
};

/* Same, for the chip-level interrupts (no regulator to notify). */
struct tps6594_ext_regulator_irq_data {
	struct device *dev;
	struct tps6594_regulator_irq_type *type;
};
/*
 * Initializer for a struct regulator_desc.  The voltage-select register
 * (_vr) doubles as the bypass register for the LDOs that support bypass.
 *
 * BUGFIX: the stray line-continuation backslash after the closing brace
 * has been dropped; with no blank line following it, that backslash
 * spliced the next source line into the macro body.
 */
#define TPS6594_REGULATOR(_name, _of, _id, _type, _ops, _n, _vr, _vm, _er, \
			  _em, _cr, _cm, _lr, _nlr, _delay, _fuv, \
			  _ct, _ncl, _bpm) \
	{ \
		.name = _name, \
		.of_match = _of, \
		.regulators_node = of_match_ptr("regulators"), \
		.supply_name = _of, \
		.id = _id, \
		.ops = &(_ops), \
		.n_voltages = _n, \
		.type = _type, \
		.owner = THIS_MODULE, \
		.vsel_reg = _vr, \
		.vsel_mask = _vm, \
		.csel_reg = _cr, \
		.csel_mask = _cm, \
		.curr_table = _ct, \
		.n_current_limits = _ncl, \
		.enable_reg = _er, \
		.enable_mask = _em, \
		.volt_table = NULL, \
		.linear_ranges = _lr, \
		.n_linear_ranges = _nlr, \
		.ramp_delay = _delay, \
		.fixed_uV = _fuv, \
		.bypass_reg = _vr, \
		.bypass_mask = _bpm, \
	}
/* Selector-to-voltage mapping for all bucks (four linear segments). */
static const struct linear_range bucks_ranges[] = {
	REGULATOR_LINEAR_RANGE(300000, 0x0, 0xe, 20000),
	REGULATOR_LINEAR_RANGE(600000, 0xf, 0x72, 5000),
	REGULATOR_LINEAR_RANGE(1100000, 0x73, 0xaa, 10000),
	REGULATOR_LINEAR_RANGE(1660000, 0xab, 0xff, 20000),
};

/* Selector-to-voltage mapping for LDO1/2/3 (600 mV base, 50 mV steps). */
static const struct linear_range ldos_1_2_3_ranges[] = {
	REGULATOR_LINEAR_RANGE(600000, 0x4, 0x3a, 50000),
};

/* Selector-to-voltage mapping for LDO4 (1.2 V base, 25 mV steps). */
static const struct linear_range ldos_4_ranges[] = {
	REGULATOR_LINEAR_RANGE(1200000, 0x20, 0x74, 25000),
};

/* Operations permitted on BUCK1/2/3/4/5 */
static const struct regulator_ops tps6594_bucks_ops = {
	.is_enabled = regulator_is_enabled_regmap,
	.enable = regulator_enable_regmap,
	.disable = regulator_disable_regmap,
	.get_voltage_sel = regulator_get_voltage_sel_regmap,
	.set_voltage_sel = regulator_set_voltage_sel_regmap,
	.list_voltage = regulator_list_voltage_linear_range,
	.map_voltage = regulator_map_voltage_linear_range,
	.set_voltage_time_sel = regulator_set_voltage_time_sel,
};

/* Operations permitted on LDO1/2/3 (these also support bypass) */
static const struct regulator_ops tps6594_ldos_1_2_3_ops = {
	.is_enabled = regulator_is_enabled_regmap,
	.enable = regulator_enable_regmap,
	.disable = regulator_disable_regmap,
	.get_voltage_sel = regulator_get_voltage_sel_regmap,
	.set_voltage_sel = regulator_set_voltage_sel_regmap,
	.list_voltage = regulator_list_voltage_linear_range,
	.map_voltage = regulator_map_voltage_linear_range,
	.set_bypass = regulator_set_bypass_regmap,
	.get_bypass = regulator_get_bypass_regmap,
};

/* Operations permitted on LDO4 (no bypass) */
static const struct regulator_ops tps6594_ldos_4_ops = {
	.is_enabled = regulator_is_enabled_regmap,
	.enable = regulator_enable_regmap,
	.disable = regulator_disable_regmap,
	.get_voltage_sel = regulator_get_voltage_sel_regmap,
	.set_voltage_sel = regulator_set_voltage_sel_regmap,
	.list_voltage = regulator_list_voltage_linear_range,
	.map_voltage = regulator_map_voltage_linear_range,
};
/* Descriptors for the five single-phase bucks (index = buck number - 1). */
static const struct regulator_desc buck_regs[] = {
	TPS6594_REGULATOR("BUCK1", "buck1", TPS6594_BUCK_1,
			  REGULATOR_VOLTAGE, tps6594_bucks_ops, TPS6594_MASK_BUCKS_VSET,
			  TPS6594_REG_BUCKX_VOUT_1(0),
			  TPS6594_MASK_BUCKS_VSET,
			  TPS6594_REG_BUCKX_CTRL(0),
			  TPS6594_BIT_BUCK_EN, 0, 0, bucks_ranges,
			  4, 0, 0, NULL, 0, 0),
	TPS6594_REGULATOR("BUCK2", "buck2", TPS6594_BUCK_2,
			  REGULATOR_VOLTAGE, tps6594_bucks_ops, TPS6594_MASK_BUCKS_VSET,
			  TPS6594_REG_BUCKX_VOUT_1(1),
			  TPS6594_MASK_BUCKS_VSET,
			  TPS6594_REG_BUCKX_CTRL(1),
			  TPS6594_BIT_BUCK_EN, 0, 0, bucks_ranges,
			  4, 0, 0, NULL, 0, 0),
	TPS6594_REGULATOR("BUCK3", "buck3", TPS6594_BUCK_3,
			  REGULATOR_VOLTAGE, tps6594_bucks_ops, TPS6594_MASK_BUCKS_VSET,
			  TPS6594_REG_BUCKX_VOUT_1(2),
			  TPS6594_MASK_BUCKS_VSET,
			  TPS6594_REG_BUCKX_CTRL(2),
			  TPS6594_BIT_BUCK_EN, 0, 0, bucks_ranges,
			  4, 0, 0, NULL, 0, 0),
	TPS6594_REGULATOR("BUCK4", "buck4", TPS6594_BUCK_4,
			  REGULATOR_VOLTAGE, tps6594_bucks_ops, TPS6594_MASK_BUCKS_VSET,
			  TPS6594_REG_BUCKX_VOUT_1(3),
			  TPS6594_MASK_BUCKS_VSET,
			  TPS6594_REG_BUCKX_CTRL(3),
			  TPS6594_BIT_BUCK_EN, 0, 0, bucks_ranges,
			  4, 0, 0, NULL, 0, 0),
	TPS6594_REGULATOR("BUCK5", "buck5", TPS6594_BUCK_5,
			  REGULATOR_VOLTAGE, tps6594_bucks_ops, TPS6594_MASK_BUCKS_VSET,
			  TPS6594_REG_BUCKX_VOUT_1(4),
			  TPS6594_MASK_BUCKS_VSET,
			  TPS6594_REG_BUCKX_CTRL(4),
			  TPS6594_BIT_BUCK_EN, 0, 0, bucks_ranges,
			  4, 0, 0, NULL, 0, 0),
};
/*
 * Per-rail error interrupts: each buck and LDO reports overvoltage,
 * undervoltage, short circuit and current-limit (ILIM) events.
 */
static struct tps6594_regulator_irq_type tps6594_buck1_irq_types[] = {
	{ TPS6594_IRQ_NAME_BUCK1_OV, "BUCK1", "overvoltage", REGULATOR_EVENT_OVER_VOLTAGE_WARN },
	{ TPS6594_IRQ_NAME_BUCK1_UV, "BUCK1", "undervoltage", REGULATOR_EVENT_UNDER_VOLTAGE },
	{ TPS6594_IRQ_NAME_BUCK1_SC, "BUCK1", "short circuit", REGULATOR_EVENT_REGULATION_OUT },
	{ TPS6594_IRQ_NAME_BUCK1_ILIM, "BUCK1", "reach ilim, overcurrent",
	  REGULATOR_EVENT_OVER_CURRENT },
};

static struct tps6594_regulator_irq_type tps6594_buck2_irq_types[] = {
	{ TPS6594_IRQ_NAME_BUCK2_OV, "BUCK2", "overvoltage", REGULATOR_EVENT_OVER_VOLTAGE_WARN },
	{ TPS6594_IRQ_NAME_BUCK2_UV, "BUCK2", "undervoltage", REGULATOR_EVENT_UNDER_VOLTAGE },
	{ TPS6594_IRQ_NAME_BUCK2_SC, "BUCK2", "short circuit", REGULATOR_EVENT_REGULATION_OUT },
	{ TPS6594_IRQ_NAME_BUCK2_ILIM, "BUCK2", "reach ilim, overcurrent",
	  REGULATOR_EVENT_OVER_CURRENT },
};

static struct tps6594_regulator_irq_type tps6594_buck3_irq_types[] = {
	{ TPS6594_IRQ_NAME_BUCK3_OV, "BUCK3", "overvoltage", REGULATOR_EVENT_OVER_VOLTAGE_WARN },
	{ TPS6594_IRQ_NAME_BUCK3_UV, "BUCK3", "undervoltage", REGULATOR_EVENT_UNDER_VOLTAGE },
	{ TPS6594_IRQ_NAME_BUCK3_SC, "BUCK3", "short circuit", REGULATOR_EVENT_REGULATION_OUT },
	{ TPS6594_IRQ_NAME_BUCK3_ILIM, "BUCK3", "reach ilim, overcurrent",
	  REGULATOR_EVENT_OVER_CURRENT },
};

static struct tps6594_regulator_irq_type tps6594_buck4_irq_types[] = {
	{ TPS6594_IRQ_NAME_BUCK4_OV, "BUCK4", "overvoltage", REGULATOR_EVENT_OVER_VOLTAGE_WARN },
	{ TPS6594_IRQ_NAME_BUCK4_UV, "BUCK4", "undervoltage", REGULATOR_EVENT_UNDER_VOLTAGE },
	{ TPS6594_IRQ_NAME_BUCK4_SC, "BUCK4", "short circuit", REGULATOR_EVENT_REGULATION_OUT },
	{ TPS6594_IRQ_NAME_BUCK4_ILIM, "BUCK4", "reach ilim, overcurrent",
	  REGULATOR_EVENT_OVER_CURRENT },
};

static struct tps6594_regulator_irq_type tps6594_buck5_irq_types[] = {
	{ TPS6594_IRQ_NAME_BUCK5_OV, "BUCK5", "overvoltage", REGULATOR_EVENT_OVER_VOLTAGE_WARN },
	{ TPS6594_IRQ_NAME_BUCK5_UV, "BUCK5", "undervoltage", REGULATOR_EVENT_UNDER_VOLTAGE },
	{ TPS6594_IRQ_NAME_BUCK5_SC, "BUCK5", "short circuit", REGULATOR_EVENT_REGULATION_OUT },
	{ TPS6594_IRQ_NAME_BUCK5_ILIM, "BUCK5", "reach ilim, overcurrent",
	  REGULATOR_EVENT_OVER_CURRENT },
};

static struct tps6594_regulator_irq_type tps6594_ldo1_irq_types[] = {
	{ TPS6594_IRQ_NAME_LDO1_OV, "LDO1", "overvoltage", REGULATOR_EVENT_OVER_VOLTAGE_WARN },
	{ TPS6594_IRQ_NAME_LDO1_UV, "LDO1", "undervoltage", REGULATOR_EVENT_UNDER_VOLTAGE },
	{ TPS6594_IRQ_NAME_LDO1_SC, "LDO1", "short circuit", REGULATOR_EVENT_REGULATION_OUT },
	{ TPS6594_IRQ_NAME_LDO1_ILIM, "LDO1", "reach ilim, overcurrent",
	  REGULATOR_EVENT_OVER_CURRENT },
};

static struct tps6594_regulator_irq_type tps6594_ldo2_irq_types[] = {
	{ TPS6594_IRQ_NAME_LDO2_OV, "LDO2", "overvoltage", REGULATOR_EVENT_OVER_VOLTAGE_WARN },
	{ TPS6594_IRQ_NAME_LDO2_UV, "LDO2", "undervoltage", REGULATOR_EVENT_UNDER_VOLTAGE },
	{ TPS6594_IRQ_NAME_LDO2_SC, "LDO2", "short circuit", REGULATOR_EVENT_REGULATION_OUT },
	{ TPS6594_IRQ_NAME_LDO2_ILIM, "LDO2", "reach ilim, overcurrent",
	  REGULATOR_EVENT_OVER_CURRENT },
};

static struct tps6594_regulator_irq_type tps6594_ldo3_irq_types[] = {
	{ TPS6594_IRQ_NAME_LDO3_OV, "LDO3", "overvoltage", REGULATOR_EVENT_OVER_VOLTAGE_WARN },
	{ TPS6594_IRQ_NAME_LDO3_UV, "LDO3", "undervoltage", REGULATOR_EVENT_UNDER_VOLTAGE },
	{ TPS6594_IRQ_NAME_LDO3_SC, "LDO3", "short circuit", REGULATOR_EVENT_REGULATION_OUT },
	{ TPS6594_IRQ_NAME_LDO3_ILIM, "LDO3", "reach ilim, overcurrent",
	  REGULATOR_EVENT_OVER_CURRENT },
};

static struct tps6594_regulator_irq_type tps6594_ldo4_irq_types[] = {
	{ TPS6594_IRQ_NAME_LDO4_OV, "LDO4", "overvoltage", REGULATOR_EVENT_OVER_VOLTAGE_WARN },
	{ TPS6594_IRQ_NAME_LDO4_UV, "LDO4", "undervoltage", REGULATOR_EVENT_UNDER_VOLTAGE },
	{ TPS6594_IRQ_NAME_LDO4_SC, "LDO4", "short circuit", REGULATOR_EVENT_REGULATION_OUT },
	{ TPS6594_IRQ_NAME_LDO4_ILIM, "LDO4", "reach ilim, overcurrent",
	  REGULATOR_EVENT_OVER_CURRENT },
};
/* Lookup arrays so the probe path can index IRQ tables by rail number. */
static struct tps6594_regulator_irq_type *tps6594_bucks_irq_types[] = {
	tps6594_buck1_irq_types,
	tps6594_buck2_irq_types,
	tps6594_buck3_irq_types,
	tps6594_buck4_irq_types,
	tps6594_buck5_irq_types,
};

static struct tps6594_regulator_irq_type *tps6594_ldos_irq_types[] = {
	tps6594_ldo1_irq_types,
	tps6594_ldo2_irq_types,
	tps6594_ldo3_irq_types,
	tps6594_ldo4_irq_types,
};
/*
 * Descriptors for the multi-phase buck combinations (index order matches
 * buck_multi[] in probe: 12, 34, 123, 1234).  Control registers follow
 * the PMIC's multi-phase scheme — NOTE(review): register index choice
 * (e.g. VOUT_1(1) for BUCK12) should be confirmed against the TRM.
 */
static const struct regulator_desc multi_regs[] = {
	TPS6594_REGULATOR("BUCK12", "buck12", TPS6594_BUCK_1,
			  REGULATOR_VOLTAGE, tps6594_bucks_ops, TPS6594_MASK_BUCKS_VSET,
			  TPS6594_REG_BUCKX_VOUT_1(1),
			  TPS6594_MASK_BUCKS_VSET,
			  TPS6594_REG_BUCKX_CTRL(1),
			  TPS6594_BIT_BUCK_EN, 0, 0, bucks_ranges,
			  4, 4000, 0, NULL, 0, 0),
	TPS6594_REGULATOR("BUCK34", "buck34", TPS6594_BUCK_3,
			  REGULATOR_VOLTAGE, tps6594_bucks_ops, TPS6594_MASK_BUCKS_VSET,
			  TPS6594_REG_BUCKX_VOUT_1(3),
			  TPS6594_MASK_BUCKS_VSET,
			  TPS6594_REG_BUCKX_CTRL(3),
			  TPS6594_BIT_BUCK_EN, 0, 0, bucks_ranges,
			  4, 0, 0, NULL, 0, 0),
	TPS6594_REGULATOR("BUCK123", "buck123", TPS6594_BUCK_1,
			  REGULATOR_VOLTAGE, tps6594_bucks_ops, TPS6594_MASK_BUCKS_VSET,
			  TPS6594_REG_BUCKX_VOUT_1(1),
			  TPS6594_MASK_BUCKS_VSET,
			  TPS6594_REG_BUCKX_CTRL(1),
			  TPS6594_BIT_BUCK_EN, 0, 0, bucks_ranges,
			  4, 4000, 0, NULL, 0, 0),
	TPS6594_REGULATOR("BUCK1234", "buck1234", TPS6594_BUCK_1,
			  REGULATOR_VOLTAGE, tps6594_bucks_ops, TPS6594_MASK_BUCKS_VSET,
			  TPS6594_REG_BUCKX_VOUT_1(1),
			  TPS6594_MASK_BUCKS_VSET,
			  TPS6594_REG_BUCKX_CTRL(1),
			  TPS6594_BIT_BUCK_EN, 0, 0, bucks_ranges,
			  4, 4000, 0, NULL, 0, 0),
};

/* Descriptors for the four LDOs; LDO1-3 expose a bypass bit, LDO4 does not. */
static const struct regulator_desc ldo_regs[] = {
	TPS6594_REGULATOR("LDO1", "ldo1", TPS6594_LDO_1,
			  REGULATOR_VOLTAGE, tps6594_ldos_1_2_3_ops, TPS6594_MASK_LDO123_VSET,
			  TPS6594_REG_LDOX_VOUT(0),
			  TPS6594_MASK_LDO123_VSET,
			  TPS6594_REG_LDOX_CTRL(0),
			  TPS6594_BIT_LDO_EN, 0, 0, ldos_1_2_3_ranges,
			  1, 0, 0, NULL, 0, TPS6594_BIT_LDO_BYPASS),
	TPS6594_REGULATOR("LDO2", "ldo2", TPS6594_LDO_2,
			  REGULATOR_VOLTAGE, tps6594_ldos_1_2_3_ops, TPS6594_MASK_LDO123_VSET,
			  TPS6594_REG_LDOX_VOUT(1),
			  TPS6594_MASK_LDO123_VSET,
			  TPS6594_REG_LDOX_CTRL(1),
			  TPS6594_BIT_LDO_EN, 0, 0, ldos_1_2_3_ranges,
			  1, 0, 0, NULL, 0, TPS6594_BIT_LDO_BYPASS),
	TPS6594_REGULATOR("LDO3", "ldo3", TPS6594_LDO_3,
			  REGULATOR_VOLTAGE, tps6594_ldos_1_2_3_ops, TPS6594_MASK_LDO123_VSET,
			  TPS6594_REG_LDOX_VOUT(2),
			  TPS6594_MASK_LDO123_VSET,
			  TPS6594_REG_LDOX_CTRL(2),
			  TPS6594_BIT_LDO_EN, 0, 0, ldos_1_2_3_ranges,
			  1, 0, 0, NULL, 0, TPS6594_BIT_LDO_BYPASS),
	TPS6594_REGULATOR("LDO4", "ldo4", TPS6594_LDO_4,
			  REGULATOR_VOLTAGE, tps6594_ldos_4_ops, TPS6594_MASK_LDO4_VSET >> 1,
			  TPS6594_REG_LDOX_VOUT(3),
			  TPS6594_MASK_LDO4_VSET,
			  TPS6594_REG_LDOX_CTRL(3),
			  TPS6594_BIT_LDO_EN, 0, 0, ldos_4_ranges,
			  1, 0, 0, NULL, 0, 0),
};
static irqreturn_t tps6594_regulator_irq_handler(int irq, void *data)
{
struct tps6594_regulator_irq_data *irq_data = data;
if (irq_data->type->event_name[0] == '\0') {
/* This is the timeout interrupt no specific regulator */
dev_err(irq_data->dev,
"System was put in shutdown due to timeout during an active or standby transition.\n");
return IRQ_HANDLED;
}
dev_err(irq_data->dev, "Error IRQ trap %s for %s\n",
irq_data->type->event_name, irq_data->type->regulator_name);
regulator_notifier_call_chain(irq_data->rdev,
irq_data->type->event, NULL);
return IRQ_HANDLED;
}
/*
 * Request the REGS_INT_NB error interrupts (OV/UV/SC/ILIM) of one rail
 * and record per-IRQ context in irq_data[].  *irq_idx is a running index
 * into the shared irq_data array, advanced once per requested IRQ so
 * every handler gets its own slot.
 *
 * Returns 0 on success or a negative errno (IRQs are devm-managed, so
 * nothing needs to be undone on failure).
 */
static int tps6594_request_reg_irqs(struct platform_device *pdev,
				    struct regulator_dev *rdev,
				    struct tps6594_regulator_irq_data *irq_data,
				    struct tps6594_regulator_irq_type *tps6594_regs_irq_types,
				    int *irq_idx)
{
	struct tps6594_regulator_irq_type *irq_type;
	struct tps6594 *tps = dev_get_drvdata(pdev->dev.parent);
	int j;
	int irq;
	int error;

	for (j = 0; j < REGS_INT_NB; j++) {
		irq_type = &tps6594_regs_irq_types[j];
		irq = platform_get_irq_byname(pdev, irq_type->irq_name);
		if (irq < 0)
			return -EINVAL;

		/* Fill the slot before requesting: the handler may fire at once. */
		irq_data[*irq_idx].dev = tps->dev;
		irq_data[*irq_idx].type = irq_type;
		irq_data[*irq_idx].rdev = rdev;
		error = devm_request_threaded_irq(tps->dev, irq, NULL,
						  tps6594_regulator_irq_handler, IRQF_ONESHOT,
						  irq_type->irq_name, &irq_data[*irq_idx]);
		if (error) {
			dev_err(tps->dev, "tps6594 failed to request %s IRQ %d: %d\n",
				irq_type->irq_name, irq, error);
			return error;
		}
		(*irq_idx)++;
	}
	return 0;
}
/*
 * Probe: detect the multi-phase buck configuration from the devicetree
 * node names, then register the multi-phase combos, the remaining
 * single-phase bucks and (except on LP8764) the LDOs, requesting the
 * per-rail error IRQs for each.  Finally request the chip-level
 * (VCCA/VMON) interrupts.  All resources are devm-managed.
 */
static int tps6594_regulator_probe(struct platform_device *pdev)
{
	struct tps6594 *tps = dev_get_drvdata(pdev->dev.parent);
	struct regulator_dev *rdev;
	struct device_node *np = NULL;
	struct device_node *np_pmic_parent = NULL;
	struct regulator_config config = {};
	struct tps6594_regulator_irq_data *irq_data;
	struct tps6594_ext_regulator_irq_data *irq_ext_reg_data;
	struct tps6594_regulator_irq_type *irq_type;
	/* Per-buck flag: rail already covered (by a multi-phase combo). */
	u8 buck_configured[BUCK_NB] = { 0 };
	/* Which multi-phase descriptors to register (index into multi_regs). */
	u8 buck_multi[MULTI_PHASE_NB] = { 0 };
	static const char * const multiphases[] = {"buck12", "buck123", "buck1234", "buck34"};
	static const char *npname;
	int error, i, irq, multi, delta;
	int irq_idx = 0;
	int buck_idx = 0;
	size_t ext_reg_irq_nb = 2;
	size_t reg_irq_nb;
	enum {
		MULTI_BUCK12,
		MULTI_BUCK123,
		MULTI_BUCK1234,
		MULTI_BUCK12_34,
		MULTI_FIRST = MULTI_BUCK12,
		MULTI_LAST = MULTI_BUCK12_34,
		MULTI_NUM = MULTI_LAST - MULTI_FIRST + 1
	};

	config.dev = tps->dev;
	config.driver_data = tps;
	config.regmap = tps->regmap;

	/*
	 * Switch case defines different possible multi phase config
	 * This is based on dts buck node name.
	 * Buck node name must be chosen accordingly.
	 * Default case is no Multiphase buck.
	 * In case of Multiphase configuration, value should be defined for
	 * buck_configured to avoid creating bucks for every buck in multiphase
	 */
	for (multi = MULTI_FIRST; multi < MULTI_NUM; multi++) {
		np = of_find_node_by_name(tps->dev->of_node, multiphases[multi]);
		npname = of_node_full_name(np);
		np_pmic_parent = of_get_parent(of_get_parent(np));
		/* Only accept nodes that really live under this PMIC's node. */
		if (of_node_cmp(of_node_full_name(np_pmic_parent), tps->dev->of_node->full_name))
			continue;
		delta = strcmp(npname, multiphases[multi]);
		if (!delta) {
			switch (multi) {
			case MULTI_BUCK12:
				buck_multi[0] = 1;
				buck_configured[0] = 1;
				buck_configured[1] = 1;
				break;
			/* multiphase buck34 is supported only with buck12 */
			case MULTI_BUCK12_34:
				buck_multi[0] = 1;
				buck_multi[1] = 1;
				buck_configured[0] = 1;
				buck_configured[1] = 1;
				buck_configured[2] = 1;
				buck_configured[3] = 1;
				break;
			case MULTI_BUCK123:
				buck_multi[2] = 1;
				buck_configured[0] = 1;
				buck_configured[1] = 1;
				buck_configured[2] = 1;
				break;
			case MULTI_BUCK1234:
				buck_multi[3] = 1;
				buck_configured[0] = 1;
				buck_configured[1] = 1;
				buck_configured[2] = 1;
				buck_configured[3] = 1;
				break;
			}
		}
	}

	if (tps->chip_id == LP8764) {
		/* There is only 4 buck on LP8764 */
		buck_configured[4] = 1;
		reg_irq_nb = size_mul(REGS_INT_NB, (BUCK_NB - 1));
	} else {
		reg_irq_nb = size_mul(REGS_INT_NB, (size_add(BUCK_NB, LDO_NB)));
	}

	/* One slot per possible rail IRQ; slots are handed out by irq_idx. */
	irq_data = devm_kmalloc_array(tps->dev, reg_irq_nb,
				      sizeof(struct tps6594_regulator_irq_data), GFP_KERNEL);
	if (!irq_data)
		return -ENOMEM;

	/* Register the selected multi-phase combos and their member-buck IRQs. */
	for (i = 0; i < MULTI_PHASE_NB; i++) {
		if (buck_multi[i] == 0)
			continue;
		rdev = devm_regulator_register(&pdev->dev, &multi_regs[i], &config);
		if (IS_ERR(rdev))
			return dev_err_probe(tps->dev, PTR_ERR(rdev),
					     "failed to register %s regulator\n",
					     pdev->name);

		/* config multiphase buck12+buck34 */
		if (i == 1)
			buck_idx = 2;
		error = tps6594_request_reg_irqs(pdev, rdev, irq_data,
						 tps6594_bucks_irq_types[buck_idx], &irq_idx);
		if (error)
			return error;
		error = tps6594_request_reg_irqs(pdev, rdev, irq_data,
						 tps6594_bucks_irq_types[buck_idx + 1], &irq_idx);
		if (error)
			return error;

		/* buck123 and buck1234 also cover buck3 (and buck4 for 1234). */
		if (i == 2 || i == 3) {
			error = tps6594_request_reg_irqs(pdev, rdev, irq_data,
							 tps6594_bucks_irq_types[buck_idx + 2],
							 &irq_idx);
			if (error)
				return error;
		}
		if (i == 3) {
			error = tps6594_request_reg_irqs(pdev, rdev, irq_data,
							 tps6594_bucks_irq_types[buck_idx + 3],
							 &irq_idx);
			if (error)
				return error;
		}
	}

	/* Register whatever bucks are not part of a multi-phase combo. */
	for (i = 0; i < BUCK_NB; i++) {
		if (buck_configured[i] == 1)
			continue;

		rdev = devm_regulator_register(&pdev->dev, &buck_regs[i], &config);
		if (IS_ERR(rdev))
			return dev_err_probe(tps->dev, PTR_ERR(rdev),
					     "failed to register %s regulator\n",
					     pdev->name);

		error = tps6594_request_reg_irqs(pdev, rdev, irq_data,
						 tps6594_bucks_irq_types[i], &irq_idx);
		if (error)
			return error;
	}

	/* LP8764 doesn't have LDO */
	if (tps->chip_id != LP8764) {
		for (i = 0; i < ARRAY_SIZE(ldo_regs); i++) {
			rdev = devm_regulator_register(&pdev->dev, &ldo_regs[i], &config);
			if (IS_ERR(rdev))
				return dev_err_probe(tps->dev, PTR_ERR(rdev),
						     "failed to register %s regulator\n",
						     pdev->name);

			error = tps6594_request_reg_irqs(pdev, rdev, irq_data,
							 tps6594_ldos_irq_types[i],
							 &irq_idx);
			if (error)
				return error;
		}
	}

	/* LP8764 exposes the full VCCA/VMON set; others only the first two. */
	if (tps->chip_id == LP8764)
		ext_reg_irq_nb = ARRAY_SIZE(tps6594_ext_regulator_irq_types);

	irq_ext_reg_data = devm_kmalloc_array(tps->dev,
					      ext_reg_irq_nb,
					      sizeof(struct tps6594_ext_regulator_irq_data),
					      GFP_KERNEL);
	if (!irq_ext_reg_data)
		return -ENOMEM;

	for (i = 0; i < ext_reg_irq_nb; ++i) {
		irq_type = &tps6594_ext_regulator_irq_types[i];

		irq = platform_get_irq_byname(pdev, irq_type->irq_name);
		if (irq < 0)
			return -EINVAL;

		irq_ext_reg_data[i].dev = tps->dev;
		irq_ext_reg_data[i].type = irq_type;

		error = devm_request_threaded_irq(tps->dev, irq, NULL,
						  tps6594_regulator_irq_handler,
						  IRQF_ONESHOT,
						  irq_type->irq_name,
						  &irq_ext_reg_data[i]);
		if (error)
			return dev_err_probe(tps->dev, error,
					     "failed to request %s IRQ %d\n",
					     irq_type->irq_name, irq);
	}
	return 0;
}
/* MFD sub-device driver; probed from the tps6594 core's mfd cell. */
static struct platform_driver tps6594_regulator_driver = {
	.driver = {
		.name = "tps6594-regulator",
	},
	.probe = tps6594_regulator_probe,
};

module_platform_driver(tps6594_regulator_driver);

MODULE_ALIAS("platform:tps6594-regulator");
MODULE_AUTHOR("Jerome Neanne <[email protected]>")
MODULE_DESCRIPTION("TPS6594 voltage regulator driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/regulator/tps6594-regulator.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2023 Analog Devices, Inc.
* ADI Regulator driver for the MAX77857
* MAX77859 and MAX77831.
*/
#include <linux/bitfield.h>
#include <linux/i2c.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/regmap.h>
#include <linux/regulator/driver.h>
#include <linux/regulator/machine.h>
#include <linux/regulator/of_regulator.h>
#include <linux/util_macros.h>
/* MAX77831/MAX77857 register map. */
#define MAX77857_REG_INT_SRC 0x10
#define MAX77857_REG_INT_MASK 0x11
#define MAX77857_REG_CONT1 0x12
#define MAX77857_REG_CONT2 0x13
#define MAX77857_REG_CONT3 0x14

/* Interrupt-source bits (INT_SRC). */
#define MAX77857_INT_SRC_OCP BIT(0)
#define MAX77857_INT_SRC_THS BIT(1)
#define MAX77857_INT_SRC_HARDSHORT BIT(2)
#define MAX77857_INT_SRC_OVP BIT(3)
#define MAX77857_INT_SRC_POK BIT(4)

#define MAX77857_ILIM_MASK GENMASK(2, 0)
#define MAX77857_CONT1_FREQ GENMASK(4, 3)
#define MAX77857_CONT3_FPWM BIT(5)

/* MAX77859/MAX77859A register map (shifted relative to MAX77857). */
#define MAX77859_REG_INT_SRC 0x11
#define MAX77859_REG_CONT1 0x13
#define MAX77859_REG_CONT2 0x14
#define MAX77859_REG_CONT3 0x15
#define MAX77859_REG_CONT5 0x17
#define MAX77859_CONT2_FPWM BIT(2)
#define MAX77859_CONT2_INTB BIT(3)
#define MAX77859_CONT3_DVS_START BIT(2)
#define MAX77859_VOLTAGE_SEL_MASK GENMASK(9, 0)

/* MAX77859 current-limit range in uA. */
#define MAX77859_CURRENT_MIN 1000000
#define MAX77859_CURRENT_MAX 5000000
#define MAX77859_CURRENT_STEP 50000

/* Chip variants; value doubles as driver_data in the device-id tables. */
enum max77857_id {
	ID_MAX77831 = 1,
	ID_MAX77857,
	ID_MAX77859,
	ID_MAX77859A,
};
/*
 * regmap volatility callback: only the interrupt-source register (whose
 * address differs per chip variant) must bypass the register cache.
 * Unknown variants conservatively treat everything as volatile.
 */
static bool max77857_volatile_reg(struct device *dev, unsigned int reg)
{
	enum max77857_id id = (uintptr_t)dev_get_drvdata(dev);

	if (id == ID_MAX77831 || id == ID_MAX77857)
		return reg == MAX77857_REG_INT_SRC;

	if (id == ID_MAX77859 || id == ID_MAX77859A)
		return reg == MAX77859_REG_INT_SRC;

	return true;
}
/* 8-bit registers; cache everything except the interrupt-source register. */
static struct regmap_config max77857_regmap_config = {
	.reg_bits = 8,
	.val_bits = 8,
	.cache_type = REGCACHE_MAPLE,
	.volatile_reg = max77857_volatile_reg,
};
/*
 * Report regulator status from the power-OK bit: ON while POK is set,
 * ERROR otherwise.  Returns a negative errno on regmap failure.
 */
static int max77857_get_status(struct regulator_dev *rdev)
{
	unsigned int src;
	int err;

	err = regmap_read(rdev->regmap, MAX77857_REG_INT_SRC, &src);
	if (err)
		return err;

	return FIELD_GET(MAX77857_INT_SRC_POK, src) ? REGULATOR_STATUS_ON
						    : REGULATOR_STATUS_ERROR;
}
/*
 * Report the operating mode: forced-PWM maps to REGULATOR_MODE_FAST,
 * otherwise REGULATOR_MODE_NORMAL.  The FPWM bit lives in a different
 * register depending on the chip variant.  A regmap failure is returned
 * as a (negative) errno through the unsigned return, matching the
 * original behavior.
 *
 * BUGFIX: "&regval" had been corrupted into the HTML-entity form
 * ("&reg" -> U+00AE); the address-of operator is restored.
 */
static unsigned int max77857_get_mode(struct regulator_dev *rdev)
{
	enum max77857_id id = (uintptr_t)rdev_get_drvdata(rdev);
	unsigned int regval;
	int ret;

	switch (id) {
	case ID_MAX77831:
	case ID_MAX77857:
		ret = regmap_read(rdev->regmap, MAX77857_REG_CONT3, &regval);
		if (ret)
			return ret;

		if (FIELD_GET(MAX77857_CONT3_FPWM, regval))
			return REGULATOR_MODE_FAST;

		break;
	case ID_MAX77859:
	case ID_MAX77859A:
		ret = regmap_read(rdev->regmap, MAX77859_REG_CONT2, &regval);
		if (ret)
			return ret;

		if (FIELD_GET(MAX77859_CONT2_FPWM, regval))
			return REGULATOR_MODE_FAST;

		break;
	default:
		return -EINVAL;
	}

	return REGULATOR_MODE_NORMAL;
}
/*
 * Switch between forced-PWM (REGULATOR_MODE_FAST) and automatic mode
 * (REGULATOR_MODE_NORMAL) by setting/clearing the variant-specific FPWM
 * bit.  Any other mode, or an unknown chip id, yields -EINVAL.
 */
static int max77857_set_mode(struct regulator_dev *rdev, unsigned int mode)
{
	enum max77857_id id = (uintptr_t)rdev_get_drvdata(rdev);
	unsigned int fpwm_reg, fpwm_bit;

	switch (id) {
	case ID_MAX77831:
	case ID_MAX77857:
		fpwm_reg = MAX77857_REG_CONT3;
		fpwm_bit = MAX77857_CONT3_FPWM;
		break;
	case ID_MAX77859:
	case ID_MAX77859A:
		fpwm_reg = MAX77859_REG_CONT2;
		fpwm_bit = MAX77859_CONT2_FPWM;
		break;
	default:
		return -EINVAL;
	}

	if (mode == REGULATOR_MODE_FAST)
		return regmap_set_bits(rdev->regmap, fpwm_reg, fpwm_bit);

	if (mode == REGULATOR_MODE_NORMAL)
		return regmap_clear_bits(rdev->regmap, fpwm_reg, fpwm_bit);

	return -EINVAL;
}
/*
 * Translate the interrupt-source register into regulator error flags:
 * OVP -> over-voltage warning, OCP/hard-short -> over-current,
 * thermal shutdown -> over-temp, and POK deasserted -> generic failure.
 */
static int max77857_get_error_flags(struct regulator_dev *rdev,
				    unsigned int *flags)
{
	unsigned int val;
	int ret;

	ret = regmap_read(rdev->regmap, MAX77857_REG_INT_SRC, &val);
	if (ret)
		return ret;

	*flags = 0;

	if (FIELD_GET(MAX77857_INT_SRC_OVP, val))
		*flags |= REGULATOR_ERROR_OVER_VOLTAGE_WARN;

	if (FIELD_GET(MAX77857_INT_SRC_OCP, val) ||
	    FIELD_GET(MAX77857_INT_SRC_HARDSHORT, val))
		*flags |= REGULATOR_ERROR_OVER_CURRENT;

	if (FIELD_GET(MAX77857_INT_SRC_THS, val))
		*flags |= REGULATOR_ERROR_OVER_TEMP;

	if (!FIELD_GET(MAX77857_INT_SRC_POK, val))
		*flags |= REGULATOR_ERROR_FAIL;

	return 0;
}
/*
 * MAX77859 selector range.  Non-const on purpose: step and min may be
 * rescaled by max77857_calc_range() from the external divider values.
 */
static struct linear_range max77859_lin_ranges[] = {
	REGULATOR_LINEAR_RANGE(3200000, 0x0A0, 0x320, 20000)
};

/* Ramp-rate table — presumably uV/us per framework convention; confirm. */
static const unsigned int max77859_ramp_table[4] = {
	1000, 500, 250, 125
};
/*
 * Write the 10-bit voltage selector.  The value spans two registers
 * starting at CONT3 and is written big-endian, then DVS_START is set to
 * latch the new selector into effect.
 *
 * BUGFIX: "&reg" had been corrupted into the HTML-entity form (U+00AE);
 * the address-of operator in the bulk write is restored.
 */
static int max77859_set_voltage_sel(struct regulator_dev *rdev,
				    unsigned int sel)
{
	__be16 reg;
	int ret;

	reg = cpu_to_be16(sel);
	ret = regmap_bulk_write(rdev->regmap, MAX77859_REG_CONT3, &reg, 2);
	if (ret)
		return ret;

	/* actually apply new voltage */
	return regmap_set_bits(rdev->regmap, MAX77859_REG_CONT3,
			       MAX77859_CONT3_DVS_START);
}
/*
 * Read the 10-bit voltage selector back from the two big-endian
 * registers starting at CONT3.
 *
 * BUGFIX: "&reg" had been corrupted into the HTML-entity form (U+00AE);
 * the address-of operator in the bulk read is restored.
 */
static int max77859_get_voltage_sel(struct regulator_dev *rdev)
{
	__be16 reg;
	int ret;

	ret = regmap_bulk_read(rdev->regmap, MAX77859_REG_CONT3, &reg, 2);
	if (ret)
		return ret;

	return FIELD_GET(MAX77859_VOLTAGE_SEL_MASK, __be16_to_cpu(reg));
}
/*
 * Program the current limit.  Selector 0x12 corresponds to the 1 A
 * minimum and each step adds 50 mA; the raw selector is clamped to the
 * register's 7-bit range.  min_uA is accepted but not used beyond the
 * range check, matching the original behavior.
 */
static int max77859_set_current_limit(struct regulator_dev *rdev, int min_uA, int max_uA)
{
	u32 sel;

	if (max_uA < MAX77859_CURRENT_MIN)
		return -EINVAL;

	sel = 0x12 + (max_uA - MAX77859_CURRENT_MIN) / MAX77859_CURRENT_STEP;
	sel = clamp_val(sel, 0x00, 0x7F);

	return regmap_write(rdev->regmap, MAX77859_REG_CONT5, sel);
}
/*
 * Read back the programmed current limit in uA.  Selectors at or below
 * 0x12 report the 1 A floor and selectors at or above 0x64 report the
 * 5 A ceiling; in between the limit scales in 50 mA steps.
 */
static int max77859_get_current_limit(struct regulator_dev *rdev)
{
	u32 sel;
	int err;

	err = regmap_read(rdev->regmap, MAX77859_REG_CONT5, &sel);
	if (err)
		return err;

	if (sel <= 0x12)
		return MAX77859_CURRENT_MIN;

	if (sel >= 0x64)
		return MAX77859_CURRENT_MAX;

	return MAX77859_CURRENT_MIN + (sel - 0x12) * MAX77859_CURRENT_STEP;
}
/* Ops for MAX77859: custom two-register voltage selector, no current limit. */
static const struct regulator_ops max77859_regulator_ops = {
	.list_voltage = regulator_list_voltage_linear_range,
	.set_voltage_sel = max77859_set_voltage_sel,
	.get_voltage_sel = max77859_get_voltage_sel,
	.set_ramp_delay = regulator_set_ramp_delay_regmap,
	.get_status = max77857_get_status,
	.set_mode = max77857_set_mode,
	.get_mode = max77857_get_mode,
	.get_error_flags = max77857_get_error_flags,
};

/* Ops for MAX77859A: as MAX77859 plus programmable current limit. */
static const struct regulator_ops max77859a_regulator_ops = {
	.list_voltage = regulator_list_voltage_linear_range,
	.set_voltage_sel = max77859_set_voltage_sel,
	.get_voltage_sel = max77859_get_voltage_sel,
	.set_current_limit = max77859_set_current_limit,
	.get_current_limit = max77859_get_current_limit,
	.set_ramp_delay = regulator_set_ramp_delay_regmap,
	.get_status = max77857_get_status,
	.set_mode = max77857_set_mode,
	.get_mode = max77857_get_mode,
	.get_error_flags = max77857_get_error_flags,
};

/* Ops for MAX77831/MAX77857: single-register selector via regmap helpers. */
static const struct regulator_ops max77857_regulator_ops = {
	.list_voltage = regulator_list_voltage_linear_range,
	.set_voltage_sel = regulator_set_voltage_sel_regmap,
	.get_voltage_sel = regulator_get_voltage_sel_regmap,
	.set_ramp_delay = regulator_set_ramp_delay_regmap,
	.get_status = max77857_get_status,
	.set_mode = max77857_set_mode,
	.get_mode = max77857_get_mode,
	.get_error_flags = max77857_get_error_flags,
};
/* MAX77831/MAX77857 range; non-const so calc_range() can rescale it. */
static struct linear_range max77857_lin_ranges[] = {
	REGULATOR_LINEAR_RANGE(4485000, 0x3D, 0xCC, 73500)
};

/* Selectable switching frequencies in Hz (index -> CONT1 FREQ field). */
static const unsigned int max77857_switch_freq[] = {
	1200000, 1500000, 1800000, 2100000
};

#define RAMAP_DELAY_INIT_VAL 1333

/* Ramp tables; row selected by switching frequency (see probe). */
static const unsigned int max77857_ramp_table[2][4] = {
	{ RAMAP_DELAY_INIT_VAL, 667, 333, 227 }, /* when switch freq is 1.8MHz or 2.1MHz */
	{ 1166, 667, 333, 167 }, /* when switch freq is 1.2MHz or 1.5MHz */
};

/*
 * Shared descriptor, patched at probe time for the MAX77859/59A variants
 * (ops, ranges and ramp table are swapped in before registration).
 */
static struct regulator_desc max77857_regulator_desc = {
	.ops = &max77857_regulator_ops,
	.name = "max77857",
	.linear_ranges = max77857_lin_ranges,
	.n_linear_ranges = ARRAY_SIZE(max77857_lin_ranges),
	.vsel_mask = 0xFF,
	.vsel_reg = MAX77857_REG_CONT2,
	.ramp_delay_table = max77857_ramp_table[0],
	.n_ramp_values = ARRAY_SIZE(max77857_ramp_table[0]),
	.ramp_reg = MAX77857_REG_CONT3,
	.ramp_mask = GENMASK(1, 0),
	.ramp_delay = RAMAP_DELAY_INIT_VAL,
	.owner = THIS_MODULE,
};
/*
 * Rescale the voltage range when an external feedback divider
 * (adi,rtop-ohms / adi,rbot-ohms) is described in the firmware node.
 * If either resistor is absent or zero the internal range is kept.
 *
 * BUGFIX: added a 'default' arm to the switch — without it an
 * out-of-enum id would leave 'range' and 'vref_step' uninitialized and
 * the following writes would be undefined behavior.
 */
static void max77857_calc_range(struct device *dev, enum max77857_id id)
{
	struct linear_range *range;
	unsigned long vref_step;
	u32 rtop = 0;
	u32 rbot = 0;

	device_property_read_u32(dev, "adi,rtop-ohms", &rtop);
	device_property_read_u32(dev, "adi,rbot-ohms", &rbot);
	if (!rbot || !rtop)
		return;

	switch (id) {
	case ID_MAX77831:
	case ID_MAX77857:
		range = max77857_lin_ranges;
		vref_step = 4900UL;
		break;
	case ID_MAX77859:
	case ID_MAX77859A:
		range = max77859_lin_ranges;
		vref_step = 1250UL;
		break;
	default:
		return;
	}

	range->step = DIV_ROUND_CLOSEST(vref_step * (rbot + rtop), rbot);
	range->min = range->step * range->min_sel;
}
/*
 * Probe: identify the chip variant from the i2c device id, patch the
 * shared regulator_desc for the MAX77859/59A variants, apply the
 * optional external feedback-divider scaling and switching frequency
 * from firmware properties, then register the regulator.
 *
 * NOTE(review): the shared 'max77857_regulator_desc' is mutated here,
 * which assumes at most one instance of this device per system.
 */
static int max77857_probe(struct i2c_client *client)
{
	const struct i2c_device_id *i2c_id;
	struct device *dev = &client->dev;
	struct regulator_config cfg = { };
	struct regulator_dev *rdev;
	struct regmap *regmap;
	enum max77857_id id;
	u32 switch_freq = 0;
	int ret;

	i2c_id = i2c_client_get_device_id(client);
	if (!i2c_id)
		return -EINVAL;

	id = i2c_id->driver_data;
	dev_set_drvdata(dev, (void *)id);

	/* MAX77859/59A use the two-register selector and their own ranges. */
	if (id == ID_MAX77859 || id == ID_MAX77859A) {
		max77857_regulator_desc.ops = &max77859_regulator_ops;
		max77857_regulator_desc.linear_ranges = max77859_lin_ranges;
		max77857_regulator_desc.ramp_delay_table = max77859_ramp_table;
		max77857_regulator_desc.ramp_delay = max77859_ramp_table[0];
	}

	if (id == ID_MAX77859A)
		max77857_regulator_desc.ops = &max77859a_regulator_ops;

	max77857_calc_range(dev, id);

	regmap = devm_regmap_init_i2c(client, &max77857_regmap_config);
	if (IS_ERR(regmap))
		return dev_err_probe(dev, PTR_ERR(regmap),
				     "cannot initialize regmap\n");

	device_property_read_u32(dev, "adi,switch-frequency-hz", &switch_freq);
	if (switch_freq) {
		switch_freq = find_closest(switch_freq, max77857_switch_freq,
					   ARRAY_SIZE(max77857_switch_freq));

		/* MAX77831 does not support the highest frequency setting. */
		if (id == ID_MAX77831 && switch_freq == 3)
			switch_freq = 2;

		switch (id) {
		case ID_MAX77831:
		case ID_MAX77857:
			ret = regmap_update_bits(regmap, MAX77857_REG_CONT1,
						 MAX77857_CONT1_FREQ, switch_freq);

			/* Slower switch freq needs the alternate ramp table. */
			if (switch_freq >= 2)
				break;

			max77857_regulator_desc.ramp_delay_table = max77857_ramp_table[1];
			max77857_regulator_desc.ramp_delay = max77857_ramp_table[1][0];
			break;
		case ID_MAX77859:
		case ID_MAX77859A:
			ret = regmap_update_bits(regmap, MAX77859_REG_CONT1,
						 MAX77857_CONT1_FREQ, switch_freq);
			break;
		}
		if (ret)
			return ret;
	}

	cfg.dev = dev;
	cfg.driver_data = (void *)id;
	cfg.regmap = regmap;
	cfg.init_data = of_get_regulator_init_data(dev, dev->of_node,
						   &max77857_regulator_desc);
	if (!cfg.init_data)
		return -ENOMEM;

	rdev = devm_regulator_register(dev, &max77857_regulator_desc, &cfg);
	if (IS_ERR(rdev))
		return dev_err_probe(dev, PTR_ERR(rdev),
				     "cannot register regulator\n");

	return 0;
}
/* I2C device ID table; driver_data carries the chip-variant enum value. */
const struct i2c_device_id max77857_id[] = {
	{ "max77831", ID_MAX77831 },
	{ "max77857", ID_MAX77857 },
	{ "max77859", ID_MAX77859 },
	{ "max77859a", ID_MAX77859A },
	{ }
};
MODULE_DEVICE_TABLE(i2c, max77857_id);
/* DT compatibles; .data mirrors the enum used by the I2C ID table above. */
static const struct of_device_id max77857_of_id[] = {
	{ .compatible = "adi,max77831", .data = (void *)ID_MAX77831 },
	{ .compatible = "adi,max77857", .data = (void *)ID_MAX77857 },
	{ .compatible = "adi,max77859", .data = (void *)ID_MAX77859 },
	{ .compatible = "adi,max77859a", .data = (void *)ID_MAX77859A },
	{ }
};
MODULE_DEVICE_TABLE(of, max77857_of_id);
/* I2C driver glue; probing is done in max77857_probe(). */
static struct i2c_driver max77857_driver = {
	.driver = {
		.name = "max77857",
		.of_match_table = max77857_of_id,
	},
	.id_table = max77857_id,
	.probe = max77857_probe,
};
module_i2c_driver(max77857_driver);
MODULE_DESCRIPTION("Analog Devices MAX77857 Buck-Boost Converter Driver");
MODULE_AUTHOR("Ibrahim Tilki <[email protected]>");
MODULE_AUTHOR("Okan Sahin <[email protected]>");
MODULE_LICENSE("GPL");
| linux-master | drivers/regulator/max77857-regulator.c |
// SPDX-License-Identifier: GPL-2.0+
#include <linux/bits.h>
#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/regulator/driver.h>
#include <linux/regulator/machine.h>
enum {
MT6370_IDX_DSVBOOST = 0,
MT6370_IDX_DSVPOS,
MT6370_IDX_DSVNEG,
MT6370_IDX_VIBLDO,
MT6370_MAX_IDX
};
#define MT6370_REG_LDO_CFG 0x180
#define MT6370_REG_LDO_VOUT 0x181
#define MT6370_REG_DB_CTRL1 0x1B0
#define MT6370_REG_DB_CTRL2 0x1B1
#define MT6370_REG_DB_VBST 0x1B2
#define MT6370_REG_DB_VPOS 0x1B3
#define MT6370_REG_DB_VNEG 0x1B4
#define MT6370_REG_LDO_STAT 0x1DC
#define MT6370_REG_DB_STAT 0x1DF
#define MT6370_LDOOMS_MASK BIT(7)
#define MT6370_LDOEN_MASK BIT(7)
#define MT6370_LDOVOUT_MASK GENMASK(3, 0)
#define MT6370_DBPERD_MASK (BIT(7) | BIT(4))
#define MT6370_DBEXTEN_MASK BIT(0)
#define MT6370_DBVPOSEN_MASK BIT(6)
#define MT6370_DBVPOSDISG_MASK BIT(5)
#define MT6370_DBVNEGEN_MASK BIT(3)
#define MT6370_DBVNEGDISG_MASK BIT(2)
#define MT6370_DBALLON_MASK (MT6370_DBVPOSEN_MASK | MT6370_DBVNEGEN_MASK)
#define MT6370_DBSLEW_MASK GENMASK(7, 6)
#define MT6370_DBVOUT_MASK GENMASK(5, 0)
#define MT6370_LDOOC_EVT_MASK BIT(7)
#define MT6370_POSSCP_EVT_MASK BIT(7)
#define MT6370_NEGSCP_EVT_MASK BIT(6)
#define MT6370_BSTOCP_EVT_MASK BIT(5)
#define MT6370_POSOCP_EVT_MASK BIT(4)
#define MT6370_NEGOCP_EVT_MASK BIT(3)
#define MT6370_LDO_MINUV 1600000
#define MT6370_LDO_STPUV 200000
#define MT6370_LDO_N_VOLT 13
#define MT6370_DBVBOOST_MINUV 4000000
#define MT6370_DBVBOOST_STPUV 50000
#define MT6370_DBVBOOST_N_VOLT 45
#define MT6370_DBVOUT_MINUV 4000000
#define MT6370_DBVOUT_STPUV 50000
#define MT6370_DBVOUT_N_VOLT 41
/* Per-device driver state shared by all four MT6370 regulators. */
struct mt6370_priv {
	struct device *dev;		/* our platform device */
	struct regmap *regmap;		/* regmap owned by the parent MFD */
	struct regulator_dev *rdev[MT6370_MAX_IDX];
	/* true once at least one DSV rail uses an external enable GPIO */
	bool use_external_ctrl;
};
/* Ramp (slew) rates in uV/us selectable via the DBSLEW field. */
static const unsigned int mt6370_vpos_ramp_tbl[] = { 8540, 5840, 4830, 3000 };
static const unsigned int mt6370_vneg_ramp_tbl[] = { 10090, 6310, 5050, 3150 };
/*
 * Read the latched fault status for one rail and translate it into
 * REGULATOR_ERROR_* flags understood by the regulator core.
 */
static int mt6370_get_error_flags(struct regulator_dev *rdev,
				  unsigned int *flags)
{
	struct regmap *regmap = rdev_get_regmap(rdev);
	int rid = rdev_get_id(rdev);
	unsigned int stat_reg, stat, events = 0;
	int ret;

	/* VIBLDO has its own status register; the DSV rails share one. */
	stat_reg = (rid == MT6370_IDX_VIBLDO) ? MT6370_REG_LDO_STAT
					      : MT6370_REG_DB_STAT;

	ret = regmap_read(regmap, stat_reg, &stat);
	if (ret)
		return ret;

	switch (rid) {
	case MT6370_IDX_DSVBOOST:
		if (stat & MT6370_BSTOCP_EVT_MASK)
			events |= REGULATOR_ERROR_OVER_CURRENT;
		break;
	case MT6370_IDX_DSVPOS:
		if (stat & MT6370_POSSCP_EVT_MASK)
			events |= REGULATOR_ERROR_UNDER_VOLTAGE;
		if (stat & MT6370_POSOCP_EVT_MASK)
			events |= REGULATOR_ERROR_OVER_CURRENT;
		break;
	case MT6370_IDX_DSVNEG:
		if (stat & MT6370_NEGSCP_EVT_MASK)
			events |= REGULATOR_ERROR_UNDER_VOLTAGE;
		if (stat & MT6370_NEGOCP_EVT_MASK)
			events |= REGULATOR_ERROR_OVER_CURRENT;
		break;
	default:
		/* MT6370_IDX_VIBLDO */
		if (stat & MT6370_LDOOC_EVT_MASK)
			events |= REGULATOR_ERROR_OVER_CURRENT;
		break;
	}

	*flags = events;
	return 0;
}
/* DSVBOOST: always-on boost; supports bypass instead of enable/disable. */
static const struct regulator_ops mt6370_dbvboost_ops = {
	.get_voltage_sel = regulator_get_voltage_sel_regmap,
	.set_voltage_sel = regulator_set_voltage_sel_regmap,
	.list_voltage = regulator_list_voltage_linear,
	.get_bypass = regulator_get_bypass_regmap,
	.set_bypass = regulator_set_bypass_regmap,
	.get_error_flags = mt6370_get_error_flags,
};
/* DSVPOS/DSVNEG: switchable rails with ramp control and active discharge. */
static const struct regulator_ops mt6370_dbvout_ops = {
	.get_voltage_sel = regulator_get_voltage_sel_regmap,
	.set_voltage_sel = regulator_set_voltage_sel_regmap,
	.list_voltage = regulator_list_voltage_linear,
	.is_enabled = regulator_is_enabled_regmap,
	.enable = regulator_enable_regmap,
	.disable = regulator_disable_regmap,
	.set_active_discharge = regulator_set_active_discharge_regmap,
	.set_ramp_delay = regulator_set_ramp_delay_regmap,
	.get_error_flags = mt6370_get_error_flags,
};
/* VIBLDO: like the DSV rails but without ramp-rate control. */
static const struct regulator_ops mt6370_ldo_ops = {
	.get_voltage_sel = regulator_get_voltage_sel_regmap,
	.set_voltage_sel = regulator_set_voltage_sel_regmap,
	.list_voltage = regulator_list_voltage_linear,
	.is_enabled = regulator_is_enabled_regmap,
	.enable = regulator_enable_regmap,
	.disable = regulator_disable_regmap,
	.set_active_discharge = regulator_set_active_discharge_regmap,
	.get_error_flags = mt6370_get_error_flags,
};
/*
 * Per-regulator DT parse hook: pick up an optional "enable" GPIO.
 *
 * NOTE(review): any fwnode_gpiod_get_index() error (not just -ENOENT)
 * is treated as "no external enable pin" — apparently deliberate
 * best-effort behaviour; confirm before changing.
 */
static int mt6370_of_parse_cb(struct device_node *np,
			      const struct regulator_desc *desc,
			      struct regulator_config *config)
{
	struct mt6370_priv *priv = config->driver_data;
	struct gpio_desc *enable_gpio;
	int ret;

	enable_gpio = fwnode_gpiod_get_index(of_fwnode_handle(np), "enable", 0,
					     GPIOD_OUT_HIGH |
					     GPIOD_FLAGS_BIT_NONEXCLUSIVE,
					     desc->name);
	if (IS_ERR(enable_gpio)) {
		config->ena_gpiod = NULL;
		return 0;
	}

	/*
	 * RG control by default
	 * Only if all are using external pin, change all by external control
	 */
	if (priv->use_external_ctrl) {
		ret = regmap_update_bits(priv->regmap, MT6370_REG_DB_CTRL1,
					 MT6370_DBEXTEN_MASK,
					 MT6370_DBEXTEN_MASK);
		if (ret)
			return ret;
	}

	config->ena_gpiod = enable_gpio;
	/* remember that at least one rail is GPIO controlled */
	priv->use_external_ctrl = true;
	return 0;
}
/*
 * Static descriptors for the four MT6370 display-bias/vibrator rails,
 * indexed by MT6370_IDX_*. DSVPOS/DSVNEG additionally parse an optional
 * "enable" GPIO via mt6370_of_parse_cb().
 */
static const struct regulator_desc mt6370_regulator_descs[] = {
	{
		/* Display-bias boost: 4.0V..6.2V in 50mV steps, bypassable */
		.name = "mt6370-dsv-vbst",
		.of_match = of_match_ptr("dsvbst"),
		.regulators_node = of_match_ptr("regulators"),
		.id = MT6370_IDX_DSVBOOST,
		.type = REGULATOR_VOLTAGE,
		.owner = THIS_MODULE,
		.ops = &mt6370_dbvboost_ops,
		.min_uV = MT6370_DBVBOOST_MINUV,
		.uV_step = MT6370_DBVBOOST_STPUV,
		.n_voltages = MT6370_DBVBOOST_N_VOLT,
		.vsel_reg = MT6370_REG_DB_VBST,
		.vsel_mask = MT6370_DBVOUT_MASK,
		.bypass_reg = MT6370_REG_DB_CTRL1,
		.bypass_mask = MT6370_DBPERD_MASK,
		.bypass_val_on = MT6370_DBPERD_MASK,
	},
	{
		/* Positive display bias output */
		.name = "mt6370-dsv-vpos",
		.of_match = of_match_ptr("dsvpos"),
		.regulators_node = of_match_ptr("regulators"),
		.id = MT6370_IDX_DSVPOS,
		.type = REGULATOR_VOLTAGE,
		.owner = THIS_MODULE,
		.of_parse_cb = mt6370_of_parse_cb,
		.ops = &mt6370_dbvout_ops,
		.min_uV = MT6370_DBVOUT_MINUV,
		.uV_step = MT6370_DBVOUT_STPUV,
		.n_voltages = MT6370_DBVOUT_N_VOLT,
		.vsel_reg = MT6370_REG_DB_VPOS,
		.vsel_mask = MT6370_DBVOUT_MASK,
		.enable_reg = MT6370_REG_DB_CTRL2,
		.enable_mask = MT6370_DBVPOSEN_MASK,
		.ramp_reg = MT6370_REG_DB_VPOS,
		.ramp_mask = MT6370_DBSLEW_MASK,
		.ramp_delay_table = mt6370_vpos_ramp_tbl,
		.n_ramp_values = ARRAY_SIZE(mt6370_vpos_ramp_tbl),
		.active_discharge_reg = MT6370_REG_DB_CTRL2,
		.active_discharge_mask = MT6370_DBVPOSDISG_MASK,
		.active_discharge_on = MT6370_DBVPOSDISG_MASK,
	},
	{
		/* Negative display bias output */
		.name = "mt6370-dsv-vneg",
		.of_match = of_match_ptr("dsvneg"),
		.regulators_node = of_match_ptr("regulators"),
		.id = MT6370_IDX_DSVNEG,
		.type = REGULATOR_VOLTAGE,
		.owner = THIS_MODULE,
		.of_parse_cb = mt6370_of_parse_cb,
		.ops = &mt6370_dbvout_ops,
		.min_uV = MT6370_DBVOUT_MINUV,
		.uV_step = MT6370_DBVOUT_STPUV,
		.n_voltages = MT6370_DBVOUT_N_VOLT,
		.vsel_reg = MT6370_REG_DB_VNEG,
		.vsel_mask = MT6370_DBVOUT_MASK,
		.enable_reg = MT6370_REG_DB_CTRL2,
		.enable_mask = MT6370_DBVNEGEN_MASK,
		.ramp_reg = MT6370_REG_DB_VNEG,
		.ramp_mask = MT6370_DBSLEW_MASK,
		.ramp_delay_table = mt6370_vneg_ramp_tbl,
		.n_ramp_values = ARRAY_SIZE(mt6370_vneg_ramp_tbl),
		.active_discharge_reg = MT6370_REG_DB_CTRL2,
		.active_discharge_mask = MT6370_DBVNEGDISG_MASK,
		.active_discharge_on = MT6370_DBVNEGDISG_MASK,
	},
	{
		/* Vibrator LDO: 1.6V..4.0V in 200mV steps */
		.name = "mt6370-vib-ldo",
		.of_match = of_match_ptr("vibldo"),
		.regulators_node = of_match_ptr("regulators"),
		.id = MT6370_IDX_VIBLDO,
		.type = REGULATOR_VOLTAGE,
		.owner = THIS_MODULE,
		.ops = &mt6370_ldo_ops,
		.min_uV = MT6370_LDO_MINUV,
		.uV_step = MT6370_LDO_STPUV,
		.n_voltages = MT6370_LDO_N_VOLT,
		.vsel_reg = MT6370_REG_LDO_VOUT,
		.vsel_mask = MT6370_LDOVOUT_MASK,
		.enable_reg = MT6370_REG_LDO_VOUT,
		.enable_mask = MT6370_LDOEN_MASK,
		.active_discharge_reg = MT6370_REG_LDO_CFG,
		.active_discharge_mask = MT6370_LDOOMS_MASK,
		.active_discharge_on = MT6370_LDOOMS_MASK,
	}
};
/* Short-circuit IRQ: notify consumers of an under-voltage event. */
static irqreturn_t mt6370_scp_handler(int irq, void *data)
{
	regulator_notifier_call_chain(data, REGULATOR_EVENT_UNDER_VOLTAGE,
				      NULL);
	return IRQ_HANDLED;
}
/* Over-current IRQ: notify consumers of an over-current event. */
static irqreturn_t mt6370_ocp_handler(int irq, void *data)
{
	regulator_notifier_call_chain(data, REGULATOR_EVENT_OVER_CURRENT, NULL);
	return IRQ_HANDLED;
}
/*
 * Request the per-rail SCP/OCP threaded interrupts.
 *
 * Fix: the return of platform_get_irq_byname() was previously passed to
 * devm_request_threaded_irq() unchecked; a missing IRQ would only surface
 * as a confusing request failure. Fail early with the lookup error instead.
 */
static int mt6370_regulator_irq_register(struct mt6370_priv *priv)
{
	struct platform_device *pdev = to_platform_device(priv->dev);
	static const struct {
		const char *name;
		int rid;
		irq_handler_t handler;
	} mt6370_irqs[] = {
		{ "db_vpos_scp", MT6370_IDX_DSVPOS, mt6370_scp_handler },
		{ "db_vneg_scp", MT6370_IDX_DSVNEG, mt6370_scp_handler },
		{ "db_vbst_ocp", MT6370_IDX_DSVBOOST, mt6370_ocp_handler },
		{ "db_vpos_ocp", MT6370_IDX_DSVPOS, mt6370_ocp_handler },
		{ "db_vneg_ocp", MT6370_IDX_DSVNEG, mt6370_ocp_handler },
		{ "ldo_oc", MT6370_IDX_VIBLDO, mt6370_ocp_handler }
	};
	struct regulator_dev *rdev;
	int i, irq, ret;

	for (i = 0; i < ARRAY_SIZE(mt6370_irqs); i++) {
		irq = platform_get_irq_byname(pdev, mt6370_irqs[i].name);
		if (irq < 0)
			return irq;

		rdev = priv->rdev[mt6370_irqs[i].rid];

		/* handler runs in thread context; rdev is the cookie */
		ret = devm_request_threaded_irq(priv->dev, irq, NULL,
						mt6370_irqs[i].handler, 0,
						mt6370_irqs[i].name, rdev);
		if (ret) {
			dev_err(priv->dev,
				"Failed to register (%d) interrupt\n", i);
			return ret;
		}
	}

	return 0;
}
/* Register all four regulator descriptors against the parent MFD node. */
static int mt6370_regualtor_register(struct mt6370_priv *priv)
{
	struct regulator_config cfg = {};
	int idx;

	/* DT "regulators" subnode lives under the parent MFD device */
	cfg.dev = priv->dev->parent;
	cfg.driver_data = priv;

	for (idx = 0; idx < MT6370_MAX_IDX; idx++) {
		struct regulator_dev *rdev;

		rdev = devm_regulator_register(priv->dev,
					       &mt6370_regulator_descs[idx],
					       &cfg);
		if (IS_ERR(rdev)) {
			dev_err(priv->dev,
				"Failed to register (%d) regulator\n", idx);
			return PTR_ERR(rdev);
		}

		priv->rdev[idx] = rdev;
	}

	return 0;
}
static int mt6370_regulator_probe(struct platform_device *pdev)
{
struct mt6370_priv *priv;
int ret;
priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
priv->dev = &pdev->dev;
priv->regmap = dev_get_regmap(pdev->dev.parent, NULL);
if (!priv->regmap) {
dev_err(&pdev->dev, "Failed to init regmap\n");
return -ENODEV;
}
ret = mt6370_regualtor_register(priv);
if (ret)
return ret;
return mt6370_regulator_irq_register(priv);
}
/* Platform IDs; this cell is instantiated by the MT6370 MFD core. */
static const struct platform_device_id mt6370_devid_table[] = {
	{ "mt6370-regulator", 0},
	{}
};
MODULE_DEVICE_TABLE(platform, mt6370_devid_table);
/* Platform driver glue. */
static struct platform_driver mt6370_regulator_driver = {
	.driver = {
		.name = "mt6370-regulator",
		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
	},
	.id_table = mt6370_devid_table,
	.probe = mt6370_regulator_probe,
};
module_platform_driver(mt6370_regulator_driver);
MODULE_AUTHOR("ChiYuan Huang <[email protected]>");
MODULE_DESCRIPTION("Mediatek MT6370 Regulator Driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/regulator/mt6370-regulator.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2023 Richtek Technology Corp.
*
* Author: ChiYuan Huang <[email protected]>
*/
#include <linux/i2c.h>
#include <linux/kernel.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/property.h>
#include <linux/regmap.h>
#include <linux/regulator/driver.h>
#include <linux/regulator/of_regulator.h>
#define RT4803_AUTO_MODE 1
#define RT4803_FPWM_MODE 2
#define RT4803_REG_CONFIG 0x01
#define RT4803_REG_VSELL 0x02
#define RT4803_REG_VSELH 0x03
#define RT4803_REG_ILIM 0x04
#define RT4803_REG_STAT 0x05
#define RT4803_MODE_MASK GENMASK(1, 0)
#define RT4803_VSEL_MASK GENMASK(4, 0)
#define RT4803_ILIM_MASK GENMASK(3, 0)
#define RT4803_TSD_MASK BIT(7)
#define RT4803_HOTDIE_MASK BIT(6)
#define RT4803_FAULT_MASK BIT(1)
#define RT4803_PGOOD_MASK BIT(0)
#define RT4803_VOUT_MINUV 2850000
#define RT4803_VOUT_STEPUV 50000
#define RT4803_VOUT_NUM 32
/* Map a framework operating mode onto the chip's MODE bit-field. */
static int rt4803_set_mode(struct regulator_dev *rdev, unsigned int mode)
{
	struct regmap *regmap = rdev_get_regmap(rdev);
	unsigned int val;

	if (mode == REGULATOR_MODE_NORMAL)
		val = RT4803_AUTO_MODE;
	else if (mode == REGULATOR_MODE_FAST)
		val = RT4803_FPWM_MODE;
	else
		return -EINVAL;

	/* shift the raw mode code into the MODE field position */
	val <<= ffs(RT4803_MODE_MASK) - 1;

	return regmap_update_bits(regmap, RT4803_REG_CONFIG, RT4803_MODE_MASK, val);
}
/* Read back the MODE bit-field and translate it to a framework mode. */
static unsigned int rt4803_get_mode(struct regulator_dev *rdev)
{
	struct regmap *regmap = rdev_get_regmap(rdev);
	unsigned int regval;

	if (regmap_read(regmap, RT4803_REG_CONFIG, &regval))
		return REGULATOR_MODE_INVALID;

	/* extract the MODE field from the config byte */
	regval >>= ffs(RT4803_MODE_MASK) - 1;

	if (regval == RT4803_AUTO_MODE)
		return REGULATOR_MODE_NORMAL;
	if (regval == RT4803_FPWM_MODE)
		return REGULATOR_MODE_FAST;

	return REGULATOR_MODE_INVALID;
}
/* Decode the STAT register into REGULATOR_ERROR_* flags. */
static int rt4803_get_error_flags(struct regulator_dev *rdev, unsigned int *flags)
{
	struct regmap *regmap = rdev_get_regmap(rdev);
	unsigned int stat, events = 0;
	int ret;

	ret = regmap_read(regmap, RT4803_REG_STAT, &stat);
	if (ret)
		return ret;

	/* power-good asserted means there is no fault to report */
	if (!(stat & RT4803_PGOOD_MASK)) {
		if (stat & RT4803_FAULT_MASK)
			events |= REGULATOR_ERROR_FAIL;

		if (stat & RT4803_HOTDIE_MASK)
			events |= REGULATOR_ERROR_OVER_TEMP_WARN;

		if (stat & RT4803_TSD_MASK)
			events |= REGULATOR_ERROR_OVER_TEMP;
	}

	*flags = events;
	return 0;
}
/* Program the suspend voltage into the inactive VSEL register. */
static int rt4803_set_suspend_voltage(struct regulator_dev *rdev, int uV)
{
	struct regmap *regmap = rdev_get_regmap(rdev);
	unsigned int suspend_reg, sel;

	/* the register NOT selected by the VSEL pin holds the suspend level */
	suspend_reg = (rdev->desc->vsel_reg == RT4803_REG_VSELL) ?
		      RT4803_REG_VSELH : RT4803_REG_VSELL;

	sel = (uV - rdev->desc->min_uV) / rdev->desc->uV_step;
	sel <<= ffs(RT4803_VSEL_MASK) - 1;

	return regmap_update_bits(regmap, suspend_reg, RT4803_VSEL_MASK, sel);
}
/* Regulator callbacks; voltage selection goes through the regmap helpers. */
static const struct regulator_ops rt4803_regulator_ops = {
	.list_voltage = regulator_list_voltage_linear,
	.set_voltage_sel = regulator_set_voltage_sel_regmap,
	.get_voltage_sel = regulator_get_voltage_sel_regmap,
	.set_mode = rt4803_set_mode,
	.get_mode = rt4803_get_mode,
	.get_error_flags = rt4803_get_error_flags,
	.set_suspend_voltage = rt4803_set_suspend_voltage,
};
/* Translate raw DT mode codes into framework operating modes. */
static unsigned int rt4803_of_map_mode(unsigned int mode)
{
	if (mode == RT4803_AUTO_MODE)
		return REGULATOR_MODE_NORMAL;

	if (mode == RT4803_FPWM_MODE)
		return REGULATOR_MODE_FAST;

	return REGULATOR_MODE_INVALID;
}
/* 8-bit register / 8-bit value map covering CONFIG..STAT. */
static const struct regmap_config rt4803_regmap_config = {
	.reg_bits = 8,
	.val_bits = 8,
	.max_register = RT4803_REG_STAT,
};
/*
 * Probe: build the regulator descriptor at runtime, since the active
 * VSEL register depends on the board-level VSEL pin polarity
 * ("richtek,vsel-active-high"), then register the regulator.
 */
static int rt4803_probe(struct i2c_client *i2c)
{
	struct device *dev = &i2c->dev;
	struct regmap *regmap;
	struct regulator_desc *desc;
	struct regulator_config cfg = {};
	struct regulator_dev *rdev;
	bool vsel_act_high;
	int ret;

	desc = devm_kzalloc(dev, sizeof(*desc), GFP_KERNEL);
	if (!desc)
		return -ENOMEM;

	regmap = devm_regmap_init_i2c(i2c, &rt4803_regmap_config);
	if (IS_ERR(regmap))
		return dev_err_probe(dev, PTR_ERR(regmap), "Failed to init regmap\n");

	/* Always configure the input current limit to max 5A at initial */
	ret = regmap_update_bits(regmap, RT4803_REG_ILIM, RT4803_ILIM_MASK, 0xff);
	if (ret)
		return dev_err_probe(dev, ret, "Failed to config ILIM to max\n");

	vsel_act_high = device_property_read_bool(dev, "richtek,vsel-active-high");

	desc->name = "rt4803-regulator";
	desc->type = REGULATOR_VOLTAGE;
	desc->owner = THIS_MODULE;
	desc->ops = &rt4803_regulator_ops;
	desc->min_uV = RT4803_VOUT_MINUV;
	desc->uV_step = RT4803_VOUT_STEPUV;
	desc->n_voltages = RT4803_VOUT_NUM;
	desc->vsel_mask = RT4803_VSEL_MASK;
	desc->of_map_mode = rt4803_of_map_mode;
	/* active VSEL register follows the VSEL pin polarity */
	if (vsel_act_high)
		desc->vsel_reg = RT4803_REG_VSELH;
	else
		desc->vsel_reg = RT4803_REG_VSELL;

	cfg.dev = dev;
	cfg.of_node = dev_of_node(dev);
	cfg.init_data = of_get_regulator_init_data(dev, dev_of_node(dev), desc);

	rdev = devm_regulator_register(dev, desc, &cfg);
	return PTR_ERR_OR_ZERO(rdev);
}
/* DT compatible string for the RT4803 buck-boost. */
static const struct of_device_id rt4803_device_match_table[] = {
	{ .compatible = "richtek,rt4803" },
	{}
};
MODULE_DEVICE_TABLE(of, rt4803_device_match_table);
/* I2C driver glue. */
static struct i2c_driver rt4803_driver = {
	.driver = {
		.name = "rt4803",
		.of_match_table = rt4803_device_match_table,
	},
	.probe = rt4803_probe,
};
module_i2c_driver(rt4803_driver);
MODULE_DESCRIPTION("Richtek RT4803 voltage regulator driver");
MODULE_AUTHOR("ChiYuan Huang <[email protected]>");
MODULE_LICENSE("GPL");
| linux-master | drivers/regulator/rt4803.c |
// SPDX-License-Identifier: GPL-2.0+
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/i2c.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/regmap.h>
#include <linux/regulator/driver.h>
#include <linux/regulator/of_regulator.h>
#define RT6245_VIRT_OCLIMIT 0x00
#define RT6245_VIRT_OTLEVEL 0x01
#define RT6245_VIRT_PGDLYTIME 0x02
#define RT6245_VIRT_SLEWRATE 0x03
#define RT6245_VIRT_SWFREQ 0x04
#define RT6245_VIRT_VOUT 0x05
#define RT6245_VOUT_MASK GENMASK(6, 0)
#define RT6245_SLEW_MASK GENMASK(2, 0)
#define RT6245_CHKSUM_MASK BIT(7)
#define RT6245_CODE_MASK GENMASK(6, 0)
/* HW Enable + Soft start time */
#define RT6245_ENTIME_IN_US 5000
#define RT6245_VOUT_MINUV 437500
#define RT6245_VOUT_MAXUV 1387500
#define RT6245_VOUT_STEPUV 12500
#define RT6245_NUM_VOUT ((RT6245_VOUT_MAXUV - RT6245_VOUT_MINUV) / RT6245_VOUT_STEPUV + 1)
/* Driver state: the chip has no enable bit, so state is tracked in SW. */
struct rt6245_priv {
	struct gpio_desc *enable_gpio;	/* optional HW enable pin */
	bool enable_state;		/* mirrors the enable pin level */
};
/*
 * Enable via GPIO, then resync the write-only register cache.
 * Order matters: the chip must be powered and past soft-start before
 * regcache_sync() replays the cached register values.
 */
static int rt6245_enable(struct regulator_dev *rdev)
{
	struct rt6245_priv *priv = rdev_get_drvdata(rdev);
	struct regmap *regmap = rdev_get_regmap(rdev);
	int ret;

	/* no GPIO means the rail is permanently enabled in HW */
	if (!priv->enable_gpio)
		return 0;

	gpiod_direction_output(priv->enable_gpio, 1);
	/* HW enable + soft-start time before the chip accepts writes */
	usleep_range(RT6245_ENTIME_IN_US, RT6245_ENTIME_IN_US + 1000);

	regcache_cache_only(regmap, false);
	ret = regcache_sync(regmap);
	if (ret)
		return ret;

	priv->enable_state = true;
	return 0;
}
/*
 * Disable via GPIO. The register contents are lost on power-down, so
 * switch the cache to cache-only and mark it dirty for the next enable.
 */
static int rt6245_disable(struct regulator_dev *rdev)
{
	struct rt6245_priv *priv = rdev_get_drvdata(rdev);
	struct regmap *regmap = rdev_get_regmap(rdev);

	/* without a GPIO the rail cannot be turned off */
	if (!priv->enable_gpio)
		return -EINVAL;

	regcache_cache_only(regmap, true);
	regcache_mark_dirty(regmap);

	gpiod_direction_output(priv->enable_gpio, 0);

	priv->enable_state = false;
	return 0;
}
/* Enable state is tracked in software; the chip has no readable bit. */
static int rt6245_is_enabled(struct regulator_dev *rdev)
{
	struct rt6245_priv *priv = rdev_get_drvdata(rdev);

	return priv->enable_state;
}
/* Regulator callbacks; enable/disable drive the GPIO + cache dance. */
static const struct regulator_ops rt6245_regulator_ops = {
	.list_voltage = regulator_list_voltage_linear,
	.set_voltage_sel = regulator_set_voltage_sel_regmap,
	.get_voltage_sel = regulator_get_voltage_sel_regmap,
	.set_ramp_delay = regulator_set_ramp_delay_regmap,
	.enable = rt6245_enable,
	.disable = rt6245_disable,
	.is_enabled = rt6245_is_enabled,
};
/* ramp delay dividend is 12500 uV/uS, and divisor from 1 to 8 */
static const unsigned int rt6245_ramp_delay_table[] = {
	12500, 6250, 4167, 3125, 2500, 2083, 1786, 1562
};
/* Single-output descriptor; registers are the driver's virtual map. */
static const struct regulator_desc rt6245_regulator_desc = {
	.name = "rt6245-regulator",
	.ops = &rt6245_regulator_ops,
	.type = REGULATOR_VOLTAGE,
	.min_uV = RT6245_VOUT_MINUV,
	.uV_step = RT6245_VOUT_STEPUV,
	.n_voltages = RT6245_NUM_VOUT,
	.ramp_delay_table = rt6245_ramp_delay_table,
	.n_ramp_values = ARRAY_SIZE(rt6245_ramp_delay_table),
	.owner = THIS_MODULE,
	.vsel_reg = RT6245_VIRT_VOUT,
	.vsel_mask = RT6245_VOUT_MASK,
	.ramp_reg = RT6245_VIRT_SLEWRATE,
	.ramp_mask = RT6245_SLEW_MASK,
};
/* Apply optional DT tuning properties to their virtual registers. */
static int rt6245_init_device_properties(struct device *dev)
{
	static const struct {
		const char *name;
		unsigned int reg;
	} props[] = {
		{ "richtek,oc-level-select", RT6245_VIRT_OCLIMIT },
		{ "richtek,ot-level-select", RT6245_VIRT_OTLEVEL },
		{ "richtek,pgdly-time-select", RT6245_VIRT_PGDLYTIME },
		{ "richtek,switch-freq-select", RT6245_VIRT_SWFREQ }
	};
	struct regmap *regmap = dev_get_regmap(dev, NULL);
	u8 propval;
	int i, ret;

	for (i = 0; i < ARRAY_SIZE(props); i++) {
		/* absent property -> keep the register default */
		if (device_property_read_u8(dev, props[i].name, &propval))
			continue;

		ret = regmap_write(regmap, props[i].reg, propval);
		if (ret) {
			dev_err(dev, "Fail to apply [%s:%d]\n", props[i].name, propval);
			return ret;
		}
	}

	return 0;
}
/*
 * Custom regmap write: the chip takes single-byte commands where bits
 * [6:0] encode function base + value and bit 7 is an even-parity checksum.
 */
static int rt6245_reg_write(void *context, unsigned int reg, unsigned int val)
{
	static const u8 func_base[] = { 0x6F, 0x73, 0x78, 0x61, 0x7C, 0 };
	struct i2c_client *i2c = context;
	unsigned int code;

	code = func_base[reg] + val;

	/* set bit 7 so that bits [6:0] have even parity */
	if (hweight8(code & RT6245_CODE_MASK) & 1)
		code |= RT6245_CHKSUM_MASK;
	else
		code &= ~RT6245_CHKSUM_MASK;

	return i2c_smbus_write_byte(i2c, code);
}
/* Power-on defaults mirrored in the flat cache (registers are write-only). */
static const struct reg_default rt6245_reg_defaults[] = {
	/* Default over current 14A */
	{ RT6245_VIRT_OCLIMIT, 2 },
	/* Default over temperature 150'c */
	{ RT6245_VIRT_OTLEVEL, 0 },
	/* Default power good delay time 10us */
	{ RT6245_VIRT_PGDLYTIME, 1 },
	/* Default slewrate 12.5mV/uS */
	{ RT6245_VIRT_SLEWRATE, 0 },
	/* Default switch frequency 800KHz */
	{ RT6245_VIRT_SWFREQ, 1 },
	/* Default voltage 750mV */
	{ RT6245_VIRT_VOUT, 0x19 }
};
/* Virtual register map; writes go through rt6245_reg_write(). */
static const struct regmap_config rt6245_regmap_config = {
	.reg_bits = 8,
	.val_bits = 8,
	.max_register = RT6245_VIRT_VOUT,
	.cache_type = REGCACHE_FLAT,
	.reg_defaults = rt6245_reg_defaults,
	.num_reg_defaults = ARRAY_SIZE(rt6245_reg_defaults),
	.reg_write = rt6245_reg_write,
};
static int rt6245_probe(struct i2c_client *i2c)
{
struct rt6245_priv *priv;
struct regmap *regmap;
struct regulator_config regulator_cfg = {};
struct regulator_dev *rdev;
int ret;
priv = devm_kzalloc(&i2c->dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
priv->enable_state = true;
priv->enable_gpio = devm_gpiod_get_optional(&i2c->dev, "enable", GPIOD_OUT_HIGH);
if (IS_ERR(priv->enable_gpio)) {
dev_err(&i2c->dev, "Failed to get 'enable' gpio\n");
return PTR_ERR(priv->enable_gpio);
}
usleep_range(RT6245_ENTIME_IN_US, RT6245_ENTIME_IN_US + 1000);
regmap = devm_regmap_init(&i2c->dev, NULL, i2c, &rt6245_regmap_config);
if (IS_ERR(regmap)) {
dev_err(&i2c->dev, "Failed to initialize the regmap\n");
return PTR_ERR(regmap);
}
ret = rt6245_init_device_properties(&i2c->dev);
if (ret) {
dev_err(&i2c->dev, "Failed to initialize device properties\n");
return ret;
}
regulator_cfg.dev = &i2c->dev;
regulator_cfg.of_node = i2c->dev.of_node;
regulator_cfg.regmap = regmap;
regulator_cfg.driver_data = priv;
regulator_cfg.init_data = of_get_regulator_init_data(&i2c->dev, i2c->dev.of_node,
&rt6245_regulator_desc);
rdev = devm_regulator_register(&i2c->dev, &rt6245_regulator_desc, ®ulator_cfg);
if (IS_ERR(rdev)) {
dev_err(&i2c->dev, "Failed to register regulator\n");
return PTR_ERR(rdev);
}
return 0;
}
/* DT compatible for the RT6245 step-down converter. */
static const struct of_device_id __maybe_unused rt6245_of_match_table[] = {
	{ .compatible = "richtek,rt6245", },
	{}
};
MODULE_DEVICE_TABLE(of, rt6245_of_match_table);
/* I2C driver glue. */
static struct i2c_driver rt6245_driver = {
	.driver = {
		.name = "rt6245",
		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
		.of_match_table = rt6245_of_match_table,
	},
	.probe = rt6245_probe,
};
module_i2c_driver(rt6245_driver);
MODULE_AUTHOR("ChiYuan Huang <[email protected]>");
MODULE_DESCRIPTION("Richtek RT6245 Regulator Driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/regulator/rt6245-regulator.c |
// SPDX-License-Identifier: GPL-2.0
//
// Renesas RAA215300 PMIC driver
//
// Copyright (C) 2023 Renesas Electronics Corporation
//
#include <linux/clk.h>
#include <linux/clkdev.h>
#include <linux/clk-provider.h>
#include <linux/err.h>
#include <linux/i2c.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/regmap.h>
#define RAA215300_FAULT_LATCHED_STATUS_1 0x59
#define RAA215300_FAULT_LATCHED_STATUS_2 0x5a
#define RAA215300_FAULT_LATCHED_STATUS_3 0x5b
#define RAA215300_FAULT_LATCHED_STATUS_4 0x5c
#define RAA215300_FAULT_LATCHED_STATUS_6 0x5e
#define RAA215300_INT_MASK_1 0x64
#define RAA215300_INT_MASK_2 0x65
#define RAA215300_INT_MASK_3 0x66
#define RAA215300_INT_MASK_4 0x67
#define RAA215300_INT_MASK_6 0x68
#define RAA215300_REG_BLOCK_EN 0x6c
#define RAA215300_HW_REV 0xf8
#define RAA215300_INT_MASK_1_ALL GENMASK(5, 0)
#define RAA215300_INT_MASK_2_ALL GENMASK(3, 0)
#define RAA215300_INT_MASK_3_ALL GENMASK(5, 0)
#define RAA215300_INT_MASK_4_ALL BIT(0)
#define RAA215300_INT_MASK_6_ALL GENMASK(7, 0)
#define RAA215300_REG_BLOCK_EN_RTC_EN BIT(6)
#define RAA215300_RTC_DEFAULT_ADDR 0x6f
/* Plain 8-bit register map covering the whole PMIC address space. */
static const struct regmap_config raa215300_regmap_config = {
	.reg_bits = 8,
	.val_bits = 8,
	.max_register = 0xff,
};
/* devm action: tear down the RTC child device created during probe. */
static void raa215300_rtc_unregister_device(void *data)
{
	struct i2c_client *rtc_client = data;

	i2c_unregister_device(rtc_client);
}
/*
 * Check whether the named clock is wired up in DT.
 * Returns 1 if present, 0 if absent, negative errno on lookup failure.
 */
static int raa215300_clk_present(struct i2c_client *client, const char *name)
{
	struct clk *clk = devm_clk_get_optional(&client->dev, name);

	if (IS_ERR(clk))
		return PTR_ERR(clk);

	return clk ? 1 : 0;
}
/*
 * Probe the RAA215300 PMIC: clear latched faults, mask all interrupts,
 * and — when an "xin" or "clkin" clock is present — expose a 32.768 kHz
 * fixed-rate clock and instantiate the built-in ISL1208-compatible RTC
 * as a child I2C device.
 *
 * NOTE(review): the regmap_read/regmap_write calls in the fault-clear and
 * mask sections ignore errors — presumably best-effort init; confirm
 * before tightening.
 */
static int raa215300_i2c_probe(struct i2c_client *client)
{
	struct device *dev = &client->dev;
	const char *clkin_name = "clkin";
	unsigned int pmic_version, val;
	const char *xin_name = "xin";
	const char *clk_name = NULL;
	struct regmap *regmap;
	int ret;

	regmap = devm_regmap_init_i2c(client, &raa215300_regmap_config);
	if (IS_ERR(regmap))
		return dev_err_probe(dev, PTR_ERR(regmap),
				     "regmap i2c init failed\n");

	ret = regmap_read(regmap, RAA215300_HW_REV, &pmic_version);
	if (ret < 0)
		return dev_err_probe(dev, ret, "HW rev read failed\n");

	dev_dbg(dev, "RAA215300 PMIC version 0x%04x\n", pmic_version);

	/* Clear all blocks except RTC, if enabled */
	regmap_read(regmap, RAA215300_REG_BLOCK_EN, &val);
	val &= RAA215300_REG_BLOCK_EN_RTC_EN;
	regmap_write(regmap, RAA215300_REG_BLOCK_EN, val);

	/* Clear the latched registers */
	regmap_read(regmap, RAA215300_FAULT_LATCHED_STATUS_1, &val);
	regmap_write(regmap, RAA215300_FAULT_LATCHED_STATUS_1, val);
	regmap_read(regmap, RAA215300_FAULT_LATCHED_STATUS_2, &val);
	regmap_write(regmap, RAA215300_FAULT_LATCHED_STATUS_2, val);
	regmap_read(regmap, RAA215300_FAULT_LATCHED_STATUS_3, &val);
	regmap_write(regmap, RAA215300_FAULT_LATCHED_STATUS_3, val);
	regmap_read(regmap, RAA215300_FAULT_LATCHED_STATUS_4, &val);
	regmap_write(regmap, RAA215300_FAULT_LATCHED_STATUS_4, val);
	regmap_read(regmap, RAA215300_FAULT_LATCHED_STATUS_6, &val);
	regmap_write(regmap, RAA215300_FAULT_LATCHED_STATUS_6, val);

	/* Mask all the PMIC interrupts */
	regmap_write(regmap, RAA215300_INT_MASK_1, RAA215300_INT_MASK_1_ALL);
	regmap_write(regmap, RAA215300_INT_MASK_2, RAA215300_INT_MASK_2_ALL);
	regmap_write(regmap, RAA215300_INT_MASK_3, RAA215300_INT_MASK_3_ALL);
	regmap_write(regmap, RAA215300_INT_MASK_4, RAA215300_INT_MASK_4_ALL);
	regmap_write(regmap, RAA215300_INT_MASK_6, RAA215300_INT_MASK_6_ALL);

	/* Prefer "xin"; fall back to "clkin" if "xin" is absent. */
	ret = raa215300_clk_present(client, xin_name);
	if (ret < 0) {
		return ret;
	} else if (ret) {
		clk_name = xin_name;
	} else {
		ret = raa215300_clk_present(client, clkin_name);
		if (ret < 0)
			return ret;
		if (ret)
			clk_name = clkin_name;
	}

	if (clk_name) {
		/* older silicon (< rev 0x12) needs the quirked RTC driver */
		const char *name = pmic_version >= 0x12 ? "isl1208" : "raa215300_a0";
		struct device_node *np = client->dev.of_node;
		u32 addr = RAA215300_RTC_DEFAULT_ADDR;
		struct i2c_board_info info = {};
		struct i2c_client *rtc_client;
		struct clk_hw *hw;
		ssize_t size;

		hw = devm_clk_hw_register_fixed_rate(dev, clk_name, NULL, 0, 32768);
		if (IS_ERR(hw))
			return PTR_ERR(hw);

		ret = devm_clk_hw_register_clkdev(dev, hw, clk_name, NULL);
		if (ret)
			return dev_err_probe(dev, ret, "Failed to initialize clkdev\n");

		/* DT may override the RTC's I2C address via reg-names="rtc" */
		if (np) {
			int i;

			i = of_property_match_string(np, "reg-names", "rtc");
			if (i >= 0)
				of_property_read_u32_index(np, "reg", i, &addr);
		}

		info.addr = addr;
		if (client->irq > 0)
			info.irq = client->irq;

		size = strscpy(info.type, name, sizeof(info.type));
		if (size < 0)
			return dev_err_probe(dev, size,
					     "Invalid device name: %s\n", name);

		/* Enable RTC block */
		regmap_update_bits(regmap, RAA215300_REG_BLOCK_EN,
				   RAA215300_REG_BLOCK_EN_RTC_EN,
				   RAA215300_REG_BLOCK_EN_RTC_EN);

		rtc_client = i2c_new_client_device(client->adapter, &info);
		if (IS_ERR(rtc_client))
			return PTR_ERR(rtc_client);

		ret = devm_add_action_or_reset(dev,
					       raa215300_rtc_unregister_device,
					       rtc_client);
		if (ret < 0)
			return ret;
	}

	return 0;
}
/* DT compatible for the RAA215300 PMIC. */
static const struct of_device_id raa215300_dt_match[] = {
	{ .compatible = "renesas,raa215300" },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, raa215300_dt_match);
/* I2C driver glue. */
static struct i2c_driver raa215300_i2c_driver = {
	.driver = {
		.name = "raa215300",
		.of_match_table = raa215300_dt_match,
	},
	.probe = raa215300_i2c_probe,
};
module_i2c_driver(raa215300_i2c_driver);
MODULE_DESCRIPTION("Renesas RAA215300 PMIC driver");
MODULE_AUTHOR("Fabrizio Castro <[email protected]>");
MODULE_AUTHOR("Biju Das <[email protected]>");
MODULE_LICENSE("GPL");
| linux-master | drivers/regulator/raa215300.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* tps65217-regulator.c
*
* Regulator driver for TPS65217 PMIC
*
* Copyright (C) 2011 Texas Instruments Incorporated - https://www.ti.com/
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/regulator/of_regulator.h>
#include <linux/regulator/driver.h>
#include <linux/regulator/machine.h>
#include <linux/mfd/tps65217.h>
/*
 * Build one regulator_desc entry. _vr/_vm select the voltage register and
 * mask, _em the bit in the shared ENABLE register, _t/_lr/_nlr the voltage
 * table or linear ranges, and _sr/_sm the sequencer strobe register/mask
 * (stored in the otherwise-unused bypass fields and used by the
 * suspend-enable/disable callbacks below).
 */
#define TPS65217_REGULATOR(_name, _id, _of_match, _ops, _n, _vr, _vm, _em, \
			   _t, _lr, _nlr, _sr, _sm) \
	{ \
		.name = _name, \
		.id = _id, \
		.of_match = of_match_ptr(_of_match), \
		.regulators_node= of_match_ptr("regulators"), \
		.ops = &_ops, \
		.n_voltages = _n, \
		.type = REGULATOR_VOLTAGE, \
		.owner = THIS_MODULE, \
		.vsel_reg = _vr, \
		.vsel_mask = _vm, \
		.enable_reg = TPS65217_REG_ENABLE, \
		.enable_mask = _em, \
		.volt_table = _t, \
		.linear_ranges = _lr, \
		.n_linear_ranges = _nlr, \
		.bypass_reg = _sr, \
		.bypass_mask = _sm, \
	} \
/* LDO1 discrete voltage table, in uV, indexed by the 4-bit selector. */
static const unsigned int LDO1_VSEL_table[] = {
	1000000, 1100000, 1200000, 1250000,
	1300000, 1350000, 1400000, 1500000,
	1600000, 1800000, 2500000, 2750000,
	2800000, 3000000, 3100000, 3300000,
};
/* Piecewise-linear ranges for the DCDCs (0.9V..3.3V). */
static const struct linear_range tps65217_uv1_ranges[] = {
	REGULATOR_LINEAR_RANGE(900000, 0, 24, 25000),
	REGULATOR_LINEAR_RANGE(1550000, 25, 52, 50000),
	REGULATOR_LINEAR_RANGE(3000000, 53, 55, 100000),
	REGULATOR_LINEAR_RANGE(3300000, 56, 63, 0),
};
/* Piecewise-linear ranges for LDO3/LDO4 (1.5V..3.3V). */
static const struct linear_range tps65217_uv2_ranges[] = {
	REGULATOR_LINEAR_RANGE(1500000, 0, 8, 50000),
	REGULATOR_LINEAR_RANGE(2000000, 9, 13, 100000),
	REGULATOR_LINEAR_RANGE(2450000, 14, 31, 50000),
};
/* Turn a rail on via the shared ENABLE register. */
static int tps65217_pmic_enable(struct regulator_dev *dev)
{
	struct tps65217 *tps = rdev_get_drvdata(dev);
	unsigned int mask = dev->desc->enable_mask;
	int rid = rdev_get_id(dev);

	if (rid < TPS65217_DCDC_1 || rid > TPS65217_LDO_4)
		return -EINVAL;

	/* ENABLE register writes require password protection level 1 */
	return tps65217_set_bits(tps, TPS65217_REG_ENABLE, mask, mask,
				 TPS65217_PROTECT_L1);
}
/* Turn a rail off via the shared ENABLE register. */
static int tps65217_pmic_disable(struct regulator_dev *dev)
{
	struct tps65217 *tps = rdev_get_drvdata(dev);
	int rid = rdev_get_id(dev);

	if (rid < TPS65217_DCDC_1 || rid > TPS65217_LDO_4)
		return -EINVAL;

	/* ENABLE register writes require password protection level 1 */
	return tps65217_clear_bits(tps, TPS65217_REG_ENABLE,
				   dev->desc->enable_mask, TPS65217_PROTECT_L1);
}
/*
 * Write the voltage selector, then pulse the GO bit so the DCDCs latch
 * the new value.
 *
 * Fix: a failure from the selector write was previously overwritten by
 * the GO-bit write's return value, silently masking the error and
 * strobing GO on a stale selector. Bail out early instead.
 */
static int tps65217_pmic_set_voltage_sel(struct regulator_dev *dev,
					 unsigned selector)
{
	int ret;
	struct tps65217 *tps = rdev_get_drvdata(dev);
	unsigned int rid = rdev_get_id(dev);

	/* Set the voltage based on vsel value and write protect level is 2 */
	ret = tps65217_set_bits(tps, dev->desc->vsel_reg, dev->desc->vsel_mask,
				selector, TPS65217_PROTECT_L2);
	if (ret)
		return ret;

	/* Set GO bit for DCDCx to initiate voltage transition */
	switch (rid) {
	case TPS65217_DCDC_1 ... TPS65217_DCDC_3:
		ret = tps65217_set_bits(tps, TPS65217_REG_DEFSLEW,
					TPS65217_DEFSLEW_GO, TPS65217_DEFSLEW_GO,
					TPS65217_PROTECT_L2);
		break;
	}

	return ret;
}
/* Keep the rail enabled during suspend by clearing its sequencer bits. */
static int tps65217_pmic_set_suspend_enable(struct regulator_dev *dev)
{
	struct tps65217 *tps = rdev_get_drvdata(dev);

	/* Unsigned compare also rejects negative IDs */
	if ((unsigned int)rdev_get_id(dev) > TPS65217_LDO_4)
		return -EINVAL;

	/* bypass_reg/bypass_mask hold this rail's SEQx register and mask */
	return tps65217_clear_bits(tps, dev->desc->bypass_reg,
				   dev->desc->bypass_mask,
				   TPS65217_PROTECT_L1);
}
/*
 * Let the sequencer shut the rail down in suspend by restoring the
 * strobe value that was captured at probe time.
 */
static int tps65217_pmic_set_suspend_disable(struct regulator_dev *dev)
{
	struct tps65217 *tps = rdev_get_drvdata(dev);
	unsigned int rid = rdev_get_id(dev);

	/* Out-of-range ID, or no strobe recorded for this rail */
	if (rid > TPS65217_LDO_4 || !tps->strobes[rid])
		return -EINVAL;

	return tps65217_set_bits(tps, dev->desc->bypass_reg,
				 dev->desc->bypass_mask,
				 tps->strobes[rid], TPS65217_PROTECT_L1);
}
/*
 * Operations permitted on DCDCx, LDO2, LDO3 and LDO4.
 * These rails use linear-range voltage mapping.
 */
static const struct regulator_ops tps65217_pmic_ops = {
	.is_enabled		= regulator_is_enabled_regmap,
	.enable			= tps65217_pmic_enable,
	.disable		= tps65217_pmic_disable,
	.get_voltage_sel	= regulator_get_voltage_sel_regmap,
	.set_voltage_sel	= tps65217_pmic_set_voltage_sel,
	.list_voltage		= regulator_list_voltage_linear_range,
	.map_voltage		= regulator_map_voltage_linear_range,
	.set_suspend_enable	= tps65217_pmic_set_suspend_enable,
	.set_suspend_disable	= tps65217_pmic_set_suspend_disable,
};

/*
 * Operations permitted on LDO1.
 * LDO1 uses a discrete voltage table (LDO1_VSEL_table) instead of
 * linear ranges; the table is sorted, so ascend-mapping applies.
 */
static const struct regulator_ops tps65217_pmic_ldo1_ops = {
	.is_enabled		= regulator_is_enabled_regmap,
	.enable			= tps65217_pmic_enable,
	.disable		= tps65217_pmic_disable,
	.get_voltage_sel	= regulator_get_voltage_sel_regmap,
	.set_voltage_sel	= tps65217_pmic_set_voltage_sel,
	.list_voltage		= regulator_list_voltage_table,
	.map_voltage		= regulator_map_voltage_ascend,
	.set_suspend_enable	= tps65217_pmic_set_suspend_enable,
	.set_suspend_disable	= tps65217_pmic_set_suspend_disable,
};
/*
 * Descriptions of the 7 TPS65217 rails. Indexed by regulator ID
 * (TPS65217_DCDC_1 .. TPS65217_LDO_4). LDO3/LDO4 double as load
 * switches, hence the extra LSx enable bits ORed into enable_mask.
 */
static const struct regulator_desc regulators[] = {
	TPS65217_REGULATOR("DCDC1", TPS65217_DCDC_1, "dcdc1",
			   tps65217_pmic_ops, 64, TPS65217_REG_DEFDCDC1,
			   TPS65217_DEFDCDCX_DCDC_MASK, TPS65217_ENABLE_DC1_EN,
			   NULL, tps65217_uv1_ranges,
			   ARRAY_SIZE(tps65217_uv1_ranges), TPS65217_REG_SEQ1,
			   TPS65217_SEQ1_DC1_SEQ_MASK),
	TPS65217_REGULATOR("DCDC2", TPS65217_DCDC_2, "dcdc2",
			   tps65217_pmic_ops, 64, TPS65217_REG_DEFDCDC2,
			   TPS65217_DEFDCDCX_DCDC_MASK, TPS65217_ENABLE_DC2_EN,
			   NULL, tps65217_uv1_ranges,
			   ARRAY_SIZE(tps65217_uv1_ranges), TPS65217_REG_SEQ1,
			   TPS65217_SEQ1_DC2_SEQ_MASK),
	TPS65217_REGULATOR("DCDC3", TPS65217_DCDC_3, "dcdc3",
			   tps65217_pmic_ops, 64, TPS65217_REG_DEFDCDC3,
			   TPS65217_DEFDCDCX_DCDC_MASK, TPS65217_ENABLE_DC3_EN,
			   NULL, tps65217_uv1_ranges,
			   ARRAY_SIZE(tps65217_uv1_ranges), TPS65217_REG_SEQ2,
			   TPS65217_SEQ2_DC3_SEQ_MASK),
	TPS65217_REGULATOR("LDO1", TPS65217_LDO_1, "ldo1",
			   tps65217_pmic_ldo1_ops, 16, TPS65217_REG_DEFLDO1,
			   TPS65217_DEFLDO1_LDO1_MASK, TPS65217_ENABLE_LDO1_EN,
			   LDO1_VSEL_table, NULL, 0, TPS65217_REG_SEQ2,
			   TPS65217_SEQ2_LDO1_SEQ_MASK),
	TPS65217_REGULATOR("LDO2", TPS65217_LDO_2, "ldo2", tps65217_pmic_ops,
			   64, TPS65217_REG_DEFLDO2,
			   TPS65217_DEFLDO2_LDO2_MASK, TPS65217_ENABLE_LDO2_EN,
			   NULL, tps65217_uv1_ranges,
			   ARRAY_SIZE(tps65217_uv1_ranges), TPS65217_REG_SEQ3,
			   TPS65217_SEQ3_LDO2_SEQ_MASK),
	TPS65217_REGULATOR("LDO3", TPS65217_LDO_3, "ldo3", tps65217_pmic_ops,
			   32, TPS65217_REG_DEFLS1, TPS65217_DEFLDO3_LDO3_MASK,
			   TPS65217_ENABLE_LS1_EN | TPS65217_DEFLDO3_LDO3_EN,
			   NULL, tps65217_uv2_ranges,
			   ARRAY_SIZE(tps65217_uv2_ranges), TPS65217_REG_SEQ3,
			   TPS65217_SEQ3_LDO3_SEQ_MASK),
	TPS65217_REGULATOR("LDO4", TPS65217_LDO_4, "ldo4", tps65217_pmic_ops,
			   32, TPS65217_REG_DEFLS2, TPS65217_DEFLDO4_LDO4_MASK,
			   TPS65217_ENABLE_LS2_EN | TPS65217_DEFLDO4_LDO4_EN,
			   NULL, tps65217_uv2_ranges,
			   ARRAY_SIZE(tps65217_uv2_ranges), TPS65217_REG_SEQ4,
			   TPS65217_SEQ4_LDO4_SEQ_MASK),
};
/*
 * Register all 7 regulators and capture each rail's default suspend
 * sequencer strobe so set_suspend_disable() can restore it later.
 *
 * Fix: "®ulators[i]" was HTML-entity mojibake of "&regulators[i]"
 * ("&reg" rendered as the registered-trademark sign), which does not
 * compile. Restored the address-of expression.
 */
static int tps65217_regulator_probe(struct platform_device *pdev)
{
	struct tps65217 *tps = dev_get_drvdata(pdev->dev.parent);
	struct tps65217_board *pdata = dev_get_platdata(tps->dev);
	struct regulator_dev *rdev;
	struct regulator_config config = { };
	int i, ret;
	unsigned int val;

	/* Allocate memory for the per-rail suspend strobes */
	tps->strobes = devm_kcalloc(&pdev->dev,
				    TPS65217_NUM_REGULATOR, sizeof(u8),
				    GFP_KERNEL);
	if (!tps->strobes)
		return -ENOMEM;

	platform_set_drvdata(pdev, tps);

	for (i = 0; i < TPS65217_NUM_REGULATOR; i++) {
		/* Register the regulators */
		config.dev = tps->dev;
		if (pdata)
			config.init_data = pdata->tps65217_init_data[i];
		config.driver_data = tps;
		config.regmap = tps->regmap;

		rdev = devm_regulator_register(&pdev->dev, &regulators[i],
					       &config);
		if (IS_ERR(rdev)) {
			dev_err(tps->dev, "failed to register %s regulator\n",
				pdev->name);
			return PTR_ERR(rdev);
		}

		/* Store default strobe info (SEQx value for this rail) */
		ret = tps65217_reg_read(tps, regulators[i].bypass_reg, &val);
		if (ret)
			return ret;

		tps->strobes[i] = val & regulators[i].bypass_mask;
	}

	return 0;
}
static struct platform_driver tps65217_regulator_driver = {
	.driver = {
		.name = "tps65217-pmic",
		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
	},
	.probe = tps65217_regulator_probe,
};

/* Registered at subsys_initcall time so consumers probing later find us */
static int __init tps65217_regulator_init(void)
{
	return platform_driver_register(&tps65217_regulator_driver);
}
subsys_initcall(tps65217_regulator_init);

static void __exit tps65217_regulator_exit(void)
{
	platform_driver_unregister(&tps65217_regulator_driver);
}
module_exit(tps65217_regulator_exit);

MODULE_AUTHOR("AnilKumar Ch <[email protected]>");
MODULE_DESCRIPTION("TPS65217 voltage regulator driver");
MODULE_ALIAS("platform:tps65217-pmic");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/regulator/tps65217-regulator.c |
// SPDX-License-Identifier: GPL-2.0-only
//
// Copyright 2014 Embest Technology Co. Ltd. Inc.
// bd71815-regulator.c ROHM BD71815 regulator driver
//
// Author: Tony Luo <[email protected]>
//
// Partially rewritten at 2021 by
// Matti Vaittinen <[email protected]>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/of.h>
#include <linux/gpio/consumer.h>
#include <linux/regulator/driver.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/mfd/rohm-generic.h>
#include <linux/mfd/rohm-bd71815.h>
#include <linux/regulator/of_regulator.h>
/*
 * struct bd71815_regulator - regulator description plus the DVS
 * (run-level) configuration used to program HW-state voltages/enables.
 */
struct bd71815_regulator {
	struct regulator_desc desc;
	const struct rohm_dvs_config *dvs;
};

/* Selectable WLED sink currents in uA, ascending (maps to LED_DIMM field) */
static const int bd7181x_wled_currents[] = {
	10, 20, 30, 50, 70, 100, 200, 300, 500, 700, 1000, 2000, 3000, 4000,
	5000, 6000, 7000, 8000, 9000, 10000, 11000, 12000, 13000, 14000, 15000,
	16000, 17000, 18000, 19000, 20000, 21000, 22000, 23000, 24000, 25000,
};
/*
 * DVS (HW run-level) configuration for the bucks. BUCK1/2 have separate
 * voltage registers for RUN (_VOLT_H) and SUSPEND/LPSR (_VOLT_L);
 * BUCK3/4 use a single voltage register and only per-state enable bits.
 */
static const struct rohm_dvs_config buck1_dvs = {
	.level_map		= ROHM_DVS_LEVEL_RUN | ROHM_DVS_LEVEL_SNVS |
				  ROHM_DVS_LEVEL_SUSPEND | ROHM_DVS_LEVEL_LPSR,
	.run_reg		= BD71815_REG_BUCK1_VOLT_H,
	.run_mask		= BD71815_VOLT_MASK,
	.run_on_mask		= BD71815_BUCK_RUN_ON,
	.snvs_on_mask		= BD71815_BUCK_SNVS_ON,
	.suspend_reg		= BD71815_REG_BUCK1_VOLT_L,
	.suspend_mask		= BD71815_VOLT_MASK,
	.suspend_on_mask	= BD71815_BUCK_SUSP_ON,
	.lpsr_reg		= BD71815_REG_BUCK1_VOLT_L,
	.lpsr_mask		= BD71815_VOLT_MASK,
	.lpsr_on_mask		= BD71815_BUCK_LPSR_ON,
};

static const struct rohm_dvs_config buck2_dvs = {
	.level_map		= ROHM_DVS_LEVEL_RUN | ROHM_DVS_LEVEL_SNVS |
				  ROHM_DVS_LEVEL_SUSPEND | ROHM_DVS_LEVEL_LPSR,
	.run_reg		= BD71815_REG_BUCK2_VOLT_H,
	.run_mask		= BD71815_VOLT_MASK,
	.run_on_mask		= BD71815_BUCK_RUN_ON,
	.snvs_on_mask		= BD71815_BUCK_SNVS_ON,
	.suspend_reg		= BD71815_REG_BUCK2_VOLT_L,
	.suspend_mask		= BD71815_VOLT_MASK,
	.suspend_on_mask	= BD71815_BUCK_SUSP_ON,
	.lpsr_reg		= BD71815_REG_BUCK2_VOLT_L,
	.lpsr_mask		= BD71815_VOLT_MASK,
	.lpsr_on_mask		= BD71815_BUCK_LPSR_ON,
};

static const struct rohm_dvs_config buck3_dvs = {
	.level_map		= ROHM_DVS_LEVEL_RUN | ROHM_DVS_LEVEL_SNVS |
				  ROHM_DVS_LEVEL_SUSPEND | ROHM_DVS_LEVEL_LPSR,
	.run_reg		= BD71815_REG_BUCK3_VOLT,
	.run_mask		= BD71815_VOLT_MASK,
	.run_on_mask		= BD71815_BUCK_RUN_ON,
	.snvs_on_mask		= BD71815_BUCK_SNVS_ON,
	.suspend_on_mask	= BD71815_BUCK_SUSP_ON,
	.lpsr_on_mask		= BD71815_BUCK_LPSR_ON,
};

static const struct rohm_dvs_config buck4_dvs = {
	.level_map		= ROHM_DVS_LEVEL_RUN | ROHM_DVS_LEVEL_SNVS |
				  ROHM_DVS_LEVEL_SUSPEND | ROHM_DVS_LEVEL_LPSR,
	.run_reg		= BD71815_REG_BUCK4_VOLT,
	.run_mask		= BD71815_VOLT_MASK,
	.run_on_mask		= BD71815_BUCK_RUN_ON,
	.snvs_on_mask		= BD71815_BUCK_SNVS_ON,
	.suspend_on_mask	= BD71815_BUCK_SUSP_ON,
	.lpsr_on_mask		= BD71815_BUCK_LPSR_ON,
};
/* LDO DVS configs: one voltage register, per-state enable bits only */
static const struct rohm_dvs_config ldo1_dvs = {
	.level_map		= ROHM_DVS_LEVEL_RUN | ROHM_DVS_LEVEL_SNVS |
				  ROHM_DVS_LEVEL_SUSPEND | ROHM_DVS_LEVEL_LPSR,
	.run_reg		= BD71815_REG_LDO_MODE1,
	.run_mask		= BD71815_VOLT_MASK,
	.run_on_mask		= LDO1_RUN_ON,
	.snvs_on_mask		= LDO1_SNVS_ON,
	.suspend_on_mask	= LDO1_SUSP_ON,
	.lpsr_on_mask		= LDO1_LPSR_ON,
};

static const struct rohm_dvs_config ldo2_dvs = {
	.level_map		= ROHM_DVS_LEVEL_RUN | ROHM_DVS_LEVEL_SNVS |
				  ROHM_DVS_LEVEL_SUSPEND | ROHM_DVS_LEVEL_LPSR,
	.run_reg		= BD71815_REG_LDO_MODE2,
	.run_mask		= BD71815_VOLT_MASK,
	.run_on_mask		= LDO2_RUN_ON,
	.snvs_on_mask		= LDO2_SNVS_ON,
	.suspend_on_mask	= LDO2_SUSP_ON,
	.lpsr_on_mask		= LDO2_LPSR_ON,
};

static const struct rohm_dvs_config ldo3_dvs = {
	.level_map		= ROHM_DVS_LEVEL_RUN | ROHM_DVS_LEVEL_SNVS |
				  ROHM_DVS_LEVEL_SUSPEND | ROHM_DVS_LEVEL_LPSR,
	.run_reg		= BD71815_REG_LDO_MODE2,
	.run_mask		= BD71815_VOLT_MASK,
	.run_on_mask		= LDO3_RUN_ON,
	.snvs_on_mask		= LDO3_SNVS_ON,
	.suspend_on_mask	= LDO3_SUSP_ON,
	.lpsr_on_mask		= LDO3_LPSR_ON,
};

static const struct rohm_dvs_config ldo4_dvs = {
	.level_map		= ROHM_DVS_LEVEL_RUN | ROHM_DVS_LEVEL_SNVS |
				  ROHM_DVS_LEVEL_SUSPEND | ROHM_DVS_LEVEL_LPSR,
	.run_reg		= BD71815_REG_LDO_MODE3,
	.run_mask		= BD71815_VOLT_MASK,
	.run_on_mask		= LDO4_RUN_ON,
	.snvs_on_mask		= LDO4_SNVS_ON,
	.suspend_on_mask	= LDO4_SUSP_ON,
	.lpsr_on_mask		= LDO4_LPSR_ON,
};

static const struct rohm_dvs_config ldo5_dvs = {
	.level_map		= ROHM_DVS_LEVEL_RUN | ROHM_DVS_LEVEL_SNVS |
				  ROHM_DVS_LEVEL_SUSPEND | ROHM_DVS_LEVEL_LPSR,
	.run_reg		= BD71815_REG_LDO_MODE3,
	.run_mask		= BD71815_VOLT_MASK,
	.run_on_mask		= LDO5_RUN_ON,
	.snvs_on_mask		= LDO5_SNVS_ON,
	.suspend_on_mask	= LDO5_SUSP_ON,
	.lpsr_on_mask		= LDO5_LPSR_ON,
};

static const struct rohm_dvs_config dvref_dvs = {
	.level_map		= ROHM_DVS_LEVEL_RUN | ROHM_DVS_LEVEL_SNVS |
				  ROHM_DVS_LEVEL_SUSPEND | ROHM_DVS_LEVEL_LPSR,
	.run_on_mask		= DVREF_RUN_ON,
	.snvs_on_mask		= DVREF_SNVS_ON,
	.suspend_on_mask	= DVREF_SUSP_ON,
	.lpsr_on_mask		= DVREF_LPSR_ON,
};

/*
 * NOTE(review): this config reuses the DVREF_* masks although the rail
 * is LDOLPSR (the desc below uses LDO_LPSR_RUN_ON). Looks suspicious —
 * confirm against the BD71815 datasheet whether LDO_LPSR_* bits were
 * intended here or the bit positions happen to coincide.
 */
static const struct rohm_dvs_config ldolpsr_dvs = {
	.level_map		= ROHM_DVS_LEVEL_RUN | ROHM_DVS_LEVEL_SNVS |
				  ROHM_DVS_LEVEL_SUSPEND | ROHM_DVS_LEVEL_LPSR,
	.run_on_mask		= DVREF_RUN_ON,
	.snvs_on_mask		= DVREF_SNVS_ON,
	.suspend_on_mask	= DVREF_SUSP_ON,
	.lpsr_on_mask		= DVREF_LPSR_ON,
};

static const struct rohm_dvs_config buck5_dvs = {
	.level_map		= ROHM_DVS_LEVEL_RUN | ROHM_DVS_LEVEL_SNVS |
				  ROHM_DVS_LEVEL_SUSPEND | ROHM_DVS_LEVEL_LPSR,
	.run_reg		= BD71815_REG_BUCK5_VOLT,
	.run_mask		= BD71815_VOLT_MASK,
	.run_on_mask		= BD71815_BUCK_RUN_ON,
	.snvs_on_mask		= BD71815_BUCK_SNVS_ON,
	.suspend_on_mask	= BD71815_BUCK_SUSP_ON,
	.lpsr_on_mask		= BD71815_BUCK_LPSR_ON,
};
/*
 * of_parse_cb: program the HW run-level voltages/enables from the
 * DVS description attached to this regulator's descriptor.
 */
static int set_hw_dvs_levels(struct device_node *np,
			     const struct regulator_desc *desc,
			     struct regulator_config *cfg)
{
	struct bd71815_regulator *reg =
		container_of(desc, struct bd71815_regulator, desc);

	return rohm_regulator_set_dvs_levels(reg->dvs, np, desc, cfg->regmap);
}
/*
* Bucks 1 and 2 have two voltage selection registers where selected
* voltage can be set. Which of the registers is used can be either controlled
* by a control bit in register - or by HW state. If HW state specific voltages
* are given - then we assume HW state based control should be used.
*
 * If voltage value is updated to currently selected register - then output
* voltage is immediately changed no matter what is set as ramp rate. Thus we
* default changing voltage by writing new value to inactive register and
* then updating the 'register selection' bit. This naturally only works when
* HW state machine is not used to select the voltage.
*/
/*
 * of_parse_cb for BUCK1/2: if any DVS voltage property is present,
 * migrate the active voltage to the _H register (if needed), program
 * the DVS levels, and hand voltage selection over to the HW state
 * machine (STBY_DVS). See the comment block above for the rationale.
 */
static int buck12_set_hw_dvs_levels(struct device_node *np,
				    const struct regulator_desc *desc,
				    struct regulator_config *cfg)
{
	struct bd71815_regulator *data;
	int ret = 0, val;

	data = container_of(desc, struct bd71815_regulator, desc);

	/* Only engage the HW state machine if DT gives state voltages */
	if (of_property_present(np, "rohm,dvs-run-voltage") ||
	    of_property_present(np, "rohm,dvs-suspend-voltage") ||
	    of_property_present(np, "rohm,dvs-lpsr-voltage") ||
	    of_property_present(np, "rohm,dvs-snvs-voltage")) {
		ret = regmap_read(cfg->regmap, desc->vsel_reg, &val);
		if (ret)
			return ret;

		if (!(BD71815_BUCK_STBY_DVS & val) &&
		    !(BD71815_BUCK_DVSSEL & val)) {
			int val2;

			/*
			 * We are currently using voltage from _L.
			 * We'd better copy it to _H and switch to it to
			 * avoid shutting us down if LPSR or SUSPEND is set to
			 * disabled. _L value is at reg _H + 1
			 */
			ret = regmap_read(cfg->regmap, desc->vsel_reg + 1,
					  &val2);
			if (ret)
				return ret;

			/* Copy _L selector into _H and select _H */
			ret = regmap_update_bits(cfg->regmap, desc->vsel_reg,
						 BD71815_VOLT_MASK |
						 BD71815_BUCK_DVSSEL,
						 val2 | BD71815_BUCK_DVSSEL);
			if (ret)
				return ret;
		}
		ret = rohm_regulator_set_dvs_levels(data->dvs, np, desc,
						    cfg->regmap);
		if (ret)
			return ret;

		/*
		 * DVS levels were given => use HW-state machine for voltage
		 * controls. NOTE: AFAIK, This means that if voltage is changed
		 * by SW the ramp-rate is not respected. Should we disable
		 * SW voltage control when the HW state machine is used?
		 */
		ret = regmap_update_bits(cfg->regmap, desc->vsel_reg,
					 BD71815_BUCK_STBY_DVS,
					 BD71815_BUCK_STBY_DVS);
	}

	return ret;
}
/*
 * BUCK1/2
 * BUCK1RAMPRATE[1:0] BUCK1 DVS ramp rate setting
 * 00: 10.00mV/usec	10mV 1uS
 * 01: 5.00mV/usec	10mV 2uS
 * 10: 2.50mV/usec	10mV 4uS
 * 11: 1.25mV/usec	10mV 8uS
 */
/* Ramp rates in uV/us * 100, indexed by the BUCKxRAMPRATE field value */
static const unsigned int bd7181x_ramp_table[] = { 1250, 2500, 5000, 10000 };
/*
 * Set the WLED sink current. Writing the current-select field can
 * spuriously flip the enable state on this chip; detect that and
 * toggle the state back as a workaround.
 */
static int bd7181x_led_set_current_limit(struct regulator_dev *rdev,
					 int min_uA, int max_uA)
{
	int was_enabled = regulator_is_enabled_regmap(rdev);
	int ret;

	ret = regulator_set_current_limit_regmap(rdev, min_uA, max_uA);
	if (ret)
		return ret;

	/* No spurious state change — nothing to undo */
	if (regulator_is_enabled_regmap(rdev) == was_enabled)
		return ret;

	/* HW FIX: restore the enable state observed before the write */
	ret = was_enabled ? regulator_enable_regmap(rdev)
			  : regulator_disable_regmap(rdev);
	if (ret)
		dev_err(rdev_get_dev(rdev),
			"failed to revert the LED state (%d)\n", ret);

	return ret;
}
/*
 * Read the active voltage selector for BUCK1/2, honouring which of the
 * two voltage registers (_H/_L) is currently in use.
 */
static int bd7181x_buck12_get_voltage_sel(struct regulator_dev *rdev)
{
	int rid = rdev_get_id(rdev);
	int ret, regh, regl, val;

	/* Per-buck register pairs are 2 registers apart */
	regh = BD71815_REG_BUCK1_VOLT_H + rid * 0x2;
	regl = BD71815_REG_BUCK1_VOLT_L + rid * 0x2;

	ret = regmap_read(rdev->regmap, regh, &val);
	if (ret)
		return ret;

	/*
	 * If we use HW state machine based voltage reg selection - then we
	 * return BD71815_REG_BUCK1_VOLT_H which is used at RUN.
	 * Else we do return the BD71815_REG_BUCK1_VOLT_H or
	 * BD71815_REG_BUCK1_VOLT_L depending on which is selected to be used
	 * by BD71815_BUCK_DVSSEL bit
	 */
	if ((!(val & BD71815_BUCK_STBY_DVS)) && (!(val & BD71815_BUCK_DVSSEL)))
		ret = regmap_read(rdev->regmap, regl, &val);

	if (ret)
		return ret;

	return val & BD71815_VOLT_MASK;
}
/*
 * For Buck 1/2.
 *
 * Write a new voltage selector. When the HW state machine controls the
 * register choice, write the RUN (_H) register directly. Otherwise,
 * write the currently-unused register and then flip DVSSEL so the new
 * value takes effect via the register swap (respects the ramp rate).
 */
static int bd7181x_buck12_set_voltage_sel(struct regulator_dev *rdev,
					  unsigned int sel)
{
	int rid = rdev_get_id(rdev);
	int ret, val, reg, regh, regl;

	regh = BD71815_REG_BUCK1_VOLT_H + rid*0x2;
	regl = BD71815_REG_BUCK1_VOLT_L + rid*0x2;

	ret = regmap_read(rdev->regmap, regh, &val);
	if (ret)
		return ret;

	/*
	 * If bucks 1 & 2 are controlled by state machine - then the RUN state
	 * voltage is set to BD71815_REG_BUCK1_VOLT_H. Changing SUSPEND/LPSR
	 * voltages at runtime is not supported by this driver.
	 */
	if (((val & BD71815_BUCK_STBY_DVS))) {
		return regmap_update_bits(rdev->regmap, regh, BD71815_VOLT_MASK,
					  sel);
	}
	/* Update new voltage to the register which is not selected now */
	if (val & BD71815_BUCK_DVSSEL)
		reg = regl;
	else
		reg = regh;

	ret = regmap_update_bits(rdev->regmap, reg, BD71815_VOLT_MASK, sel);
	if (ret)
		return ret;

	/*
	 * Select the other DVS register to be used: ~val inverts the
	 * current DVSSEL bit (update_bits masks out the other bits).
	 */
	return regmap_update_bits(rdev->regmap, regh, BD71815_BUCK_DVSSEL,
				  ~val);
}
/* Linear-voltage LDOs (LDO1..5) */
static const struct regulator_ops bd7181x_ldo_regulator_ops = {
	.enable = regulator_enable_regmap,
	.disable = regulator_disable_regmap,
	.is_enabled = regulator_is_enabled_regmap,
	.list_voltage = regulator_list_voltage_linear,
	.set_voltage_sel = regulator_set_voltage_sel_regmap,
	.get_voltage_sel = regulator_get_voltage_sel_regmap,
};

/* Fixed-voltage rails (ldodvref, ldolpsr) — no voltage setting */
static const struct regulator_ops bd7181x_fixed_regulator_ops = {
	.enable = regulator_enable_regmap,
	.disable = regulator_disable_regmap,
	.is_enabled = regulator_is_enabled_regmap,
	.list_voltage = regulator_list_voltage_linear,
};

/* BUCK3..5: single voltage register */
static const struct regulator_ops bd7181x_buck_regulator_ops = {
	.enable = regulator_enable_regmap,
	.disable = regulator_disable_regmap,
	.is_enabled = regulator_is_enabled_regmap,
	.list_voltage = regulator_list_voltage_linear,
	.set_voltage_sel = regulator_set_voltage_sel_regmap,
	.get_voltage_sel = regulator_get_voltage_sel_regmap,
	.set_voltage_time_sel = regulator_set_voltage_time_sel,
};

/* BUCK1/2: dual voltage registers with DVSSEL swap and ramp control */
static const struct regulator_ops bd7181x_buck12_regulator_ops = {
	.enable = regulator_enable_regmap,
	.disable = regulator_disable_regmap,
	.is_enabled = regulator_is_enabled_regmap,
	.list_voltage = regulator_list_voltage_linear,
	.set_voltage_sel = bd7181x_buck12_set_voltage_sel,
	.get_voltage_sel = bd7181x_buck12_get_voltage_sel,
	.set_voltage_time_sel = regulator_set_voltage_time_sel,
	.set_ramp_delay = regulator_set_ramp_delay_regmap,
};

/* WLED current sink (current regulator, not voltage) */
static const struct regulator_ops bd7181x_led_regulator_ops = {
	.enable = regulator_enable_regmap,
	.disable = regulator_disable_regmap,
	.is_enabled = regulator_is_enabled_regmap,
	.set_current_limit = bd7181x_led_set_current_limit,
	.get_current_limit = regulator_get_current_limit_regmap,
};
/* Fixed-voltage rail descriptor (single voltage, enable bit only) */
#define BD71815_FIXED_REG(_name, _id, ereg, emsk, voltage, _dvs)	\
	[(_id)] = {							\
		.desc = {						\
			.name = #_name,					\
			.of_match = of_match_ptr(#_name),		\
			.regulators_node = of_match_ptr("regulators"),	\
			.n_voltages = 1,				\
			.ops = &bd7181x_fixed_regulator_ops,		\
			.type = REGULATOR_VOLTAGE,			\
			.id = (_id),					\
			.owner = THIS_MODULE,				\
			.min_uV = (voltage),				\
			.enable_reg = (ereg),				\
			.enable_mask = (emsk),				\
			.of_parse_cb = set_hw_dvs_levels,		\
		},							\
		.dvs = (_dvs),						\
	}

/* Linear-voltage buck descriptor (BUCK3..5) */
#define BD71815_BUCK_REG(_name, _id, vsel, ereg, min, max, step, _dvs)	\
	[(_id)] = {							\
		.desc = {						\
			.name = #_name,					\
			.of_match = of_match_ptr(#_name),		\
			.regulators_node = of_match_ptr("regulators"),	\
			.n_voltages = ((max) - (min)) / (step) + 1,	\
			.ops = &bd7181x_buck_regulator_ops,		\
			.type = REGULATOR_VOLTAGE,			\
			.id = (_id),					\
			.owner = THIS_MODULE,				\
			.min_uV = (min),				\
			.uV_step = (step),				\
			.vsel_reg = (vsel),				\
			.vsel_mask = BD71815_VOLT_MASK,			\
			.enable_reg = (ereg),				\
			.enable_mask = BD71815_BUCK_RUN_ON,		\
			.of_parse_cb = set_hw_dvs_levels,		\
		},							\
		.dvs = (_dvs),						\
	}

/* BUCK1/2 descriptor: adds ramp-rate control and the DVS of_parse_cb */
#define BD71815_BUCK12_REG(_name, _id, vsel, ereg, min, max, step,	\
			   _dvs)					\
	[(_id)] = {							\
		.desc = {						\
			.name = #_name,					\
			.of_match = of_match_ptr(#_name),		\
			.regulators_node = of_match_ptr("regulators"),	\
			.n_voltages = ((max) - (min)) / (step) + 1,	\
			.ops = &bd7181x_buck12_regulator_ops,		\
			.type = REGULATOR_VOLTAGE,			\
			.id = (_id),					\
			.owner = THIS_MODULE,				\
			.min_uV = (min),				\
			.uV_step = (step),				\
			.vsel_reg = (vsel),				\
			.vsel_mask = BD71815_VOLT_MASK,			\
			.enable_reg = (ereg),				\
			.enable_mask = BD71815_BUCK_RUN_ON,		\
			.ramp_reg = (ereg),				\
			.ramp_mask = BD71815_BUCK_RAMPRATE_MASK,	\
			.ramp_delay_table = bd7181x_ramp_table,		\
			.n_ramp_values = ARRAY_SIZE(bd7181x_ramp_table),\
			.of_parse_cb = buck12_set_hw_dvs_levels,	\
		},							\
		.dvs = (_dvs),						\
	}

/* WLED current-sink descriptor (current regulator) */
#define BD71815_LED_REG(_name, _id, csel, mask, ereg, emsk, currents)	\
	[(_id)] = {							\
		.desc = {						\
			.name = #_name,					\
			.of_match = of_match_ptr(#_name),		\
			.regulators_node = of_match_ptr("regulators"),	\
			.n_current_limits = ARRAY_SIZE(currents),	\
			.ops = &bd7181x_led_regulator_ops,		\
			.type = REGULATOR_CURRENT,			\
			.id = (_id),					\
			.owner = THIS_MODULE,				\
			.curr_table = currents,				\
			.csel_reg = (csel),				\
			.csel_mask = (mask),				\
			.enable_reg = (ereg),				\
			.enable_mask = (emsk),				\
		},							\
	}

/* Linear-voltage LDO descriptor */
#define BD71815_LDO_REG(_name, _id, vsel, ereg, emsk, min, max, step,	\
			_dvs)						\
	[(_id)] = {							\
		.desc = {						\
			.name = #_name,					\
			.of_match = of_match_ptr(#_name),		\
			.regulators_node = of_match_ptr("regulators"),	\
			.n_voltages = ((max) - (min)) / (step) + 1,	\
			.ops = &bd7181x_ldo_regulator_ops,		\
			.type = REGULATOR_VOLTAGE,			\
			.id = (_id),					\
			.owner = THIS_MODULE,				\
			.min_uV = (min),				\
			.uV_step = (step),				\
			.vsel_reg = (vsel),				\
			.vsel_mask = BD71815_VOLT_MASK,			\
			.enable_reg = (ereg),				\
			.enable_mask = (emsk),				\
			.of_parse_cb = set_hw_dvs_levels,		\
		},							\
		.dvs = (_dvs),						\
	}
/* All BD71815 rails, indexed by regulator ID */
static const struct bd71815_regulator bd71815_regulators[] = {
	BD71815_BUCK12_REG(buck1, BD71815_BUCK1, BD71815_REG_BUCK1_VOLT_H,
			   BD71815_REG_BUCK1_MODE, 800000, 2000000, 25000,
			   &buck1_dvs),
	BD71815_BUCK12_REG(buck2, BD71815_BUCK2, BD71815_REG_BUCK2_VOLT_H,
			   BD71815_REG_BUCK2_MODE, 800000, 2000000, 25000,
			   &buck2_dvs),
	BD71815_BUCK_REG(buck3, BD71815_BUCK3, BD71815_REG_BUCK3_VOLT,
			 BD71815_REG_BUCK3_MODE, 1200000, 2700000, 50000,
			 &buck3_dvs),
	BD71815_BUCK_REG(buck4, BD71815_BUCK4, BD71815_REG_BUCK4_VOLT,
			 BD71815_REG_BUCK4_MODE, 1100000, 1850000, 25000,
			 &buck4_dvs),
	BD71815_BUCK_REG(buck5, BD71815_BUCK5, BD71815_REG_BUCK5_VOLT,
			 BD71815_REG_BUCK5_MODE, 1800000, 3300000, 50000,
			 &buck5_dvs),
	BD71815_LDO_REG(ldo1, BD71815_LDO1, BD71815_REG_LDO1_VOLT,
			BD71815_REG_LDO_MODE1, LDO1_RUN_ON, 800000, 3300000,
			50000, &ldo1_dvs),
	BD71815_LDO_REG(ldo2, BD71815_LDO2, BD71815_REG_LDO2_VOLT,
			BD71815_REG_LDO_MODE2, LDO2_RUN_ON, 800000, 3300000,
			50000, &ldo2_dvs),
	/*
	 * Let's default LDO3 to be enabled by SW. We can override ops if DT
	 * says LDO3 should be enabled by HW when DCIN is connected.
	 */
	BD71815_LDO_REG(ldo3, BD71815_LDO3, BD71815_REG_LDO3_VOLT,
			BD71815_REG_LDO_MODE2, LDO3_RUN_ON, 800000, 3300000,
			50000, &ldo3_dvs),
	BD71815_LDO_REG(ldo4, BD71815_LDO4, BD71815_REG_LDO4_VOLT,
			BD71815_REG_LDO_MODE3, LDO4_RUN_ON, 800000, 3300000,
			50000, &ldo4_dvs),
	BD71815_LDO_REG(ldo5, BD71815_LDO5, BD71815_REG_LDO5_VOLT_H,
			BD71815_REG_LDO_MODE3, LDO5_RUN_ON, 800000, 3300000,
			50000, &ldo5_dvs),
	BD71815_FIXED_REG(ldodvref, BD71815_LDODVREF, BD71815_REG_LDO_MODE4,
			  DVREF_RUN_ON, 3000000, &dvref_dvs),
	BD71815_FIXED_REG(ldolpsr, BD71815_LDOLPSR, BD71815_REG_LDO_MODE4,
			  LDO_LPSR_RUN_ON, 1800000, &ldolpsr_dvs),
	BD71815_LED_REG(wled, BD71815_WLED, BD71815_REG_LED_DIMM, LED_DIMM_MASK,
			BD71815_REG_LED_CTRL, LED_RUN_ON,
			bd7181x_wled_currents),
};
/*
 * Register all BD71815 regulators. The optional "rohm,vsel" GPIO (from
 * the parent MFD node) is wired as LDO4's enable control.
 */
static int bd7181x_probe(struct platform_device *pdev)
{
	struct regulator_config config = {};
	int i, ret;
	struct gpio_desc *ldo4_en;
	struct regmap *regmap;

	regmap = dev_get_regmap(pdev->dev.parent, NULL);
	if (!regmap) {
		dev_err(&pdev->dev, "No parent regmap\n");
		return -ENODEV;
	}

	/* GPIO is optional: -ENOENT (not specified) is tolerated */
	ldo4_en = devm_fwnode_gpiod_get(&pdev->dev,
					dev_fwnode(pdev->dev.parent),
					"rohm,vsel", GPIOD_ASIS, "ldo4-en");
	if (IS_ERR(ldo4_en)) {
		ret = PTR_ERR(ldo4_en);
		if (ret != -ENOENT)
			return ret;
		ldo4_en = NULL;
	}

	/* Disable to go to ship-mode (clear restart-enable on power-off) */
	ret = regmap_update_bits(regmap, BD71815_REG_PWRCTRL, RESTARTEN, 0);
	if (ret)
		return ret;

	config.dev = pdev->dev.parent;
	config.regmap = regmap;

	for (i = 0; i < BD71815_REGULATOR_CNT; i++) {
		const struct regulator_desc *desc;
		struct regulator_dev *rdev;

		desc = &bd71815_regulators[i].desc;

		/* Only LDO4 is gated by the optional enable GPIO */
		if (i == BD71815_LDO4)
			config.ena_gpiod = ldo4_en;
		else
			config.ena_gpiod = NULL;

		rdev = devm_regulator_register(&pdev->dev, desc, &config);
		if (IS_ERR(rdev))
			return dev_err_probe(&pdev->dev, PTR_ERR(rdev),
					     "failed to register %s regulator\n",
					     desc->name);
	}
	return 0;
}
static const struct platform_device_id bd7181x_pmic_id[] = {
	{ "bd71815-pmic", ROHM_CHIP_TYPE_BD71815 },
	{ },
};
MODULE_DEVICE_TABLE(platform, bd7181x_pmic_id);

static struct platform_driver bd7181x_regulator = {
	.driver = {
		.name = "bd7181x-pmic",
		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
	},
	.probe = bd7181x_probe,
	.id_table = bd7181x_pmic_id,
};
module_platform_driver(bd7181x_regulator);

MODULE_AUTHOR("Tony Luo <[email protected]>")
MODULE_DESCRIPTION("BD71815 voltage regulator driver");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:bd7181x-pmic");
| linux-master | drivers/regulator/bd71815-regulator.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Device driver for RT5739 regulator
*
* Copyright (C) 2023 Richtek Technology Corp.
*
* Author: ChiYuan Huang <[email protected]>
*/
#include <linux/bits.h>
#include <linux/gpio/consumer.h>
#include <linux/i2c.h>
#include <linux/kernel.h>
#include <linux/mod_devicetable.h>
#include <linux/property.h>
#include <linux/regmap.h>
#include <linux/regulator/driver.h>
#include <linux/regulator/of_regulator.h>
#define RT5739_AUTO_MODE 0
#define RT5739_FPWM_MODE 1
#define RT5739_REG_NSEL0 0x00
#define RT5739_REG_NSEL1 0x01
#define RT5739_REG_CNTL1 0x02
#define RT5739_REG_ID1 0x03
#define RT5739_REG_CNTL2 0x06
#define RT5739_REG_CNTL4 0x08
#define RT5739_VSEL_MASK GENMASK(7, 0)
#define RT5739_MODEVSEL1_MASK BIT(1)
#define RT5739_MODEVSEL0_MASK BIT(0)
#define RT5739_VID_MASK GENMASK(7, 5)
#define RT5739_DID_MASK GENMASK(3, 0)
#define RT5739_ACTD_MASK BIT(7)
#define RT5739_ENVSEL1_MASK BIT(1)
#define RT5739_ENVSEL0_MASK BIT(0)
#define RT5733_CHIPDIE_ID 0x1
#define RT5733_VOLT_MINUV 270000
#define RT5733_VOLT_MAXUV 1401250
#define RT5733_VOLT_STPUV 6250
#define RT5733_N_VOLTS 182
#define RT5739_VOLT_MINUV 300000
#define RT5739_VOLT_MAXUV 1300000
#define RT5739_VOLT_STPUV 5000
#define RT5739_N_VOLTS 201
#define RT5739_I2CRDY_TIMEUS 1000
/* Set the operating mode bit of the ACTIVE VSEL bank (FAST = forced PWM). */
static int rt5739_set_mode(struct regulator_dev *rdev, unsigned int mode)
{
	struct regmap *regmap = rdev_get_regmap(rdev);
	unsigned int mask, val;

	/* Pick the MODE bit matching the bank used at runtime */
	mask = (rdev->desc->vsel_reg == RT5739_REG_NSEL0) ?
	       RT5739_MODEVSEL0_MASK : RT5739_MODEVSEL1_MASK;

	if (mode == REGULATOR_MODE_FAST)
		val = mask;
	else if (mode == REGULATOR_MODE_NORMAL)
		val = 0;
	else
		return -EINVAL;

	return regmap_update_bits(regmap, RT5739_REG_CNTL1, mask, val);
}
/* Report the operating mode of the active VSEL bank. */
static unsigned int rt5739_get_mode(struct regulator_dev *rdev)
{
	struct regmap *regmap = rdev_get_regmap(rdev);
	unsigned int mask, val;

	mask = (rdev->desc->vsel_reg == RT5739_REG_NSEL0) ?
	       RT5739_MODEVSEL0_MASK : RT5739_MODEVSEL1_MASK;

	if (regmap_read(regmap, RT5739_REG_CNTL1, &val))
		return REGULATOR_MODE_INVALID;

	return (val & mask) ? REGULATOR_MODE_FAST : REGULATOR_MODE_NORMAL;
}
/* Program the suspend voltage into the VSEL bank NOT used at runtime. */
static int rt5739_set_suspend_voltage(struct regulator_dev *rdev, int uV)
{
	const struct regulator_desc *desc = rdev->desc;
	struct regmap *regmap = rdev_get_regmap(rdev);
	int max_uV = desc->min_uV + desc->uV_step * (desc->n_voltages - 1);
	unsigned int reg;

	if (uV < desc->min_uV || uV > max_uV)
		return -EINVAL;

	/* Suspend settings live in the opposite bank */
	reg = (desc->vsel_reg == RT5739_REG_NSEL0) ?
	      RT5739_REG_NSEL1 : RT5739_REG_NSEL0;

	return regmap_write(regmap, reg, (uV - desc->min_uV) / desc->uV_step);
}
/* Set the enable bit of the suspend (opposite) VSEL bank. */
static int rt5739_set_suspend_enable(struct regulator_dev *rdev)
{
	const struct regulator_desc *desc = rdev->desc;
	struct regmap *regmap = rdev_get_regmap(rdev);
	unsigned int mask;

	/* Runtime on NSEL0 means suspend uses NSEL1, and vice versa */
	mask = (desc->vsel_reg == RT5739_REG_NSEL0) ?
	       RT5739_ENVSEL1_MASK : RT5739_ENVSEL0_MASK;

	return regmap_update_bits(regmap, desc->enable_reg, mask, mask);
}
/* Clear the enable bit of the suspend (opposite) VSEL bank. */
static int rt5739_set_suspend_disable(struct regulator_dev *rdev)
{
	const struct regulator_desc *desc = rdev->desc;
	struct regmap *regmap = rdev_get_regmap(rdev);
	unsigned int mask;

	/* Runtime on NSEL0 means suspend uses NSEL1, and vice versa */
	mask = (desc->vsel_reg == RT5739_REG_NSEL0) ?
	       RT5739_ENVSEL1_MASK : RT5739_ENVSEL0_MASK;

	return regmap_update_bits(regmap, desc->enable_reg, mask, 0);
}
/*
 * Set the operating mode of the SUSPEND bank. Note the mask selection
 * is deliberately inverted relative to rt5739_set_mode(): when NSEL0 is
 * the runtime bank, NSEL1 (MODEVSEL1) is the suspend bank.
 */
static int rt5739_set_suspend_mode(struct regulator_dev *rdev,
				   unsigned int mode)
{
	const struct regulator_desc *desc = rdev->desc;
	struct regmap *regmap = rdev_get_regmap(rdev);
	unsigned int mask, val;

	if (desc->vsel_reg == RT5739_REG_NSEL0)
		mask = RT5739_MODEVSEL1_MASK;
	else
		mask = RT5739_MODEVSEL0_MASK;

	switch (mode) {
	case REGULATOR_MODE_FAST:
		val = mask;
		break;
	case REGULATOR_MODE_NORMAL:
		val = 0;
		break;
	default:
		return -EINVAL;
	}

	return regmap_update_bits(regmap, RT5739_REG_CNTL1, mask, val);
}
/* Single buck with dual VSEL banks; suspend ops target the inactive bank */
static const struct regulator_ops rt5739_regulator_ops = {
	.list_voltage = regulator_list_voltage_linear,
	.get_voltage_sel = regulator_get_voltage_sel_regmap,
	.set_voltage_sel = regulator_set_voltage_sel_regmap,
	.enable = regulator_enable_regmap,
	.disable = regulator_disable_regmap,
	.is_enabled = regulator_is_enabled_regmap,
	.set_active_discharge = regulator_set_active_discharge_regmap,
	.set_mode = rt5739_set_mode,
	.get_mode = rt5739_get_mode,
	.set_suspend_voltage = rt5739_set_suspend_voltage,
	.set_suspend_enable = rt5739_set_suspend_enable,
	.set_suspend_disable = rt5739_set_suspend_disable,
	.set_suspend_mode = rt5739_set_suspend_mode,
};
/* Translate raw DT mode values into regulator framework mode flags. */
static unsigned int rt5739_of_map_mode(unsigned int mode)
{
	if (mode == RT5739_AUTO_MODE)
		return REGULATOR_MODE_NORMAL;
	if (mode == RT5739_FPWM_MODE)
		return REGULATOR_MODE_FAST;

	return REGULATOR_MODE_INVALID;
}
/*
 * Fill in the regulator descriptor. The runtime VSEL bank is chosen by
 * the board-level VSEL pin polarity (vsel_active_high), and the voltage
 * range by the die ID (RT5733 vs RT5739).
 */
static void rt5739_init_regulator_desc(struct regulator_desc *desc,
				       bool vsel_active_high, u8 did)
{
	/* Fixed */
	desc->name = "rt5739-regulator";
	desc->owner = THIS_MODULE;
	desc->ops = &rt5739_regulator_ops;
	desc->vsel_mask = RT5739_VSEL_MASK;
	desc->enable_reg = RT5739_REG_CNTL2;
	desc->active_discharge_reg = RT5739_REG_CNTL1;
	desc->active_discharge_mask = RT5739_ACTD_MASK;
	desc->active_discharge_on = RT5739_ACTD_MASK;
	desc->of_map_mode = rt5739_of_map_mode;

	/* Assigned by vsel level */
	if (vsel_active_high) {
		desc->vsel_reg = RT5739_REG_NSEL1;
		desc->enable_mask = RT5739_ENVSEL1_MASK;
	} else {
		desc->vsel_reg = RT5739_REG_NSEL0;
		desc->enable_mask = RT5739_ENVSEL0_MASK;
	}

	/* Assigned by CHIPDIE ID */
	switch (did) {
	case RT5733_CHIPDIE_ID:
		desc->n_voltages = RT5733_N_VOLTS;
		desc->min_uV = RT5733_VOLT_MINUV;
		desc->uV_step = RT5733_VOLT_STPUV;
		break;
	default:
		desc->n_voltages = RT5739_N_VOLTS;
		desc->min_uV = RT5739_VOLT_MINUV;
		desc->uV_step = RT5739_VOLT_STPUV;
		break;
	}
}
/* 8-bit register / 8-bit value I2C regmap */
static const struct regmap_config rt5739_regmap_config = {
	.name = "rt5739",
	.reg_bits = 8,
	.val_bits = 8,
	.max_register = RT5739_REG_CNTL4,
};
/*
 * Probe: optionally assert the enable GPIO (waiting for I2C readiness),
 * identify the die from the ID register, build the descriptor and
 * register the single regulator.
 */
static int rt5739_probe(struct i2c_client *i2c)
{
	struct device *dev = &i2c->dev;
	struct regulator_desc *desc;
	struct regmap *regmap;
	struct gpio_desc *enable_gpio;
	struct regulator_config cfg = {};
	struct regulator_dev *rdev;
	bool vsel_acth;
	unsigned int vid;
	int ret;

	desc = devm_kzalloc(dev, sizeof(*desc), GFP_KERNEL);
	if (!desc)
		return -ENOMEM;

	enable_gpio = devm_gpiod_get_optional(dev, "enable", GPIOD_OUT_HIGH);
	if (IS_ERR(enable_gpio))
		return dev_err_probe(dev, PTR_ERR(enable_gpio), "Failed to get 'enable' gpio\n");
	else if (enable_gpio)
		/* Give the chip time to come up before talking I2C */
		usleep_range(RT5739_I2CRDY_TIMEUS, RT5739_I2CRDY_TIMEUS + 1000);

	regmap = devm_regmap_init_i2c(i2c, &rt5739_regmap_config);
	if (IS_ERR(regmap))
		return dev_err_probe(dev, PTR_ERR(regmap), "Failed to init regmap\n");

	ret = regmap_read(regmap, RT5739_REG_ID1, &vid);
	if (ret)
		return dev_err_probe(dev, ret, "Failed to read VID\n");

	/* RT5739: (VID & MASK) must be 0 */
	if (vid & RT5739_VID_MASK)
		return dev_err_probe(dev, -ENODEV, "Incorrect VID (0x%02x)\n", vid);

	vsel_acth = device_property_read_bool(dev, "richtek,vsel-active-high");

	rt5739_init_regulator_desc(desc, vsel_acth, vid & RT5739_DID_MASK);

	cfg.dev = dev;
	cfg.of_node = dev_of_node(dev);
	cfg.init_data = of_get_regulator_init_data(dev, dev_of_node(dev), desc);
	rdev = devm_regulator_register(dev, desc, &cfg);
	if (IS_ERR(rdev))
		return dev_err_probe(dev, PTR_ERR(rdev), "Failed to register regulator\n");

	return 0;
}
static const struct of_device_id rt5739_device_table[] = {
	{ .compatible = "richtek,rt5733" },
	{ .compatible = "richtek,rt5739" },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, rt5739_device_table);

static struct i2c_driver rt5739_driver = {
	.driver = {
		.name = "rt5739",
		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
		.of_match_table = rt5739_device_table,
	},
	.probe = rt5739_probe,
};
module_i2c_driver(rt5739_driver);

MODULE_AUTHOR("ChiYuan Huang <[email protected]>");
MODULE_DESCRIPTION("Richtek RT5739 regulator driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/regulator/rt5739.c |
// SPDX-License-Identifier: GPL-2.0
//
// Device driver for regulators in Hi6421 IC
//
// Copyright (c) <2011-2014> HiSilicon Technologies Co., Ltd.
// http://www.hisilicon.com
// Copyright (c) <2013-2014> Linaro Ltd.
// https://www.linaro.org
//
// Author: Guodong Xu <[email protected]>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/of.h>
#include <linux/regmap.h>
#include <linux/regulator/driver.h>
#include <linux/regulator/machine.h>
#include <linux/regulator/of_regulator.h>
#include <linux/mfd/hi6421-pmic.h>
/**
 * struct hi6421_regulator_pdata - Hi6421 regulator data of platform device
 * @lock: mutex to serialize regulator enable
 */
struct hi6421_regulator_pdata {
	struct mutex lock;
};
/**
 * struct hi6421_regulator_info - hi6421 regulator information
 * @desc: regulator description
 * @mode_mask: ECO mode bitmask of LDOs; for BUCKs, this masks sleep
 * @eco_microamp: eco mode load upper limit (in uA), valid for LDOs only
 */
struct hi6421_regulator_info {
	struct regulator_desc desc;
	u8 mode_mask;
	u32 eco_microamp;
};
/* HI6421 regulators; ordinal values index hi6421_regulator_info[] below. */
enum hi6421_regulator_id {
	HI6421_LDO0,
	HI6421_LDO1,
	HI6421_LDO2,
	HI6421_LDO3,
	HI6421_LDO4,
	HI6421_LDO5,
	HI6421_LDO6,
	HI6421_LDO7,
	HI6421_LDO8,
	HI6421_LDO9,
	HI6421_LDO10,
	HI6421_LDO11,
	HI6421_LDO12,
	HI6421_LDO13,
	HI6421_LDO14,
	HI6421_LDO15,
	HI6421_LDO16,
	HI6421_LDO17,
	HI6421_LDO18,
	HI6421_LDO19,
	HI6421_LDO20,
	HI6421_LDOAUDIO,
	HI6421_BUCK0,
	HI6421_BUCK1,
	HI6421_BUCK2,
	HI6421_BUCK3,
	HI6421_BUCK4,
	HI6421_BUCK5,
	/* must be last: total number of regulators on this PMIC */
	HI6421_NUM_REGULATORS,
};
/* LDO 0, 4~7, 9~14, 16~20 have same voltage table. */
static const unsigned int ldo_0_voltages[] = {
	1500000, 1800000, 2400000, 2500000,
	2600000, 2700000, 2850000, 3000000,
};
/* LDO 8, 15 have same voltage table. */
static const unsigned int ldo_8_voltages[] = {
	1500000, 1800000, 2400000, 2600000,
	2700000, 2850000, 3000000, 3300000,
};
/* Ranges are sorted in ascending order. */
static const struct linear_range ldo_audio_volt_range[] = {
	REGULATOR_LINEAR_RANGE(2800000, 0, 3, 50000),
	REGULATOR_LINEAR_RANGE(3000000, 4, 7, 100000),
};
/* Per-BUCK discrete voltage tables (uV), ascending. */
static const unsigned int buck_3_voltages[] = {
	950000, 1050000, 1100000, 1117000,
	1134000, 1150000, 1167000, 1200000,
};
static const unsigned int buck_4_voltages[] = {
	1150000, 1200000, 1250000, 1350000,
	1700000, 1800000, 1900000, 2000000,
};
static const unsigned int buck_5_voltages[] = {
	1150000, 1200000, 1250000, 1350000,
	1600000, 1700000, 1800000, 1900000,
};
/* The ops tables are defined after the callback implementations below. */
static const struct regulator_ops hi6421_ldo_ops;
static const struct regulator_ops hi6421_ldo_linear_ops;
static const struct regulator_ops hi6421_ldo_linear_range_ops;
static const struct regulator_ops hi6421_buck012_ops;
static const struct regulator_ops hi6421_buck345_ops;
/* LDO enable ramp time, in uS */
#define HI6421_LDO_ENABLE_TIME (350)
/*
 * HI6421_LDO() - table-type LDO descriptor initializer
 * _id - LDO id name string
 * _match - of match name string
 * v_table - voltage table
 * vreg - voltage select register
 * vmask - voltage select mask
 * ereg - enable register
 * emask - enable mask
 * odelay - off/on delay time in uS
 * ecomask - eco mode mask
 * ecoamp - eco mode load upper limit in uA
 */
#define HI6421_LDO(_id, _match, v_table, vreg, vmask, ereg, emask,	\
		   odelay, ecomask, ecoamp)				\
	[HI6421_##_id] = {						\
		.desc = {						\
			.name = #_id,					\
			.of_match = #_match,				\
			.regulators_node = "regulators",		\
			.ops = &hi6421_ldo_ops,				\
			.type = REGULATOR_VOLTAGE,			\
			.id = HI6421_##_id,				\
			.owner = THIS_MODULE,				\
			.n_voltages = ARRAY_SIZE(v_table),		\
			.volt_table = v_table,				\
			.vsel_reg = HI6421_REG_TO_BUS_ADDR(vreg),	\
			.vsel_mask = vmask,				\
			.enable_reg = HI6421_REG_TO_BUS_ADDR(ereg),	\
			.enable_mask = emask,				\
			.enable_time = HI6421_LDO_ENABLE_TIME,		\
			.off_on_delay = odelay,				\
		},							\
		.mode_mask = ecomask,					\
		.eco_microamp = ecoamp,					\
	}
/* HI6421 LDO1~3 are linear voltage regulators at fixed uV_step
 *
 * _id - LDO id name string
 * _match - of match name string
 * _min_uV - minimum voltage supported in uV
 * n_volt - number of voltages available
 * vstep - voltage increase in each linear step in uV
 * vreg - voltage select register
 * vmask - voltage select mask
 * ereg - enable register
 * emask - enable mask
 * odelay - off/on delay time in uS
 * ecomask - eco mode mask
 * ecoamp - eco mode load upper limit in uA
 */
#define HI6421_LDO_LINEAR(_id, _match, _min_uV, n_volt, vstep, vreg, vmask,\
			  ereg, emask, odelay, ecomask, ecoamp)		\
	[HI6421_##_id] = {						\
		.desc = {						\
			.name = #_id,					\
			.of_match = #_match,				\
			.regulators_node = "regulators",		\
			.ops = &hi6421_ldo_linear_ops,			\
			.type = REGULATOR_VOLTAGE,			\
			.id = HI6421_##_id,				\
			.owner = THIS_MODULE,				\
			.min_uV = _min_uV,				\
			.n_voltages = n_volt,				\
			.uV_step = vstep,				\
			.vsel_reg = HI6421_REG_TO_BUS_ADDR(vreg),	\
			.vsel_mask = vmask,				\
			.enable_reg = HI6421_REG_TO_BUS_ADDR(ereg),	\
			.enable_mask = emask,				\
			.enable_time = HI6421_LDO_ENABLE_TIME,		\
			.off_on_delay = odelay,				\
		},							\
		.mode_mask = ecomask,					\
		.eco_microamp = ecoamp,					\
	}
/* HI6421 LDOAUDIO is a linear voltage regulator with two 4-step ranges
 *
 * _id - LDO id name string
 * _match - of match name string
 * n_volt - number of voltages available
 * volt_ranges - array of linear_range
 * vreg - voltage select register
 * vmask - voltage select mask
 * ereg - enable register
 * emask - enable mask
 * odelay - off/on delay time in uS
 * ecomask - eco mode mask
 * ecoamp - eco mode load upper limit in uA
 */
#define HI6421_LDO_LINEAR_RANGE(_id, _match, n_volt, volt_ranges, vreg, vmask,\
				ereg, emask, odelay, ecomask, ecoamp)	\
	[HI6421_##_id] = {						\
		.desc = {						\
			.name = #_id,					\
			.of_match = #_match,				\
			.regulators_node = "regulators",		\
			.ops = &hi6421_ldo_linear_range_ops,		\
			.type = REGULATOR_VOLTAGE,			\
			.id = HI6421_##_id,				\
			.owner = THIS_MODULE,				\
			.n_voltages = n_volt,				\
			.linear_ranges = volt_ranges,			\
			.n_linear_ranges = ARRAY_SIZE(volt_ranges),	\
			.vsel_reg = HI6421_REG_TO_BUS_ADDR(vreg),	\
			.vsel_mask = vmask,				\
			.enable_reg = HI6421_REG_TO_BUS_ADDR(ereg),	\
			.enable_mask = emask,				\
			.enable_time = HI6421_LDO_ENABLE_TIME,		\
			.off_on_delay = odelay,				\
		},							\
		.mode_mask = ecomask,					\
		.eco_microamp = ecoamp,					\
	}
/* HI6421 BUCK0/1/2 are linear voltage regulators at fixed uV_step
 *
 * _id - BUCK0/1/2 id name string
 * _match - of match name string
 * vreg - voltage select register
 * vmask - voltage select mask
 * ereg - enable register
 * emask - enable mask
 * sleepmask - mask of sleep mode
 * etime - enable time
 * odelay - off/on delay time in uS
 */
#define HI6421_BUCK012(_id, _match, vreg, vmask, ereg, emask, sleepmask,\
			etime, odelay)					\
	[HI6421_##_id] = {						\
		.desc = {						\
			.name = #_id,					\
			.of_match = #_match,				\
			.regulators_node = "regulators",		\
			.ops = &hi6421_buck012_ops,			\
			.type = REGULATOR_VOLTAGE,			\
			.id = HI6421_##_id,				\
			.owner = THIS_MODULE,				\
			.min_uV = 700000,				\
			.n_voltages = 128,				\
			.uV_step = 7086,				\
			.vsel_reg = HI6421_REG_TO_BUS_ADDR(vreg),	\
			.vsel_mask = vmask,				\
			.enable_reg = HI6421_REG_TO_BUS_ADDR(ereg),	\
			.enable_mask = emask,				\
			.enable_time = etime,				\
			.off_on_delay = odelay,				\
		},							\
		.mode_mask = sleepmask,					\
	}
/* HI6421 BUCK3/4/5 share similar configurations as LDOs, with exception
 * that it supports SLEEP mode, so has different .ops.
 *
 * _id - LDO id name string
 * _match - of match name string
 * v_table - voltage table
 * vreg - voltage select register
 * vmask - voltage select mask
 * ereg - enable register
 * emask - enable mask
 * odelay - off/on delay time in uS
 * sleepmask - mask of sleep mode
 */
#define HI6421_BUCK345(_id, _match, v_table, vreg, vmask, ereg, emask,	\
			odelay, sleepmask)				\
	[HI6421_##_id] = {						\
		.desc = {						\
			.name = #_id,					\
			.of_match = #_match,				\
			.regulators_node = "regulators",		\
			.ops = &hi6421_buck345_ops,			\
			.type = REGULATOR_VOLTAGE,			\
			.id = HI6421_##_id,				\
			.owner = THIS_MODULE,				\
			.n_voltages = ARRAY_SIZE(v_table),		\
			.volt_table = v_table,				\
			.vsel_reg = HI6421_REG_TO_BUS_ADDR(vreg),	\
			.vsel_mask = vmask,				\
			.enable_reg = HI6421_REG_TO_BUS_ADDR(ereg),	\
			.enable_mask = emask,				\
			.enable_time = HI6421_LDO_ENABLE_TIME,		\
			.off_on_delay = odelay,				\
		},							\
		.mode_mask = sleepmask,					\
	}
/* HI6421 regulator information, indexed by enum hi6421_regulator_id.
 * Register/mask/delay values come from the Hi6421 datasheet.
 */
static struct hi6421_regulator_info
		hi6421_regulator_info[HI6421_NUM_REGULATORS] = {
	HI6421_LDO(LDO0, hi6421_vout0, ldo_0_voltages, 0x20, 0x07, 0x20, 0x10,
		   10000, 0x20, 8000),
	HI6421_LDO_LINEAR(LDO1, hi6421_vout1, 1700000, 4, 100000, 0x21, 0x03,
			  0x21, 0x10, 10000, 0x20, 5000),
	HI6421_LDO_LINEAR(LDO2, hi6421_vout2, 1050000, 8, 50000, 0x22, 0x07,
			  0x22, 0x10, 20000, 0x20, 8000),
	HI6421_LDO_LINEAR(LDO3, hi6421_vout3, 1050000, 8, 50000, 0x23, 0x07,
			  0x23, 0x10, 20000, 0x20, 8000),
	HI6421_LDO(LDO4, hi6421_vout4, ldo_0_voltages, 0x24, 0x07, 0x24, 0x10,
		   20000, 0x20, 8000),
	HI6421_LDO(LDO5, hi6421_vout5, ldo_0_voltages, 0x25, 0x07, 0x25, 0x10,
		   20000, 0x20, 8000),
	HI6421_LDO(LDO6, hi6421_vout6, ldo_0_voltages, 0x26, 0x07, 0x26, 0x10,
		   20000, 0x20, 8000),
	HI6421_LDO(LDO7, hi6421_vout7, ldo_0_voltages, 0x27, 0x07, 0x27, 0x10,
		   20000, 0x20, 5000),
	HI6421_LDO(LDO8, hi6421_vout8, ldo_8_voltages, 0x28, 0x07, 0x28, 0x10,
		   20000, 0x20, 8000),
	HI6421_LDO(LDO9, hi6421_vout9, ldo_0_voltages, 0x29, 0x07, 0x29, 0x10,
		   40000, 0x20, 8000),
	HI6421_LDO(LDO10, hi6421_vout10, ldo_0_voltages, 0x2a, 0x07, 0x2a, 0x10,
		   40000, 0x20, 8000),
	HI6421_LDO(LDO11, hi6421_vout11, ldo_0_voltages, 0x2b, 0x07, 0x2b, 0x10,
		   40000, 0x20, 8000),
	HI6421_LDO(LDO12, hi6421_vout12, ldo_0_voltages, 0x2c, 0x07, 0x2c, 0x10,
		   40000, 0x20, 8000),
	HI6421_LDO(LDO13, hi6421_vout13, ldo_0_voltages, 0x2d, 0x07, 0x2d, 0x10,
		   40000, 0x20, 8000),
	HI6421_LDO(LDO14, hi6421_vout14, ldo_0_voltages, 0x2e, 0x07, 0x2e, 0x10,
		   40000, 0x20, 8000),
	HI6421_LDO(LDO15, hi6421_vout15, ldo_8_voltages, 0x2f, 0x07, 0x2f, 0x10,
		   40000, 0x20, 8000),
	HI6421_LDO(LDO16, hi6421_vout16, ldo_0_voltages, 0x30, 0x07, 0x30, 0x10,
		   40000, 0x20, 8000),
	HI6421_LDO(LDO17, hi6421_vout17, ldo_0_voltages, 0x31, 0x07, 0x31, 0x10,
		   40000, 0x20, 8000),
	HI6421_LDO(LDO18, hi6421_vout18, ldo_0_voltages, 0x32, 0x07, 0x32, 0x10,
		   40000, 0x20, 8000),
	HI6421_LDO(LDO19, hi6421_vout19, ldo_0_voltages, 0x33, 0x07, 0x33, 0x10,
		   40000, 0x20, 8000),
	HI6421_LDO(LDO20, hi6421_vout20, ldo_0_voltages, 0x34, 0x07, 0x34, 0x10,
		   40000, 0x20, 8000),
	HI6421_LDO_LINEAR_RANGE(LDOAUDIO, hi6421_vout_audio, 8,
				ldo_audio_volt_range, 0x36, 0x70, 0x36, 0x01,
				40000, 0x02, 5000),
	HI6421_BUCK012(BUCK0, hi6421_buck0, 0x0d, 0x7f, 0x0c, 0x01, 0x10, 400,
		       20000),
	HI6421_BUCK012(BUCK1, hi6421_buck1, 0x0f, 0x7f, 0x0e, 0x01, 0x10, 400,
		       20000),
	HI6421_BUCK012(BUCK2, hi6421_buck2, 0x11, 0x7f, 0x10, 0x01, 0x10, 350,
		       100),
	HI6421_BUCK345(BUCK3, hi6421_buck3, buck_3_voltages, 0x13, 0x07, 0x12,
		       0x01, 20000, 0x10),
	HI6421_BUCK345(BUCK4, hi6421_buck4, buck_4_voltages, 0x15, 0x07, 0x14,
		       0x01, 20000, 0x10),
	HI6421_BUCK345(BUCK5, hi6421_buck5, buck_5_voltages, 0x17, 0x07, 0x16,
		       0x01, 20000, 0x10),
};
/*
 * Enable one regulator, serialized against all others on the chip.
 * Returns 0 on success or the negative errno from the regmap write.
 */
static int hi6421_regulator_enable(struct regulator_dev *rdev)
{
	struct hi6421_regulator_pdata *pdata = rdev_get_drvdata(rdev);
	int ret;

	/* hi6421 spec requires regulator enablement must be serialized:
	 *  - Because when BUCK, LDO switching from off to on, it will have
	 *    a huge instantaneous current; so you can not turn on two or
	 *    more LDO or BUCKs simultaneously, or it may burn the chip.
	 */
	mutex_lock(&pdata->lock);
	/* call regulator regmap helper */
	ret = regulator_enable_regmap(rdev);
	mutex_unlock(&pdata->lock);

	/* Propagate the result instead of unconditionally returning 0,
	 * so the regulator core can see a failed enable.
	 */
	return ret;
}
/*
 * Report IDLE when the LDO's ECO bit is set, NORMAL otherwise.
 * The regmap_read() result is checked: on failure reg_val would be
 * uninitialized, so we fall back to reporting NORMAL mode.
 */
static unsigned int hi6421_regulator_ldo_get_mode(struct regulator_dev *rdev)
{
	struct hi6421_regulator_info *info;
	unsigned int reg_val;

	info = container_of(rdev->desc, struct hi6421_regulator_info, desc);
	if (regmap_read(rdev->regmap, rdev->desc->enable_reg, &reg_val))
		return REGULATOR_MODE_NORMAL;
	if (reg_val & info->mode_mask)
		return REGULATOR_MODE_IDLE;

	return REGULATOR_MODE_NORMAL;
}
/*
 * Report STANDBY when the BUCK's sleep bit is set, NORMAL otherwise.
 * The regmap_read() result is checked: on failure reg_val would be
 * uninitialized, so we fall back to reporting NORMAL mode.
 */
static unsigned int hi6421_regulator_buck_get_mode(struct regulator_dev *rdev)
{
	struct hi6421_regulator_info *info;
	unsigned int reg_val;

	info = container_of(rdev->desc, struct hi6421_regulator_info, desc);
	if (regmap_read(rdev->regmap, rdev->desc->enable_reg, &reg_val))
		return REGULATOR_MODE_NORMAL;
	if (reg_val & info->mode_mask)
		return REGULATOR_MODE_STANDBY;

	return REGULATOR_MODE_NORMAL;
}
/*
 * Set an LDO to NORMAL or IDLE (ECO) mode.
 * Returns 0 on success, -EINVAL for unsupported modes, or the negative
 * errno from the regmap update (previously silently discarded).
 */
static int hi6421_regulator_ldo_set_mode(struct regulator_dev *rdev,
						unsigned int mode)
{
	struct hi6421_regulator_info *info;
	unsigned int new_mode;

	info = container_of(rdev->desc, struct hi6421_regulator_info, desc);
	switch (mode) {
	case REGULATOR_MODE_NORMAL:
		new_mode = 0;
		break;
	case REGULATOR_MODE_IDLE:
		new_mode = info->mode_mask;
		break;
	default:
		return -EINVAL;
	}

	/* set mode; report any bus failure to the regulator core */
	return regmap_update_bits(rdev->regmap, rdev->desc->enable_reg,
				  info->mode_mask, new_mode);
}
/*
 * Set a BUCK to NORMAL or STANDBY (sleep) mode.
 * Returns 0 on success, -EINVAL for unsupported modes, or the negative
 * errno from the regmap update (previously silently discarded).
 */
static int hi6421_regulator_buck_set_mode(struct regulator_dev *rdev,
						unsigned int mode)
{
	struct hi6421_regulator_info *info;
	unsigned int new_mode;

	info = container_of(rdev->desc, struct hi6421_regulator_info, desc);
	switch (mode) {
	case REGULATOR_MODE_NORMAL:
		new_mode = 0;
		break;
	case REGULATOR_MODE_STANDBY:
		new_mode = info->mode_mask;
		break;
	default:
		return -EINVAL;
	}

	/* set mode; report any bus failure to the regulator core */
	return regmap_update_bits(rdev->regmap, rdev->desc->enable_reg,
				  info->mode_mask, new_mode);
}
/*
 * Suggest ECO (idle) mode whenever the requested load fits under the
 * per-LDO eco-current limit; otherwise normal mode is required.
 */
static unsigned int
hi6421_regulator_ldo_get_optimum_mode(struct regulator_dev *rdev,
			int input_uV, int output_uV, int load_uA)
{
	struct hi6421_regulator_info *info;

	info = container_of(rdev->desc, struct hi6421_regulator_info, desc);

	return load_uA > info->eco_microamp ? REGULATOR_MODE_NORMAL
					    : REGULATOR_MODE_IDLE;
}
/* Ops for table-type LDOs (discrete voltage tables, ECO mode support). */
static const struct regulator_ops hi6421_ldo_ops = {
	.is_enabled = regulator_is_enabled_regmap,
	.enable = hi6421_regulator_enable,
	.disable = regulator_disable_regmap,
	.list_voltage = regulator_list_voltage_table,
	.map_voltage = regulator_map_voltage_ascend,
	.get_voltage_sel = regulator_get_voltage_sel_regmap,
	.set_voltage_sel = regulator_set_voltage_sel_regmap,
	.get_mode = hi6421_regulator_ldo_get_mode,
	.set_mode = hi6421_regulator_ldo_set_mode,
	.get_optimum_mode = hi6421_regulator_ldo_get_optimum_mode,
};
/* Ops for LDO1~3: fixed-step linear voltage maps. */
static const struct regulator_ops hi6421_ldo_linear_ops = {
	.is_enabled = regulator_is_enabled_regmap,
	.enable = hi6421_regulator_enable,
	.disable = regulator_disable_regmap,
	.list_voltage = regulator_list_voltage_linear,
	.map_voltage = regulator_map_voltage_linear,
	.get_voltage_sel = regulator_get_voltage_sel_regmap,
	.set_voltage_sel = regulator_set_voltage_sel_regmap,
	.get_mode = hi6421_regulator_ldo_get_mode,
	.set_mode = hi6421_regulator_ldo_set_mode,
	.get_optimum_mode = hi6421_regulator_ldo_get_optimum_mode,
};
/* Ops for LDOAUDIO: two linear sub-ranges. */
static const struct regulator_ops hi6421_ldo_linear_range_ops = {
	.is_enabled = regulator_is_enabled_regmap,
	.enable = hi6421_regulator_enable,
	.disable = regulator_disable_regmap,
	.list_voltage = regulator_list_voltage_linear_range,
	.map_voltage = regulator_map_voltage_linear_range,
	.get_voltage_sel = regulator_get_voltage_sel_regmap,
	.set_voltage_sel = regulator_set_voltage_sel_regmap,
	.get_mode = hi6421_regulator_ldo_get_mode,
	.set_mode = hi6421_regulator_ldo_set_mode,
	.get_optimum_mode = hi6421_regulator_ldo_get_optimum_mode,
};
/* Ops for BUCK0/1/2: linear map, sleep mode via buck mode callbacks. */
static const struct regulator_ops hi6421_buck012_ops = {
	.is_enabled = regulator_is_enabled_regmap,
	.enable = hi6421_regulator_enable,
	.disable = regulator_disable_regmap,
	.list_voltage = regulator_list_voltage_linear,
	.map_voltage = regulator_map_voltage_linear,
	.get_voltage_sel = regulator_get_voltage_sel_regmap,
	.set_voltage_sel = regulator_set_voltage_sel_regmap,
	.get_mode = hi6421_regulator_buck_get_mode,
	.set_mode = hi6421_regulator_buck_set_mode,
};
/* Ops for BUCK3/4/5: table-based like the LDOs, plus sleep mode. */
static const struct regulator_ops hi6421_buck345_ops = {
	.is_enabled = regulator_is_enabled_regmap,
	.enable = hi6421_regulator_enable,
	.disable = regulator_disable_regmap,
	.list_voltage = regulator_list_voltage_table,
	.map_voltage = regulator_map_voltage_ascend,
	.get_voltage_sel = regulator_get_voltage_sel_regmap,
	.set_voltage_sel = regulator_set_voltage_sel_regmap,
	.get_mode = hi6421_regulator_buck_get_mode,
	.set_mode = hi6421_regulator_buck_set_mode,
};
/*
 * Probe: register all HI6421 regulators against the parent MFD's regmap.
 * A shared mutex (in pdata) serializes enable operations across them.
 * Returns 0 on success or the first registration error.
 */
static int hi6421_regulator_probe(struct platform_device *pdev)
{
	struct hi6421_pmic *pmic = dev_get_drvdata(pdev->dev.parent);
	struct hi6421_regulator_pdata *pdata;
	struct hi6421_regulator_info *info;
	struct regulator_config config = { };
	struct regulator_dev *rdev;
	int i;
	pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
	if (!pdata)
		return -ENOMEM;
	mutex_init(&pdata->lock);
	for (i = 0; i < ARRAY_SIZE(hi6421_regulator_info); i++) {
		/* assign per-regulator data */
		info = &hi6421_regulator_info[i];
		/* DT matching uses .of_match in each desc; dev is the MFD parent */
		config.dev = pdev->dev.parent;
		config.driver_data = pdata;
		config.regmap = pmic->regmap;
		rdev = devm_regulator_register(&pdev->dev, &info->desc,
					       &config);
		if (IS_ERR(rdev)) {
			dev_err(&pdev->dev, "failed to register regulator %s\n",
				info->desc.name);
			return PTR_ERR(rdev);
		}
	}
	return 0;
}
/* Matched by name against the MFD cell "hi6421-regulator". */
static const struct platform_device_id hi6421_regulator_table[] = {
	{ .name = "hi6421-regulator" },
	{},
};
MODULE_DEVICE_TABLE(platform, hi6421_regulator_table);
static struct platform_driver hi6421_regulator_driver = {
	.id_table = hi6421_regulator_table,
	.driver = {
		.name = "hi6421-regulator",
		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
	},
	.probe = hi6421_regulator_probe,
};
module_platform_driver(hi6421_regulator_driver);
MODULE_AUTHOR("Guodong Xu <[email protected]>");
MODULE_DESCRIPTION("Hi6421 regulator driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/regulator/hi6421-regulator.c |
// SPDX-License-Identifier: GPL-2.0+
//
// arizona-ldo1.c -- LDO1 supply for Arizona devices
//
// Copyright 2012 Wolfson Microelectronics PLC.
//
// Author: Mark Brown <[email protected]>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/err.h>
#include <linux/of.h>
#include <linux/gpio/consumer.h>
#include <linux/platform_device.h>
#include <linux/regulator/driver.h>
#include <linux/regulator/machine.h>
#include <linux/regulator/of_regulator.h>
#include <linux/slab.h>
#include <linux/regulator/arizona-ldo1.h>
#include <linux/mfd/arizona/core.h>
#include <linux/mfd/arizona/pdata.h>
#include <linux/mfd/arizona/registers.h>
#include <linux/mfd/madera/core.h>
#include <linux/mfd/madera/pdata.h>
#include <linux/mfd/madera/registers.h>
/* Per-device state for the LDO1 supply. */
struct arizona_ldo1 {
	struct regulator_dev *regulator;	/* registered regulator handle */
	struct regmap *regmap;			/* parent MFD's register map */
	struct regulator_consumer_supply supply; /* the single DCVDD consumer */
	struct regulator_init_data init_data;	/* default constraints, per chip */
	struct gpio_desc *ena_gpiod;		/* optional enable GPIO; freed in remove */
};
static int arizona_ldo1_hc_set_voltage_sel(struct regulator_dev *rdev,
unsigned sel)
{
struct regmap *regmap = rdev_get_regmap(rdev);
unsigned int val;
int ret;
if (sel == rdev->desc->n_voltages - 1)
val = ARIZONA_LDO1_HI_PWR;
else
val = 0;
ret = regmap_update_bits(regmap, ARIZONA_LDO1_CONTROL_2,
ARIZONA_LDO1_HI_PWR, val);
if (ret != 0)
return ret;
if (val)
return 0;
return regulator_set_voltage_sel_regmap(rdev, sel);
}
/*
 * Read back the LDO1 voltage selector on high-current-capable parts.
 * When the high-power bit is set, the top selector is reported regardless
 * of the VSEL field; otherwise the regmap helper reads VSEL as usual.
 */
static int arizona_ldo1_hc_get_voltage_sel(struct regulator_dev *rdev)
{
	struct regmap *regmap = rdev_get_regmap(rdev);
	unsigned int ctrl2;
	int ret;

	ret = regmap_read(regmap, ARIZONA_LDO1_CONTROL_2, &ctrl2);
	if (ret != 0)
		return ret;

	if (ctrl2 & ARIZONA_LDO1_HI_PWR)
		return rdev->desc->n_voltages - 1;

	return regulator_get_voltage_sel_regmap(rdev);
}
/* Ops for high-current-capable parts (WM5102 etc.): custom VSEL handling. */
static const struct regulator_ops arizona_ldo1_hc_ops = {
	.list_voltage = regulator_list_voltage_linear_range,
	.map_voltage = regulator_map_voltage_linear_range,
	.get_voltage_sel = arizona_ldo1_hc_get_voltage_sel,
	.set_voltage_sel = arizona_ldo1_hc_set_voltage_sel,
	.get_bypass = regulator_get_bypass_regmap,
	.set_bypass = regulator_set_bypass_regmap,
};
/* 0.9V..1.2V in 50mV steps, plus a single 1.8V high-power point. */
static const struct linear_range arizona_ldo1_hc_ranges[] = {
	REGULATOR_LINEAR_RANGE(900000, 0, 0x6, 50000),
	REGULATOR_LINEAR_RANGE(1800000, 0x7, 0x7, 0),
};
static const struct regulator_desc arizona_ldo1_hc = {
	.name = "LDO1",
	.supply_name = "LDOVDD",
	.type = REGULATOR_VOLTAGE,
	.ops = &arizona_ldo1_hc_ops,
	.vsel_reg = ARIZONA_LDO1_CONTROL_1,
	.vsel_mask = ARIZONA_LDO1_VSEL_MASK,
	.bypass_reg = ARIZONA_LDO1_CONTROL_1,
	.bypass_mask = ARIZONA_LDO1_BYPASS,
	.linear_ranges = arizona_ldo1_hc_ranges,
	.n_linear_ranges = ARRAY_SIZE(arizona_ldo1_hc_ranges),
	.n_voltages = 8,
	.enable_time = 1500,
	.ramp_delay = 24000,
	.owner = THIS_MODULE,
};
/* Plain linear-map ops for parts without the high-power mode. */
static const struct regulator_ops arizona_ldo1_ops = {
	.list_voltage = regulator_list_voltage_linear,
	.map_voltage = regulator_map_voltage_linear,
	.get_voltage_sel = regulator_get_voltage_sel_regmap,
	.set_voltage_sel = regulator_set_voltage_sel_regmap,
};
static const struct regulator_desc arizona_ldo1 = {
	.name = "LDO1",
	.supply_name = "LDOVDD",
	.type = REGULATOR_VOLTAGE,
	.ops = &arizona_ldo1_ops,
	.vsel_reg = ARIZONA_LDO1_CONTROL_1,
	.vsel_mask = ARIZONA_LDO1_VSEL_MASK,
	.min_uV = 900000,
	.uV_step = 25000,
	.n_voltages = 13,
	.enable_time = 500,
	.ramp_delay = 24000,
	.owner = THIS_MODULE,
};
/* Default constraints for chips that use LDO1 for DVFS (voltage changes). */
static const struct regulator_init_data arizona_ldo1_dvfs = {
	.constraints = {
		.min_uV = 1200000,
		.max_uV = 1800000,
		.valid_ops_mask = REGULATOR_CHANGE_STATUS |
				  REGULATOR_CHANGE_VOLTAGE,
	},
	.num_consumer_supplies = 1,
};
/* Default constraints: status changes only. */
static const struct regulator_init_data arizona_ldo1_default = {
	.constraints = {
		.valid_ops_mask = REGULATOR_CHANGE_STATUS,
	},
	.num_consumer_supplies = 1,
};
static const struct regulator_init_data arizona_ldo1_wm5110 = {
	.constraints = {
		.min_uV = 1175000,
		.max_uV = 1200000,
		.valid_ops_mask = REGULATOR_CHANGE_STATUS |
				  REGULATOR_CHANGE_VOLTAGE,
	},
	.num_consumer_supplies = 1,
};
/* Madera parts share the Arizona linear ops but use Madera registers. */
static const struct regulator_desc madera_ldo1 = {
	.name = "LDO1",
	.supply_name = "LDOVDD",
	.type = REGULATOR_VOLTAGE,
	.ops = &arizona_ldo1_ops,
	.vsel_reg = MADERA_LDO1_CONTROL_1,
	.vsel_mask = MADERA_LDO1_VSEL_MASK,
	.min_uV = 900000,
	.uV_step = 25000,
	.n_voltages = 13,
	.enable_time = 3000,
	.owner = THIS_MODULE,
};
static const struct regulator_init_data madera_ldo1_default = {
	.constraints = {
		.min_uV = 1200000,
		.max_uV = 1200000,
		.valid_ops_mask = REGULATOR_CHANGE_STATUS,
	},
	.num_consumer_supplies = 1,
};
/*
 * Parse devicetree configuration for LDO1.
 * Fills pdata->init_data from an "ldo1" child node if present, and sets
 * *external_dcvdd when DCVDD is supplied by something other than LDO1
 * (a DCVDD-supply phandle pointing elsewhere).
 * Note: on success with an "ldo1" node, config->of_node holds a reference
 * which the caller (arizona_ldo1_common_init) releases after registration.
 */
static int arizona_ldo1_of_get_pdata(struct arizona_ldo1_pdata *pdata,
				     struct regulator_config *config,
				     const struct regulator_desc *desc,
				     bool *external_dcvdd)
{
	struct arizona_ldo1 *ldo1 = config->driver_data;
	struct device_node *np = config->dev->of_node;
	struct device_node *init_node, *dcvdd_node;
	struct regulator_init_data *init_data;
	init_node = of_get_child_by_name(np, "ldo1");
	dcvdd_node = of_parse_phandle(np, "DCVDD-supply", 0);
	if (init_node) {
		config->of_node = init_node;
		init_data = of_get_regulator_init_data(config->dev, init_node,
						       desc);
		if (init_data) {
			init_data->consumer_supplies = &ldo1->supply;
			init_data->num_consumer_supplies = 1;
			/* DCVDD fed by a different node => external supply */
			if (dcvdd_node && dcvdd_node != init_node)
				*external_dcvdd = true;
			pdata->init_data = init_data;
		}
	} else if (dcvdd_node) {
		*external_dcvdd = true;
	}
	of_node_put(dcvdd_node);
	return 0;
}
/*
 * Shared registration path for Arizona and Madera LDO1.
 * Builds the regulator_config (init data from platform data, DT, or the
 * per-chip default already copied into ldo1->init_data), claims the
 * optional enable GPIO, registers the regulator and stashes drvdata.
 * Sets *external_dcvdd when DCVDD is not supplied by this LDO.
 */
static int arizona_ldo1_common_init(struct platform_device *pdev,
				    struct arizona_ldo1 *ldo1,
				    const struct regulator_desc *desc,
				    struct arizona_ldo1_pdata *pdata,
				    bool *external_dcvdd)
{
	struct device *parent_dev = pdev->dev.parent;
	struct regulator_config config = { };
	int ret;
	*external_dcvdd = false;
	ldo1->supply.supply = "DCVDD";
	ldo1->init_data.consumer_supplies = &ldo1->supply;
	ldo1->supply.dev_name = dev_name(parent_dev);
	config.dev = parent_dev;
	config.driver_data = ldo1;
	config.regmap = ldo1->regmap;
	if (IS_ENABLED(CONFIG_OF)) {
		if (!dev_get_platdata(parent_dev)) {
			ret = arizona_ldo1_of_get_pdata(pdata,
							&config, desc,
							external_dcvdd);
			if (ret < 0)
				return ret;
		}
	}
	/* We assume that high output = regulator off
	 * Don't use devm, since we need to get against the parent device
	 * so clean up would happen at the wrong time
	 */
	config.ena_gpiod = gpiod_get_optional(parent_dev, "wlf,ldoena",
				GPIOD_OUT_LOW | GPIOD_FLAGS_BIT_NONEXCLUSIVE);
	if (IS_ERR(config.ena_gpiod))
		return PTR_ERR(config.ena_gpiod);
	/* keep our own handle; the remove path must gpiod_put() it */
	ldo1->ena_gpiod = config.ena_gpiod;
	if (pdata->init_data)
		config.init_data = pdata->init_data;
	else
		config.init_data = &ldo1->init_data;
	/*
	 * LDO1 can only be used to supply DCVDD so if it has no
	 * consumers then DCVDD is supplied externally.
	 */
	if (config.init_data->num_consumer_supplies == 0)
		*external_dcvdd = true;
	ldo1->regulator = devm_regulator_register(&pdev->dev, desc, &config);
	/* release the reference taken by arizona_ldo1_of_get_pdata() */
	of_node_put(config.of_node);
	if (IS_ERR(ldo1->regulator)) {
		ret = PTR_ERR(ldo1->regulator);
		dev_err(&pdev->dev, "Failed to register LDO1 supply: %d\n",
			ret);
		return ret;
	}
	platform_set_drvdata(pdev, ldo1);
	return 0;
}
/*
 * Probe for Arizona-family parts: pick the regulator descriptor and
 * default constraints matching the detected chip, then run the common
 * init. Records whether DCVDD is externally supplied in the parent MFD.
 */
static int arizona_ldo1_probe(struct platform_device *pdev)
{
	struct arizona *arizona = dev_get_drvdata(pdev->dev.parent);
	struct arizona_ldo1 *ldo1;
	const struct regulator_desc *desc;
	bool external_dcvdd;
	int ret;
	ldo1 = devm_kzalloc(&pdev->dev, sizeof(*ldo1), GFP_KERNEL);
	if (!ldo1)
		return -ENOMEM;
	ldo1->regmap = arizona->regmap;
	/*
	 * Since the chip usually supplies itself we provide some
	 * default init_data for it. This will be overridden with
	 * platform data if provided.
	 */
	switch (arizona->type) {
	case WM5102:
	case WM8997:
	case WM8998:
	case WM1814:
		/* high-current parts with DVFS support */
		desc = &arizona_ldo1_hc;
		ldo1->init_data = arizona_ldo1_dvfs;
		break;
	case WM5110:
	case WM8280:
		desc = &arizona_ldo1;
		ldo1->init_data = arizona_ldo1_wm5110;
		break;
	default:
		desc = &arizona_ldo1;
		ldo1->init_data = arizona_ldo1_default;
		break;
	}
	ret = arizona_ldo1_common_init(pdev, ldo1, desc,
				       &arizona->pdata.ldo1,
				       &external_dcvdd);
	if (ret == 0)
		arizona->external_dcvdd = external_dcvdd;
	return ret;
}
/*
 * Remove: release the enable GPIO that common_init claimed without devm
 * (it was deliberately acquired against the parent device).
 */
static int arizona_ldo1_remove(struct platform_device *pdev)
{
	struct arizona_ldo1 *ldo1 = platform_get_drvdata(pdev);
	if (ldo1->ena_gpiod)
		gpiod_put(ldo1->ena_gpiod);
	return 0;
}
/*
 * Probe for Madera-family parts: same common init as Arizona, but with
 * the Madera descriptor/defaults; records internal-DCVDD state inverted.
 */
static int madera_ldo1_probe(struct platform_device *pdev)
{
	struct madera *madera = dev_get_drvdata(pdev->dev.parent);
	struct arizona_ldo1 *ldo1;
	bool external_dcvdd;
	int ret;
	ldo1 = devm_kzalloc(&pdev->dev, sizeof(*ldo1), GFP_KERNEL);
	if (!ldo1)
		return -ENOMEM;
	ldo1->regmap = madera->regmap;
	ldo1->init_data = madera_ldo1_default;
	ret = arizona_ldo1_common_init(pdev, ldo1, &madera_ldo1,
				       &madera->pdata.ldo1,
				       &external_dcvdd);
	if (ret)
		return ret;
	madera->internal_dcvdd = !external_dcvdd;
	return 0;
}
/* Two platform drivers (Arizona and Madera) registered together below. */
static struct platform_driver arizona_ldo1_driver = {
	.probe = arizona_ldo1_probe,
	.remove = arizona_ldo1_remove,
	.driver		= {
		.name	= "arizona-ldo1",
		.probe_type = PROBE_FORCE_SYNCHRONOUS,
	},
};
static struct platform_driver madera_ldo1_driver = {
	.probe = madera_ldo1_probe,
	.remove = arizona_ldo1_remove,
	.driver		= {
		.name	= "madera-ldo1",
		.probe_type = PROBE_FORCE_SYNCHRONOUS,
	},
};
static struct platform_driver * const madera_ldo1_drivers[] = {
	&arizona_ldo1_driver,
	&madera_ldo1_driver,
};
/* Register both drivers in one call so they share an init/exit pair. */
static int __init arizona_ldo1_init(void)
{
	return platform_register_drivers(madera_ldo1_drivers,
					 ARRAY_SIZE(madera_ldo1_drivers));
}
module_init(arizona_ldo1_init);
static void __exit madera_ldo1_exit(void)
{
	platform_unregister_drivers(madera_ldo1_drivers,
				    ARRAY_SIZE(madera_ldo1_drivers));
}
module_exit(madera_ldo1_exit);
/* Module information */
MODULE_AUTHOR("Mark Brown <[email protected]>");
MODULE_DESCRIPTION("Arizona LDO1 driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:arizona-ldo1");
MODULE_ALIAS("platform:madera-ldo1");
| linux-master | drivers/regulator/arizona-ldo1.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Regulator driver for National Semiconductors LP3972 PMIC chip
*
* Based on lp3971.c
*/
#include <linux/bug.h>
#include <linux/err.h>
#include <linux/i2c.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/regulator/driver.h>
#include <linux/regulator/lp3972.h>
#include <linux/slab.h>
/* Driver state: io_lock serializes raw I2C register accesses. */
struct lp3972 {
	struct device *dev;
	struct mutex io_lock;
	struct i2c_client *i2c;
};
/* LP3972 Control Registers (I2C register addresses from the datasheet) */
#define LP3972_SCR_REG 0x07
#define LP3972_OVER1_REG 0x10
#define LP3972_OVSR1_REG 0x11
#define LP3972_OVER2_REG 0x12
#define LP3972_OVSR2_REG 0x13
#define LP3972_VCC1_REG 0x20
#define LP3972_ADTV1_REG 0x23
#define LP3972_ADTV2_REG 0x24
#define LP3972_AVRC_REG 0x25
#define LP3972_CDTC1_REG 0x26
#define LP3972_CDTC2_REG 0x27
#define LP3972_SDTV1_REG 0x29
#define LP3972_SDTV2_REG 0x2A
#define LP3972_MDTV1_REG 0x32
#define LP3972_MDTV2_REG 0x33
#define LP3972_L2VCR_REG 0x39
#define LP3972_L34VCR_REG 0x3A
#define LP3972_SCR1_REG 0x80
#define LP3972_SCR2_REG 0x81
#define LP3972_OEN3_REG 0x82
#define LP3972_OSR3_REG 0x83
#define LP3972_LOER4_REG 0x84
#define LP3972_B2TV_REG 0x85
#define LP3972_B3TV_REG 0x86
#define LP3972_B32RC_REG 0x87
#define LP3972_ISRA_REG 0x88
#define LP3972_BCCR_REG 0x89
#define LP3972_II1RR_REG 0x8E
#define LP3972_II2RR_REG 0x8F
#define LP3972_SYS_CONTROL1_REG LP3972_SCR1_REG
/* System control register 1 initial value,
 * bits 5, 6 and 7 are EPROM programmable */
#define SYS_CONTROL1_INIT_VAL 0x02
#define SYS_CONTROL1_INIT_MASK 0x1F
#define LP3972_VOL_CHANGE_REG LP3972_VCC1_REG
#define LP3972_VOL_CHANGE_FLAG_GO 0x01
#define LP3972_VOL_CHANGE_FLAG_MASK 0x03
/* LDO output enable mask */
#define LP3972_OEN3_L1EN BIT(0)
#define LP3972_OVER2_LDO2_EN BIT(2)
#define LP3972_OVER2_LDO3_EN BIT(3)
#define LP3972_OVER2_LDO4_EN BIT(4)
#define LP3972_OVER1_S_EN BIT(2)
/* Per-regulator voltage tables in uV; a 0 entry marks an invalid selector. */
static const unsigned int ldo1_voltage_map[] = {
	1700000, 1725000, 1750000, 1775000, 1800000, 1825000, 1850000, 1875000,
	1900000, 1925000, 1950000, 1975000, 2000000,
};
static const unsigned int ldo23_voltage_map[] = {
	1800000, 1900000, 2000000, 2100000, 2200000, 2300000, 2400000, 2500000,
	2600000, 2700000, 2800000, 2900000, 3000000, 3100000, 3200000, 3300000,
};
static const unsigned int ldo4_voltage_map[] = {
	1000000, 1050000, 1100000, 1150000, 1200000, 1250000, 1300000, 1350000,
	1400000, 1500000, 1800000, 1900000, 2500000, 2800000, 3000000, 3300000,
};
static const unsigned int ldo5_voltage_map[] = {
	0, 0, 0, 0, 0, 850000, 875000, 900000,
	925000, 950000, 975000, 1000000, 1025000, 1050000, 1075000, 1100000,
	1125000, 1150000, 1175000, 1200000, 1225000, 1250000, 1275000, 1300000,
	1325000, 1350000, 1375000, 1400000, 1425000, 1450000, 1475000, 1500000,
};
static const unsigned int buck1_voltage_map[] = {
	725000, 750000, 775000, 800000, 825000, 850000, 875000, 900000,
	925000, 950000, 975000, 1000000, 1025000, 1050000, 1075000, 1100000,
	1125000, 1150000, 1175000, 1200000, 1225000, 1250000, 1275000, 1300000,
	1325000, 1350000, 1375000, 1400000, 1425000, 1450000, 1475000, 1500000,
};
static const unsigned int buck23_voltage_map[] = {
	0, 800000, 850000, 900000, 950000, 1000000, 1050000, 1100000,
	1150000, 1200000, 1250000, 1300000, 1350000, 1400000, 1450000, 1500000,
	1550000, 1600000, 1650000, 1700000, 1800000, 1900000, 2500000, 2800000,
	3000000, 3300000,
};
/* Lookup tables indexed by (rdev id - LP3972_LDO1) / buck index. */
static const int ldo_output_enable_mask[] = {
	LP3972_OEN3_L1EN,
	LP3972_OVER2_LDO2_EN,
	LP3972_OVER2_LDO3_EN,
	LP3972_OVER2_LDO4_EN,
	LP3972_OVER1_S_EN,
};
static const int ldo_output_enable_addr[] = {
	LP3972_OEN3_REG,
	LP3972_OVER2_REG,
	LP3972_OVER2_REG,
	LP3972_OVER2_REG,
	LP3972_OVER1_REG,
};
static const int ldo_vol_ctl_addr[] = {
	LP3972_MDTV1_REG,
	LP3972_L2VCR_REG,
	LP3972_L34VCR_REG,
	LP3972_L34VCR_REG,
	LP3972_SDTV1_REG,
};
static const int buck_vol_enable_addr[] = {
	LP3972_OVER1_REG,
	LP3972_OEN3_REG,
	LP3972_OEN3_REG,
};
static const int buck_base_addr[] = {
	LP3972_ADTV1_REG,
	LP3972_B2TV_REG,
	LP3972_B3TV_REG,
};
/* Accessors over the lookup tables above; x is the zero-based LDO/BUCK index. */
#define LP3972_LDO_OUTPUT_ENABLE_MASK(x) (ldo_output_enable_mask[x])
#define LP3972_LDO_OUTPUT_ENABLE_REG(x) (ldo_output_enable_addr[x])
/* LDO voltage control registers shift:
   LP3972_LDO1 -> 0, LP3972_LDO2 -> 4
   LP3972_LDO3 -> 0, LP3972_LDO4 -> 4
   LP3972_LDO5 -> 0
*/
#define LP3972_LDO_VOL_CONTR_SHIFT(x) (((x) & 1) << 2)
#define LP3972_LDO_VOL_CONTR_REG(x) (ldo_vol_ctl_addr[x])
#define LP3972_LDO_VOL_CHANGE_SHIFT(x) ((x) ? 4 : 6)
#define LP3972_LDO_VOL_MASK(x) (((x) % 4) ? 0x0f : 0x1f)
#define LP3972_LDO_VOL_MIN_IDX(x) (((x) == 4) ? 0x05 : 0x00)
#define LP3972_LDO_VOL_MAX_IDX(x) ((x) ? (((x) == 4) ? 0x1f : 0x0f) : 0x0c)
#define LP3972_BUCK_VOL_ENABLE_REG(x) (buck_vol_enable_addr[x])
#define LP3972_BUCK_VOL1_REG(x) (buck_base_addr[x])
#define LP3972_BUCK_VOL_MASK 0x1f
/*
 * Read a single register over SMBus into *dest.
 * Only single-byte transfers are supported; returns 0 or a negative errno.
 */
static int lp3972_i2c_read(struct i2c_client *i2c, char reg, int count,
			u16 *dest)
{
	int value;

	if (count != 1)
		return -EIO;

	value = i2c_smbus_read_byte_data(i2c, reg);
	if (value < 0)
		return value;

	*dest = value;
	return 0;
}
/*
 * Write a single register over SMBus from *src.
 * Only single-byte transfers are supported; returns 0 or a negative errno.
 */
static int lp3972_i2c_write(struct i2c_client *i2c, char reg, int count,
			const u16 *src)
{
	if (count == 1)
		return i2c_smbus_write_byte_data(i2c, reg, *src);

	return -EIO;
}
/*
 * Read one register and return its 8-bit value, serialised by io_lock.
 *
 * NOTE(review): a failed I2C read is silently ignored and the function
 * returns 0 (val is pre-initialised) — callers cannot distinguish an I/O
 * error from a register that legitimately reads as zero.
 */
static u8 lp3972_reg_read(struct lp3972 *lp3972, u8 reg)
{
	u16 val = 0;
	mutex_lock(&lp3972->io_lock);
	lp3972_i2c_read(lp3972->i2c, reg, 1, &val);
	dev_dbg(lp3972->dev, "reg read 0x%02x -> 0x%02x\n", (int)reg,
		(unsigned)val & 0xff);
	mutex_unlock(&lp3972->io_lock);
	return val & 0xff;
}
/*
 * Read-modify-write of one register under io_lock: clear @mask, then OR
 * in @val. Returns 0 on success or a negative errno from the I2C layer.
 *
 * Fix: the debug message previously printed @val (only the newly set
 * bits), not @tmp, the value actually written back to the register.
 */
static int lp3972_set_bits(struct lp3972 *lp3972, u8 reg, u16 mask, u16 val)
{
	u16 tmp;
	int ret;

	mutex_lock(&lp3972->io_lock);
	ret = lp3972_i2c_read(lp3972->i2c, reg, 1, &tmp);
	if (ret == 0) {
		tmp = (tmp & ~mask) | val;
		ret = lp3972_i2c_write(lp3972->i2c, reg, 1, &tmp);
		/* Log the full value written, not just the OR-ed bits. */
		dev_dbg(lp3972->dev, "reg write 0x%02x -> 0x%02x\n", (int)reg,
			(unsigned)tmp & 0xff);
	}
	mutex_unlock(&lp3972->io_lock);

	return ret;
}
/* Report whether the given LDO's enable bit is set in hardware. */
static int lp3972_ldo_is_enabled(struct regulator_dev *dev)
{
	struct lp3972 *lp3972 = rdev_get_drvdata(dev);
	int ldo = rdev_get_id(dev) - LP3972_LDO1;
	u16 enable_bit = LP3972_LDO_OUTPUT_ENABLE_MASK(ldo);
	u16 reg_val;

	reg_val = lp3972_reg_read(lp3972, LP3972_LDO_OUTPUT_ENABLE_REG(ldo));

	return (reg_val & enable_bit) != 0;
}
/* Turn the LDO on by setting its enable bit. */
static int lp3972_ldo_enable(struct regulator_dev *dev)
{
	struct lp3972 *lp3972 = rdev_get_drvdata(dev);
	int ldo = rdev_get_id(dev) - LP3972_LDO1;
	u16 enable_bit = LP3972_LDO_OUTPUT_ENABLE_MASK(ldo);
	int reg = LP3972_LDO_OUTPUT_ENABLE_REG(ldo);

	return lp3972_set_bits(lp3972, reg, enable_bit, enable_bit);
}
/* Turn the LDO off by clearing its enable bit. */
static int lp3972_ldo_disable(struct regulator_dev *dev)
{
	struct lp3972 *lp3972 = rdev_get_drvdata(dev);
	int ldo = rdev_get_id(dev) - LP3972_LDO1;
	u16 enable_bit = LP3972_LDO_OUTPUT_ENABLE_MASK(ldo);
	int reg = LP3972_LDO_OUTPUT_ENABLE_REG(ldo);

	return lp3972_set_bits(lp3972, reg, enable_bit, 0);
}
/*
 * Return the current voltage selector for the LDO: the selector field is
 * extracted from the shared control register at the per-LDO shift.
 */
static int lp3972_ldo_get_voltage_sel(struct regulator_dev *dev)
{
	struct lp3972 *lp3972 = rdev_get_drvdata(dev);
	int ldo = rdev_get_id(dev) - LP3972_LDO1;
	u16 mask = LP3972_LDO_VOL_MASK(ldo);
	u16 val, reg;

	reg = lp3972_reg_read(lp3972, LP3972_LDO_VOL_CONTR_REG(ldo));
	val = (reg >> LP3972_LDO_VOL_CONTR_SHIFT(ldo)) & mask;

	return val;
}
/*
 * Program a new voltage selector for the LDO.
 *
 * For LDO1/LDO5 the new target value only takes effect after the GO flag
 * in the voltage-change register is pulsed (set, then cleared), which is
 * why the extra two lp3972_set_bits() calls follow in exactly that order.
 */
static int lp3972_ldo_set_voltage_sel(struct regulator_dev *dev,
				      unsigned int selector)
{
	struct lp3972 *lp3972 = rdev_get_drvdata(dev);
	int ldo = rdev_get_id(dev) - LP3972_LDO1;
	int shift, ret;

	/* Write the selector into this LDO's field of the shared register. */
	shift = LP3972_LDO_VOL_CONTR_SHIFT(ldo);
	ret = lp3972_set_bits(lp3972, LP3972_LDO_VOL_CONTR_REG(ldo),
		LP3972_LDO_VOL_MASK(ldo) << shift, selector << shift);

	if (ret)
		return ret;

	/*
	 * LDO1 and LDO5 support voltage control by either target voltage1
	 * or target voltage2 register.
	 * We use target voltage1 register for LDO1 and LDO5 in this driver.
	 * We need to update voltage change control register(0x20) to enable
	 * LDO1 and LDO5 to change to their programmed target values.
	 */
	switch (ldo) {
	case LP3972_LDO1:
	case LP3972_LDO5:
		shift = LP3972_LDO_VOL_CHANGE_SHIFT(ldo);
		ret = lp3972_set_bits(lp3972, LP3972_VOL_CHANGE_REG,
				LP3972_VOL_CHANGE_FLAG_MASK << shift,
				LP3972_VOL_CHANGE_FLAG_GO << shift);
		if (ret)
			return ret;

		/* Clear the GO flag again to arm the next change. */
		ret = lp3972_set_bits(lp3972, LP3972_VOL_CHANGE_REG,
				LP3972_VOL_CHANGE_FLAG_MASK << shift, 0);
		break;
	}

	return ret;
}
/* Regulator operations shared by all five LP3972 LDOs. */
static const struct regulator_ops lp3972_ldo_ops = {
	.list_voltage = regulator_list_voltage_table,
	.map_voltage = regulator_map_voltage_ascend,
	.is_enabled = lp3972_ldo_is_enabled,
	.enable = lp3972_ldo_enable,
	.disable = lp3972_ldo_disable,
	.get_voltage_sel = lp3972_ldo_get_voltage_sel,
	.set_voltage_sel = lp3972_ldo_set_voltage_sel,
};
/* Report whether the buck's enable bit (bit 2*buck) is set. */
static int lp3972_dcdc_is_enabled(struct regulator_dev *dev)
{
	struct lp3972 *lp3972 = rdev_get_drvdata(dev);
	int buck = rdev_get_id(dev) - LP3972_DCDC1;
	u16 enable_bit = 1 << (buck * 2);
	u16 reg_val;

	reg_val = lp3972_reg_read(lp3972, LP3972_BUCK_VOL_ENABLE_REG(buck));

	return (reg_val & enable_bit) != 0;
}
/*
 * Turn the buck converter on by setting its enable bit.
 *
 * Fix: the int return of lp3972_set_bits() was stored in a u16 local,
 * which truncates negative errno values to large positive numbers before
 * they are returned to the regulator core. Return the int directly.
 */
static int lp3972_dcdc_enable(struct regulator_dev *dev)
{
	struct lp3972 *lp3972 = rdev_get_drvdata(dev);
	int buck = rdev_get_id(dev) - LP3972_DCDC1;
	u16 mask = 1 << (buck * 2);

	return lp3972_set_bits(lp3972, LP3972_BUCK_VOL_ENABLE_REG(buck),
				mask, mask);
}
/*
 * Turn the buck converter off by clearing its enable bit.
 *
 * Fix: as in lp3972_dcdc_enable(), the int error code was laundered
 * through a u16 local, destroying its sign; return it directly.
 */
static int lp3972_dcdc_disable(struct regulator_dev *dev)
{
	struct lp3972 *lp3972 = rdev_get_drvdata(dev);
	int buck = rdev_get_id(dev) - LP3972_DCDC1;
	u16 mask = 1 << (buck * 2);

	return lp3972_set_bits(lp3972, LP3972_BUCK_VOL_ENABLE_REG(buck),
				mask, 0);
}
/* Return the buck's current selector from its target-voltage register. */
static int lp3972_dcdc_get_voltage_sel(struct regulator_dev *dev)
{
	struct lp3972 *lp3972 = rdev_get_drvdata(dev);
	int buck = rdev_get_id(dev) - LP3972_DCDC1;

	return lp3972_reg_read(lp3972, LP3972_BUCK_VOL1_REG(buck)) &
		LP3972_BUCK_VOL_MASK;
}
/*
 * Program a new selector for the buck. Only DCDC1 (buck == 0) requires
 * the extra GO-flag pulse in the voltage-change register for the new
 * target to take effect; DCDC2/3 apply immediately.
 */
static int lp3972_dcdc_set_voltage_sel(struct regulator_dev *dev,
				       unsigned int selector)
{
	struct lp3972 *lp3972 = rdev_get_drvdata(dev);
	int buck = rdev_get_id(dev) - LP3972_DCDC1;
	int ret;

	ret = lp3972_set_bits(lp3972, LP3972_BUCK_VOL1_REG(buck),
				LP3972_BUCK_VOL_MASK, selector);
	if (ret)
		return ret;

	/* DCDC2/3 need no GO pulse. */
	if (buck != 0)
		return ret;

	/* Pulse the GO flag: set it, then clear it. */
	ret = lp3972_set_bits(lp3972, LP3972_VOL_CHANGE_REG,
		LP3972_VOL_CHANGE_FLAG_MASK, LP3972_VOL_CHANGE_FLAG_GO);
	if (ret)
		return ret;

	return lp3972_set_bits(lp3972, LP3972_VOL_CHANGE_REG,
				LP3972_VOL_CHANGE_FLAG_MASK, 0);
}
/* Regulator operations shared by the three LP3972 buck converters. */
static const struct regulator_ops lp3972_dcdc_ops = {
	.list_voltage = regulator_list_voltage_table,
	.map_voltage = regulator_map_voltage_ascend,
	.is_enabled = lp3972_dcdc_is_enabled,
	.enable = lp3972_dcdc_enable,
	.disable = lp3972_dcdc_disable,
	.get_voltage_sel = lp3972_dcdc_get_voltage_sel,
	.set_voltage_sel = lp3972_dcdc_set_voltage_sel,
};
/*
 * Descriptors for all LP3972 outputs, indexed by regulator id when
 * registered from setup_regulators(). The ldo*_voltage_map tables are
 * defined earlier in this file.
 */
static const struct regulator_desc regulators[] = {
	{
		.name = "LDO1",
		.id = LP3972_LDO1,
		.ops = &lp3972_ldo_ops,
		.n_voltages = ARRAY_SIZE(ldo1_voltage_map),
		.volt_table = ldo1_voltage_map,
		.type = REGULATOR_VOLTAGE,
		.owner = THIS_MODULE,
	},
	{
		.name = "LDO2",
		.id = LP3972_LDO2,
		.ops = &lp3972_ldo_ops,
		.n_voltages = ARRAY_SIZE(ldo23_voltage_map),
		.volt_table = ldo23_voltage_map,
		.type = REGULATOR_VOLTAGE,
		.owner = THIS_MODULE,
	},
	{
		.name = "LDO3",
		.id = LP3972_LDO3,
		.ops = &lp3972_ldo_ops,
		.n_voltages = ARRAY_SIZE(ldo23_voltage_map),
		.volt_table = ldo23_voltage_map,
		.type = REGULATOR_VOLTAGE,
		.owner = THIS_MODULE,
	},
	{
		.name = "LDO4",
		.id = LP3972_LDO4,
		.ops = &lp3972_ldo_ops,
		.n_voltages = ARRAY_SIZE(ldo4_voltage_map),
		.volt_table = ldo4_voltage_map,
		.type = REGULATOR_VOLTAGE,
		.owner = THIS_MODULE,
	},
	{
		.name = "LDO5",
		.id = LP3972_LDO5,
		.ops = &lp3972_ldo_ops,
		.n_voltages = ARRAY_SIZE(ldo5_voltage_map),
		.volt_table = ldo5_voltage_map,
		.type = REGULATOR_VOLTAGE,
		.owner = THIS_MODULE,
	},
	{
		.name = "DCDC1",
		.id = LP3972_DCDC1,
		.ops = &lp3972_dcdc_ops,
		.n_voltages = ARRAY_SIZE(buck1_voltage_map),
		.volt_table = buck1_voltage_map,
		.type = REGULATOR_VOLTAGE,
		.owner = THIS_MODULE,
	},
	{
		.name = "DCDC2",
		.id = LP3972_DCDC2,
		.ops = &lp3972_dcdc_ops,
		.n_voltages = ARRAY_SIZE(buck23_voltage_map),
		.volt_table = buck23_voltage_map,
		.type = REGULATOR_VOLTAGE,
		.owner = THIS_MODULE,
	},
	{
		.name = "DCDC3",
		.id = LP3972_DCDC3,
		.ops = &lp3972_dcdc_ops,
		.n_voltages = ARRAY_SIZE(buck23_voltage_map),
		.volt_table = buck23_voltage_map,
		.type = REGULATOR_VOLTAGE,
		.owner = THIS_MODULE,
	},
};
static int setup_regulators(struct lp3972 *lp3972,
struct lp3972_platform_data *pdata)
{
int i, err;
/* Instantiate the regulators */
for (i = 0; i < pdata->num_regulators; i++) {
struct lp3972_regulator_subdev *reg = &pdata->regulators[i];
struct regulator_config config = { };
struct regulator_dev *rdev;
config.dev = lp3972->dev;
config.init_data = reg->initdata;
config.driver_data = lp3972;
rdev = devm_regulator_register(lp3972->dev,
®ulators[reg->id], &config);
if (IS_ERR(rdev)) {
err = PTR_ERR(rdev);
dev_err(lp3972->dev, "regulator init failed: %d\n",
err);
return err;
}
}
return 0;
}
/*
 * Probe: allocate driver state, verify the chip identifies as an LP3972
 * via SYS_CONTROL1, then register all regulators from platform data.
 */
static int lp3972_i2c_probe(struct i2c_client *i2c)
{
	struct lp3972 *lp3972;
	struct lp3972_platform_data *pdata = dev_get_platdata(&i2c->dev);
	int ret;
	u16 val;

	if (!pdata) {
		dev_dbg(&i2c->dev, "No platform init data supplied\n");
		return -ENODEV;
	}

	lp3972 = devm_kzalloc(&i2c->dev, sizeof(struct lp3972), GFP_KERNEL);
	if (!lp3972)
		return -ENOMEM;

	lp3972->i2c = i2c;
	lp3972->dev = &i2c->dev;

	mutex_init(&lp3972->io_lock);

	/* Detect LP3972 */
	ret = lp3972_i2c_read(i2c, LP3972_SYS_CONTROL1_REG, 1, &val);
	/* A successful read with the wrong ID bits is also a detect failure. */
	if (ret == 0 &&
		(val & SYS_CONTROL1_INIT_MASK) != SYS_CONTROL1_INIT_VAL) {
		ret = -ENODEV;
		dev_err(&i2c->dev, "chip reported: val = 0x%x\n", val);
	}
	if (ret < 0) {
		dev_err(&i2c->dev, "failed to detect device. ret = %d\n", ret);
		return ret;
	}

	ret = setup_regulators(lp3972, pdata);
	if (ret < 0)
		return ret;

	i2c_set_clientdata(i2c, lp3972);
	return 0;
}
/* I2C device-id table; the chip has no DT compatible in this driver. */
static const struct i2c_device_id lp3972_i2c_id[] = {
	{ "lp3972", 0 },
	{ }
};
MODULE_DEVICE_TABLE(i2c, lp3972_i2c_id);

static struct i2c_driver lp3972_i2c_driver = {
	.driver = {
		.name = "lp3972",
		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
	},
	.probe = lp3972_i2c_probe,
	.id_table = lp3972_i2c_id,
};

/* Registered at subsys_initcall so consumers probing later find us. */
static int __init lp3972_module_init(void)
{
	return i2c_add_driver(&lp3972_i2c_driver);
}
subsys_initcall(lp3972_module_init);

static void __exit lp3972_module_exit(void)
{
	i2c_del_driver(&lp3972_i2c_driver);
}
module_exit(lp3972_module_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Axel Lin <[email protected]>");
MODULE_DESCRIPTION("LP3972 PMIC driver");
| linux-master | drivers/regulator/lp3972.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) ST-Ericsson SA 2010
*
* Authors: Sundar Iyer <[email protected]> for ST-Ericsson
* Bengt Jonsson <[email protected]> for ST-Ericsson
*
* Power domain regulators on DB8500
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/platform_device.h>
#include <linux/mfd/dbx500-prcmu.h>
#include <linux/regulator/driver.h>
#include <linux/regulator/machine.h>
#include <linux/regulator/db8500-prcmu.h>
#include <linux/regulator/of_regulator.h>
#include <linux/of.h>
#include <linux/module.h>
#include "dbx500-prcmu.h"
/*
 * Enable a power-state regulator: bump the shared active power state
 * refcount via the PRCMU unless this regulator is excluded from it.
 * Idempotent — a second enable while already on does nothing.
 */
static int db8500_regulator_enable(struct regulator_dev *rdev)
{
	struct dbx500_regulator_info *info = rdev_get_drvdata(rdev);

	if (info == NULL)
		return -EINVAL;

	dev_vdbg(rdev_get_dev(rdev), "regulator-%s-enable\n",
		info->desc.name);

	if (!info->is_enabled) {
		info->is_enabled = true;
		if (!info->exclude_from_power_state)
			power_state_active_enable();
	}

	return 0;
}
/*
 * Disable a power-state regulator: drop the shared active power state
 * refcount unless excluded. Mirrors db8500_regulator_enable().
 */
static int db8500_regulator_disable(struct regulator_dev *rdev)
{
	struct dbx500_regulator_info *info = rdev_get_drvdata(rdev);
	int ret = 0;

	if (info == NULL)
		return -EINVAL;

	dev_vdbg(rdev_get_dev(rdev), "regulator-%s-disable\n",
		info->desc.name);

	if (info->is_enabled) {
		info->is_enabled = false;
		if (!info->exclude_from_power_state)
			ret = power_state_active_disable();
	}

	return ret;
}
/* Return the driver's cached enabled state (no hardware readback). */
static int db8500_regulator_is_enabled(struct regulator_dev *rdev)
{
	struct dbx500_regulator_info *info = rdev_get_drvdata(rdev);

	if (!info)
		return -EINVAL;

	dev_vdbg(rdev_get_dev(rdev), "regulator-%s-is_enabled (is_enabled):"
		" %i\n", info->desc.name, info->is_enabled);

	return info->is_enabled;
}
/* db8500 regulator operations (power-state style regulators) */
static const struct regulator_ops db8500_regulator_ops = {
	.enable			= db8500_regulator_enable,
	.disable		= db8500_regulator_disable,
	.is_enabled		= db8500_regulator_is_enabled,
};
/*
 * EPOD control
 *
 * Each EPOD (power domain) can be fully ON, in RAM-retention, or OFF.
 * The two bool arrays below cache the requested states so the correct
 * PRCMU target state can be chosen when ON and RAMRET requests overlap:
 * a full-ON request always wins over retention.
 */
static bool epod_on[NUM_EPOD_ID];
static bool epod_ramret[NUM_EPOD_ID];

static int enable_epod(u16 epod_id, bool ramret)
{
	int ret;

	if (ramret) {
		/* Only drop to RAMRET if no one holds the domain fully on. */
		if (!epod_on[epod_id]) {
			ret = prcmu_set_epod(epod_id, EPOD_STATE_RAMRET);
			if (ret < 0)
				return ret;
		}
		epod_ramret[epod_id] = true;
	} else {
		ret = prcmu_set_epod(epod_id, EPOD_STATE_ON);
		if (ret < 0)
			return ret;
		epod_on[epod_id] = true;
	}

	return 0;
}

static int disable_epod(u16 epod_id, bool ramret)
{
	int ret;

	if (ramret) {
		/* Only power off if the domain is not held fully on. */
		if (!epod_on[epod_id]) {
			ret = prcmu_set_epod(epod_id, EPOD_STATE_OFF);
			if (ret < 0)
				return ret;
		}
		epod_ramret[epod_id] = false;
	} else {
		/* Releasing ON: fall back to RAMRET if still requested. */
		if (epod_ramret[epod_id]) {
			ret = prcmu_set_epod(epod_id, EPOD_STATE_RAMRET);
			if (ret < 0)
				return ret;
		} else {
			ret = prcmu_set_epod(epod_id, EPOD_STATE_OFF);
			if (ret < 0)
				return ret;
		}
		epod_on[epod_id] = false;
	}

	return 0;
}
/*
 * Regulator switch
 *
 * Switch-style regulators map directly onto an EPOD power domain.
 */
static int db8500_regulator_switch_enable(struct regulator_dev *rdev)
{
	struct dbx500_regulator_info *info = rdev_get_drvdata(rdev);
	int ret;

	if (info == NULL)
		return -EINVAL;

	dev_vdbg(rdev_get_dev(rdev), "regulator-switch-%s-enable\n",
		info->desc.name);

	ret = enable_epod(info->epod_id, info->is_ramret);
	if (ret < 0) {
		dev_err(rdev_get_dev(rdev),
			"regulator-switch-%s-enable: prcmu call failed\n",
			info->desc.name);
		goto out;
	}

	/* Cache state only after the PRCMU call succeeded. */
	info->is_enabled = true;
out:
	return ret;
}
/*
 * Disable a switch-style regulator by releasing its EPOD domain.
 *
 * Fix: the error message used "regulator_switch-%s-disable", inconsistent
 * with the "regulator-switch-%s-..." wording used by every other message
 * in this family (see db8500_regulator_switch_enable()).
 */
static int db8500_regulator_switch_disable(struct regulator_dev *rdev)
{
	struct dbx500_regulator_info *info = rdev_get_drvdata(rdev);
	int ret;

	if (info == NULL)
		return -EINVAL;

	dev_vdbg(rdev_get_dev(rdev), "regulator-switch-%s-disable\n",
		info->desc.name);

	ret = disable_epod(info->epod_id, info->is_ramret);
	if (ret < 0) {
		dev_err(rdev_get_dev(rdev),
			"regulator-switch-%s-disable: prcmu call failed\n",
			info->desc.name);
		goto out;
	}

	/* Cache state only after the PRCMU call succeeded. */
	info->is_enabled = false;
out:
	return ret;
}
/* Return the cached enabled state of a switch-style regulator. */
static int db8500_regulator_switch_is_enabled(struct regulator_dev *rdev)
{
	struct dbx500_regulator_info *info = rdev_get_drvdata(rdev);

	if (!info)
		return -EINVAL;

	dev_vdbg(rdev_get_dev(rdev),
		"regulator-switch-%s-is_enabled (is_enabled): %i\n",
		info->desc.name, info->is_enabled);

	return info->is_enabled;
}
/* Operations for EPOD-backed switch regulators. */
static const struct regulator_ops db8500_regulator_switch_ops = {
	.enable			= db8500_regulator_switch_enable,
	.disable		= db8500_regulator_switch_disable,
	.is_enabled		= db8500_regulator_switch_is_enabled,
};
/*
 * Regulator information
 *
 * Static table of all DB8500 regulators, indexed by regulator id.
 * Power-state regulators use db8500_regulator_ops; EPOD switches use
 * db8500_regulator_switch_ops with their .epod_id (and .is_ramret for
 * the retention variants). The ESRAM switches start out .is_enabled
 * because those banks are powered at boot.
 */
static struct dbx500_regulator_info
dbx500_regulator_info[DB8500_NUM_REGULATORS] = {
	[DB8500_REGULATOR_VAPE] = {
		.desc = {
			.name	= "db8500-vape",
			.of_match	= of_match_ptr("db8500_vape"),
			.id	= DB8500_REGULATOR_VAPE,
			.ops	= &db8500_regulator_ops,
			.type	= REGULATOR_VOLTAGE,
			.owner	= THIS_MODULE,
		},
	},
	[DB8500_REGULATOR_VARM] = {
		.desc = {
			.name	= "db8500-varm",
			.of_match	= of_match_ptr("db8500_varm"),
			.id	= DB8500_REGULATOR_VARM,
			.ops	= &db8500_regulator_ops,
			.type	= REGULATOR_VOLTAGE,
			.owner	= THIS_MODULE,
		},
	},
	[DB8500_REGULATOR_VMODEM] = {
		.desc = {
			.name	= "db8500-vmodem",
			.of_match	= of_match_ptr("db8500_vmodem"),
			.id	= DB8500_REGULATOR_VMODEM,
			.ops	= &db8500_regulator_ops,
			.type	= REGULATOR_VOLTAGE,
			.owner	= THIS_MODULE,
		},
	},
	[DB8500_REGULATOR_VPLL] = {
		.desc = {
			.name	= "db8500-vpll",
			.of_match	= of_match_ptr("db8500_vpll"),
			.id	= DB8500_REGULATOR_VPLL,
			.ops	= &db8500_regulator_ops,
			.type	= REGULATOR_VOLTAGE,
			.owner	= THIS_MODULE,
		},
	},
	[DB8500_REGULATOR_VSMPS1] = {
		.desc = {
			.name	= "db8500-vsmps1",
			.of_match	= of_match_ptr("db8500_vsmps1"),
			.id	= DB8500_REGULATOR_VSMPS1,
			.ops	= &db8500_regulator_ops,
			.type	= REGULATOR_VOLTAGE,
			.owner	= THIS_MODULE,
		},
	},
	[DB8500_REGULATOR_VSMPS2] = {
		.desc = {
			.name	= "db8500-vsmps2",
			.of_match	= of_match_ptr("db8500_vsmps2"),
			.id	= DB8500_REGULATOR_VSMPS2,
			.ops	= &db8500_regulator_ops,
			.type	= REGULATOR_VOLTAGE,
			.owner	= THIS_MODULE,
			.fixed_uV = 1800000,
			.n_voltages = 1,
		},
		/* Fixed 1.8 V rail; does not gate the active power state. */
		.exclude_from_power_state = true,
	},
	[DB8500_REGULATOR_VSMPS3] = {
		.desc = {
			.name	= "db8500-vsmps3",
			.of_match	= of_match_ptr("db8500_vsmps3"),
			.id	= DB8500_REGULATOR_VSMPS3,
			.ops	= &db8500_regulator_ops,
			.type	= REGULATOR_VOLTAGE,
			.owner	= THIS_MODULE,
		},
	},
	[DB8500_REGULATOR_VRF1] = {
		.desc = {
			.name	= "db8500-vrf1",
			.of_match	= of_match_ptr("db8500_vrf1"),
			.id	= DB8500_REGULATOR_VRF1,
			.ops	= &db8500_regulator_ops,
			.type	= REGULATOR_VOLTAGE,
			.owner	= THIS_MODULE,
		},
	},
	[DB8500_REGULATOR_SWITCH_SVAMMDSP] = {
		.desc = {
			.name	= "db8500-sva-mmdsp",
			.of_match	= of_match_ptr("db8500_sva_mmdsp"),
			.id	= DB8500_REGULATOR_SWITCH_SVAMMDSP,
			.ops	= &db8500_regulator_switch_ops,
			.type	= REGULATOR_VOLTAGE,
			.owner	= THIS_MODULE,
		},
		.epod_id = EPOD_ID_SVAMMDSP,
	},
	[DB8500_REGULATOR_SWITCH_SVAMMDSPRET] = {
		.desc = {
			.name	= "db8500-sva-mmdsp-ret",
			.of_match	= of_match_ptr("db8500_sva_mmdsp_ret"),
			.id	= DB8500_REGULATOR_SWITCH_SVAMMDSPRET,
			.ops	= &db8500_regulator_switch_ops,
			.type	= REGULATOR_VOLTAGE,
			.owner	= THIS_MODULE,
		},
		.epod_id = EPOD_ID_SVAMMDSP,
		.is_ramret = true,
	},
	[DB8500_REGULATOR_SWITCH_SVAPIPE] = {
		.desc = {
			.name	= "db8500-sva-pipe",
			.of_match	= of_match_ptr("db8500_sva_pipe"),
			.id	= DB8500_REGULATOR_SWITCH_SVAPIPE,
			.ops	= &db8500_regulator_switch_ops,
			.type	= REGULATOR_VOLTAGE,
			.owner	= THIS_MODULE,
		},
		.epod_id = EPOD_ID_SVAPIPE,
	},
	[DB8500_REGULATOR_SWITCH_SIAMMDSP] = {
		.desc = {
			.name	= "db8500-sia-mmdsp",
			.of_match	= of_match_ptr("db8500_sia_mmdsp"),
			.id	= DB8500_REGULATOR_SWITCH_SIAMMDSP,
			.ops	= &db8500_regulator_switch_ops,
			.type	= REGULATOR_VOLTAGE,
			.owner	= THIS_MODULE,
		},
		.epod_id = EPOD_ID_SIAMMDSP,
	},
	[DB8500_REGULATOR_SWITCH_SIAMMDSPRET] = {
		.desc = {
			.name	= "db8500-sia-mmdsp-ret",
			.of_match	= of_match_ptr("db8500_sia_mmdsp_ret"),
			.id	= DB8500_REGULATOR_SWITCH_SIAMMDSPRET,
			.ops	= &db8500_regulator_switch_ops,
			.type	= REGULATOR_VOLTAGE,
			.owner	= THIS_MODULE,
		},
		.epod_id = EPOD_ID_SIAMMDSP,
		.is_ramret = true,
	},
	[DB8500_REGULATOR_SWITCH_SIAPIPE] = {
		.desc = {
			.name	= "db8500-sia-pipe",
			.of_match	= of_match_ptr("db8500_sia_pipe"),
			.id	= DB8500_REGULATOR_SWITCH_SIAPIPE,
			.ops	= &db8500_regulator_switch_ops,
			.type	= REGULATOR_VOLTAGE,
			.owner	= THIS_MODULE,
		},
		.epod_id = EPOD_ID_SIAPIPE,
	},
	[DB8500_REGULATOR_SWITCH_SGA] = {
		.desc = {
			.name	= "db8500-sga",
			.of_match	= of_match_ptr("db8500_sga"),
			.id	= DB8500_REGULATOR_SWITCH_SGA,
			.ops	= &db8500_regulator_switch_ops,
			.type	= REGULATOR_VOLTAGE,
			.owner	= THIS_MODULE,
		},
		.epod_id = EPOD_ID_SGA,
	},
	[DB8500_REGULATOR_SWITCH_B2R2_MCDE] = {
		.desc = {
			.name	= "db8500-b2r2-mcde",
			.of_match	= of_match_ptr("db8500_b2r2_mcde"),
			.id	= DB8500_REGULATOR_SWITCH_B2R2_MCDE,
			.ops	= &db8500_regulator_switch_ops,
			.type	= REGULATOR_VOLTAGE,
			.owner	= THIS_MODULE,
		},
		.epod_id = EPOD_ID_B2R2_MCDE,
	},
	[DB8500_REGULATOR_SWITCH_ESRAM12] = {
		.desc = {
			.name	= "db8500-esram12",
			.of_match	= of_match_ptr("db8500_esram12"),
			.id	= DB8500_REGULATOR_SWITCH_ESRAM12,
			.ops	= &db8500_regulator_switch_ops,
			.type	= REGULATOR_VOLTAGE,
			.owner	= THIS_MODULE,
		},
		.epod_id	= EPOD_ID_ESRAM12,
		.is_enabled	= true,
	},
	[DB8500_REGULATOR_SWITCH_ESRAM12RET] = {
		.desc = {
			.name	= "db8500-esram12-ret",
			.of_match	= of_match_ptr("db8500_esram12_ret"),
			.id	= DB8500_REGULATOR_SWITCH_ESRAM12RET,
			.ops	= &db8500_regulator_switch_ops,
			.type	= REGULATOR_VOLTAGE,
			.owner	= THIS_MODULE,
		},
		.epod_id = EPOD_ID_ESRAM12,
		.is_ramret = true,
	},
	[DB8500_REGULATOR_SWITCH_ESRAM34] = {
		.desc = {
			.name	= "db8500-esram34",
			.of_match	= of_match_ptr("db8500_esram34"),
			.id	= DB8500_REGULATOR_SWITCH_ESRAM34,
			.ops	= &db8500_regulator_switch_ops,
			.type	= REGULATOR_VOLTAGE,
			.owner	= THIS_MODULE,
		},
		.epod_id	= EPOD_ID_ESRAM34,
		.is_enabled	= true,
	},
	[DB8500_REGULATOR_SWITCH_ESRAM34RET] = {
		.desc = {
			.name	= "db8500-esram34-ret",
			.of_match	= of_match_ptr("db8500_esram34_ret"),
			.id	= DB8500_REGULATOR_SWITCH_ESRAM34RET,
			.ops	= &db8500_regulator_switch_ops,
			.type	= REGULATOR_VOLTAGE,
			.owner	= THIS_MODULE,
		},
		.epod_id = EPOD_ID_ESRAM34,
		.is_ramret = true,
	},
};
/*
 * Probe: register every regulator from the static info table. Optional
 * platform data supplies per-regulator init_data arrays (indexed the
 * same as the table); finally hook up the ux500 debugfs support.
 */
static int db8500_regulator_probe(struct platform_device *pdev)
{
	struct regulator_init_data *db8500_init_data;
	struct dbx500_regulator_info *info;
	struct regulator_config config = { };
	struct regulator_dev *rdev;
	int err, i;

	db8500_init_data = dev_get_platdata(&pdev->dev);

	for (i = 0; i < ARRAY_SIZE(dbx500_regulator_info); i++) {
		/* assign per-regulator data */
		info = &dbx500_regulator_info[i];

		config.driver_data = info;
		config.dev = &pdev->dev;
		if (db8500_init_data)
			config.init_data = &db8500_init_data[i];

		rdev = devm_regulator_register(&pdev->dev, &info->desc,
					       &config);
		if (IS_ERR(rdev)) {
			err = PTR_ERR(rdev);
			dev_err(&pdev->dev, "failed to register %s: err %i\n",
				info->desc.name, err);
			return err;
		}

		dev_dbg(&pdev->dev, "regulator-%s-probed\n", info->desc.name);
	}

	ux500_regulator_debug_init(pdev, dbx500_regulator_info,
				   ARRAY_SIZE(dbx500_regulator_info));
	return 0;
}
/* Remove: tear down debugfs; regulators are devm-managed. */
static int db8500_regulator_remove(struct platform_device *pdev)
{
	ux500_regulator_debug_exit();

	return 0;
}
static struct platform_driver db8500_regulator_driver = {
	.driver = {
		.name = "db8500-prcmu-regulators",
		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
	},
	.probe = db8500_regulator_probe,
	.remove = db8500_regulator_remove,
};

/* arch_initcall: other drivers depend on these rails early in boot. */
static int __init db8500_regulator_init(void)
{
	return platform_driver_register(&db8500_regulator_driver);
}

static void __exit db8500_regulator_exit(void)
{
	platform_driver_unregister(&db8500_regulator_driver);
}

arch_initcall(db8500_regulator_init);
module_exit(db8500_regulator_exit);

MODULE_AUTHOR("STMicroelectronics/ST-Ericsson");
MODULE_DESCRIPTION("DB8500 regulator driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/regulator/db8500-prcmu.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Regulator driver for PWM Regulators
*
* Copyright (C) 2014 - STMicroelectronics Inc.
*
* Author: Lee Jones <[email protected]>
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/regulator/driver.h>
#include <linux/regulator/machine.h>
#include <linux/regulator/of_regulator.h>
#include <linux/of.h>
#include <linux/pwm.h>
#include <linux/gpio/consumer.h>
/* Mapping between duty cycle and voltage for continuous-mode operation. */
struct pwm_continuous_reg_data {
	unsigned int min_uV_dutycycle;	/* duty cycle at constraints->min_uV */
	unsigned int max_uV_dutycycle;	/* duty cycle at constraints->max_uV */
	unsigned int dutycycle_unit;	/* scale the above are expressed in */
};

struct pwm_regulator_data {
	/* Shared */
	struct pwm_device *pwm;

	/* Voltage table */
	struct pwm_voltages *duty_cycle_table;

	/* Continuous mode info */
	struct pwm_continuous_reg_data continuous;

	/* regulator descriptor */
	struct regulator_desc desc;

	/* Cached table selector; negative until first initialised. */
	int state;

	/* Enable GPIO */
	struct gpio_desc *enb_gpio;
};

/* One voltage-table entry: output voltage and the duty cycle (percent). */
struct pwm_voltages {
	unsigned int uV;
	unsigned int dutycycle;
};
/*
 * Voltage table call-backs
 */
/*
 * Derive the initial selector from the PWM's current duty cycle by
 * matching it against the voltage table; leaves state untouched (and
 * therefore still negative) when no entry matches.
 */
static void pwm_regulator_init_state(struct regulator_dev *rdev)
{
	struct pwm_regulator_data *drvdata = rdev_get_drvdata(rdev);
	struct pwm_state pwm_state;
	unsigned int dutycycle;
	int i;

	pwm_get_state(drvdata->pwm, &pwm_state);
	dutycycle = pwm_get_relative_duty_cycle(&pwm_state, 100);

	for (i = 0; i < rdev->desc->n_voltages; i++) {
		if (dutycycle == drvdata->duty_cycle_table[i].dutycycle) {
			drvdata->state = i;
			return;
		}
	}
}
/* Return the cached selector, lazily deriving it from hardware once. */
static int pwm_regulator_get_voltage_sel(struct regulator_dev *rdev)
{
	struct pwm_regulator_data *drvdata = rdev_get_drvdata(rdev);

	if (drvdata->state < 0)
		pwm_regulator_init_state(rdev);

	return drvdata->state;
}
/*
 * Apply the duty cycle for the selected voltage-table entry; the cached
 * selector is only updated after the PWM was successfully programmed.
 */
static int pwm_regulator_set_voltage_sel(struct regulator_dev *rdev,
					 unsigned selector)
{
	struct pwm_regulator_data *drvdata = rdev_get_drvdata(rdev);
	struct pwm_state pstate;
	int ret;

	pwm_init_state(drvdata->pwm, &pstate);
	pwm_set_relative_duty_cycle(&pstate,
			drvdata->duty_cycle_table[selector].dutycycle, 100);

	ret = pwm_apply_state(drvdata->pwm, &pstate);
	if (ret) {
		dev_err(&rdev->dev, "Failed to configure PWM: %d\n", ret);
		return ret;
	}

	drvdata->state = selector;

	return 0;
}
/* Translate a table selector into its voltage (uV). */
static int pwm_regulator_list_voltage(struct regulator_dev *rdev,
				      unsigned selector)
{
	struct pwm_regulator_data *drvdata = rdev_get_drvdata(rdev);

	if (selector < rdev->desc->n_voltages)
		return drvdata->duty_cycle_table[selector].uV;

	return -EINVAL;
}
/* Assert the optional enable GPIO, then start the PWM. */
static int pwm_regulator_enable(struct regulator_dev *dev)
{
	struct pwm_regulator_data *drvdata = rdev_get_drvdata(dev);

	gpiod_set_value_cansleep(drvdata->enb_gpio, 1);

	return pwm_enable(drvdata->pwm);
}
/* Stop the PWM, then deassert the optional enable GPIO. */
static int pwm_regulator_disable(struct regulator_dev *dev)
{
	struct pwm_regulator_data *drvdata = rdev_get_drvdata(dev);

	pwm_disable(drvdata->pwm);
	gpiod_set_value_cansleep(drvdata->enb_gpio, 0);

	return 0;
}
/* Enabled only if the enable GPIO (when present) is high AND PWM runs. */
static int pwm_regulator_is_enabled(struct regulator_dev *dev)
{
	struct pwm_regulator_data *drvdata = rdev_get_drvdata(dev);

	if (drvdata->enb_gpio && !gpiod_get_value_cansleep(drvdata->enb_gpio))
		return false;

	return pwm_is_enabled(drvdata->pwm);
}
/*
 * Continuous mode: convert the PWM's current relative duty cycle back
 * into a voltage by linear interpolation between the constraint min/max,
 * handling the inverted case where min_uV maps to the larger duty cycle.
 */
static int pwm_regulator_get_voltage(struct regulator_dev *rdev)
{
	struct pwm_regulator_data *drvdata = rdev_get_drvdata(rdev);
	unsigned int min_uV_duty = drvdata->continuous.min_uV_dutycycle;
	unsigned int max_uV_duty = drvdata->continuous.max_uV_dutycycle;
	unsigned int duty_unit = drvdata->continuous.dutycycle_unit;
	int min_uV = rdev->constraints->min_uV;
	int max_uV = rdev->constraints->max_uV;
	int diff_uV = max_uV - min_uV;
	struct pwm_state pstate;
	unsigned int diff_duty;
	unsigned int voltage;

	pwm_get_state(drvdata->pwm, &pstate);

	voltage = pwm_get_relative_duty_cycle(&pstate, duty_unit);

	/*
	 * The dutycycle for min_uV might be greater than the one for max_uV.
	 * This is happening when the user needs an inversed polarity, but the
	 * PWM device does not support inversing it in hardware.
	 */
	if (max_uV_duty < min_uV_duty) {
		voltage = min_uV_duty - voltage;
		diff_duty = min_uV_duty - max_uV_duty;
	} else {
		voltage = voltage - min_uV_duty;
		diff_duty = max_uV_duty - min_uV_duty;
	}

	/* 64-bit intermediate avoids overflow of duty * microvolt products. */
	voltage = DIV_ROUND_CLOSEST_ULL((u64)voltage * diff_uV, diff_duty);

	return voltage + min_uV;
}
/*
 * Continuous mode: map the requested voltage (req_min_uV is used as the
 * target) onto a relative duty cycle and program the PWM. Mirrors the
 * interpolation in pwm_regulator_get_voltage(), including the inverted
 * polarity case. @selector is unused in continuous mode.
 */
static int pwm_regulator_set_voltage(struct regulator_dev *rdev,
				     int req_min_uV, int req_max_uV,
				     unsigned int *selector)
{
	struct pwm_regulator_data *drvdata = rdev_get_drvdata(rdev);
	unsigned int min_uV_duty = drvdata->continuous.min_uV_dutycycle;
	unsigned int max_uV_duty = drvdata->continuous.max_uV_dutycycle;
	unsigned int duty_unit = drvdata->continuous.dutycycle_unit;
	int min_uV = rdev->constraints->min_uV;
	int max_uV = rdev->constraints->max_uV;
	int diff_uV = max_uV - min_uV;
	struct pwm_state pstate;
	unsigned int diff_duty;
	unsigned int dutycycle;
	int ret;

	pwm_init_state(drvdata->pwm, &pstate);

	/*
	 * The dutycycle for min_uV might be greater than the one for max_uV.
	 * This is happening when the user needs an inversed polarity, but the
	 * PWM device does not support inversing it in hardware.
	 */
	if (max_uV_duty < min_uV_duty)
		diff_duty = min_uV_duty - max_uV_duty;
	else
		diff_duty = max_uV_duty - min_uV_duty;

	dutycycle = DIV_ROUND_CLOSEST_ULL((u64)(req_min_uV - min_uV) *
					  diff_duty,
					  diff_uV);

	if (max_uV_duty < min_uV_duty)
		dutycycle = min_uV_duty - dutycycle;
	else
		dutycycle = min_uV_duty + dutycycle;

	pwm_set_relative_duty_cycle(&pstate, dutycycle, duty_unit);

	ret = pwm_apply_state(drvdata->pwm, &pstate);
	if (ret) {
		dev_err(&rdev->dev, "Failed to configure PWM: %d\n", ret);
		return ret;
	}

	return 0;
}
/* Ops for discrete voltage-table mode. */
static const struct regulator_ops pwm_regulator_voltage_table_ops = {
	.set_voltage_sel = pwm_regulator_set_voltage_sel,
	.get_voltage_sel = pwm_regulator_get_voltage_sel,
	.list_voltage    = pwm_regulator_list_voltage,
	.map_voltage     = regulator_map_voltage_iterate,
	.enable          = pwm_regulator_enable,
	.disable         = pwm_regulator_disable,
	.is_enabled      = pwm_regulator_is_enabled,
};

/* Ops for continuous (linear duty-cycle) mode. */
static const struct regulator_ops pwm_regulator_voltage_continuous_ops = {
	.get_voltage = pwm_regulator_get_voltage,
	.set_voltage = pwm_regulator_set_voltage,
	.enable          = pwm_regulator_enable,
	.disable         = pwm_regulator_disable,
	.is_enabled      = pwm_regulator_is_enabled,
};

/* Template descriptor; .ops and voltage info are filled in at probe. */
static const struct regulator_desc pwm_regulator_desc = {
	.name		= "pwm-regulator",
	.type		= REGULATOR_VOLTAGE,
	.owner		= THIS_MODULE,
	.supply_name    = "pwm",
};
/*
 * Parse the DT "voltage-table" property (pairs of <uV dutycycle>) and
 * configure the driver for table mode. state is primed with a negative
 * value so the first get_voltage_sel() reads back the hardware.
 */
static int pwm_regulator_init_table(struct platform_device *pdev,
				    struct pwm_regulator_data *drvdata)
{
	struct device_node *np = pdev->dev.of_node;
	struct pwm_voltages *duty_cycle_table;
	unsigned int length = 0;
	int ret;

	of_find_property(np, "voltage-table", &length);

	/* Length must be a non-zero multiple of one table entry. */
	if ((length < sizeof(*duty_cycle_table)) ||
	    (length % sizeof(*duty_cycle_table))) {
		dev_err(&pdev->dev, "voltage-table length(%d) is invalid\n",
			length);
		return -EINVAL;
	}

	duty_cycle_table = devm_kzalloc(&pdev->dev, length, GFP_KERNEL);
	if (!duty_cycle_table)
		return -ENOMEM;

	ret = of_property_read_u32_array(np, "voltage-table",
					 (u32 *)duty_cycle_table,
					 length / sizeof(u32));
	if (ret) {
		dev_err(&pdev->dev, "Failed to read voltage-table: %d\n", ret);
		return ret;
	}

	drvdata->state			= -ENOTRECOVERABLE;
	drvdata->duty_cycle_table	= duty_cycle_table;
	drvdata->desc.ops = &pwm_regulator_voltage_table_ops;
	drvdata->desc.n_voltages	= length / sizeof(*duty_cycle_table);

	return 0;
}
/*
 * Configure continuous mode from optional DT properties, defaulting to a
 * 0..100 range over a unit of 100. Both range bounds must fit the unit.
 */
static int pwm_regulator_init_continuous(struct platform_device *pdev,
					 struct pwm_regulator_data *drvdata)
{
	u32 dutycycle_range[2] = { 0, 100 };
	u32 dutycycle_unit = 100;

	drvdata->desc.ops = &pwm_regulator_voltage_continuous_ops;
	drvdata->desc.continuous_voltage_range = true;

	/* Both reads are optional; defaults above apply on absence. */
	of_property_read_u32_array(pdev->dev.of_node,
				   "pwm-dutycycle-range",
				   dutycycle_range, 2);
	of_property_read_u32(pdev->dev.of_node, "pwm-dutycycle-unit",
			     &dutycycle_unit);

	if (dutycycle_range[0] > dutycycle_unit ||
	    dutycycle_range[1] > dutycycle_unit)
		return -EINVAL;

	drvdata->continuous.dutycycle_unit = dutycycle_unit;
	drvdata->continuous.min_uV_dutycycle = dutycycle_range[0];
	drvdata->continuous.max_uV_dutycycle = dutycycle_range[1];

	return 0;
}
/*
 * Probe: choose table vs continuous mode from DT, acquire the PWM and
 * optional enable GPIO (initialised high when the regulator must be on
 * at boot), then register the regulator.
 */
static int pwm_regulator_probe(struct platform_device *pdev)
{
	const struct regulator_init_data *init_data;
	struct pwm_regulator_data *drvdata;
	struct regulator_dev *regulator;
	struct regulator_config config = { };
	struct device_node *np = pdev->dev.of_node;
	enum gpiod_flags gpio_flags;
	int ret;

	if (!np) {
		dev_err(&pdev->dev, "Device Tree node missing\n");
		return -EINVAL;
	}

	drvdata = devm_kzalloc(&pdev->dev, sizeof(*drvdata), GFP_KERNEL);
	if (!drvdata)
		return -ENOMEM;

	/* Start from the shared template; mode init customises .ops below. */
	memcpy(&drvdata->desc, &pwm_regulator_desc, sizeof(drvdata->desc));

	if (of_property_present(np, "voltage-table"))
		ret = pwm_regulator_init_table(pdev, drvdata);
	else
		ret = pwm_regulator_init_continuous(pdev, drvdata);
	if (ret)
		return ret;

	init_data = of_get_regulator_init_data(&pdev->dev, np,
					       &drvdata->desc);
	if (!init_data)
		return -ENOMEM;

	config.of_node = np;
	config.dev = &pdev->dev;
	config.driver_data = drvdata;
	config.init_data = init_data;

	drvdata->pwm = devm_pwm_get(&pdev->dev, NULL);
	if (IS_ERR(drvdata->pwm))
		return dev_err_probe(&pdev->dev, PTR_ERR(drvdata->pwm),
				     "Failed to get PWM\n");

	if (init_data->constraints.boot_on || init_data->constraints.always_on)
		gpio_flags = GPIOD_OUT_HIGH;
	else
		gpio_flags = GPIOD_OUT_LOW;
	drvdata->enb_gpio = devm_gpiod_get_optional(&pdev->dev, "enable",
						    gpio_flags);
	if (IS_ERR(drvdata->enb_gpio)) {
		ret = PTR_ERR(drvdata->enb_gpio);
		dev_err(&pdev->dev, "Failed to get enable GPIO: %d\n", ret);
		return ret;
	}

	ret = pwm_adjust_config(drvdata->pwm);
	if (ret)
		return ret;

	regulator = devm_regulator_register(&pdev->dev,
					    &drvdata->desc, &config);
	if (IS_ERR(regulator)) {
		ret = PTR_ERR(regulator);
		dev_err(&pdev->dev, "Failed to register regulator %s: %d\n",
			drvdata->desc.name, ret);
		return ret;
	}

	return 0;
}
static const struct of_device_id __maybe_unused pwm_of_match[] = {
	{ .compatible = "pwm-regulator" },
	{ },
};
MODULE_DEVICE_TABLE(of, pwm_of_match);

static struct platform_driver pwm_regulator_driver = {
	.driver = {
		.name		= "pwm-regulator",
		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
		.of_match_table = of_match_ptr(pwm_of_match),
	},
	.probe = pwm_regulator_probe,
};

module_platform_driver(pwm_regulator_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Lee Jones <[email protected]>");
MODULE_DESCRIPTION("PWM Regulator Driver");
MODULE_ALIAS("platform:pwm-regulator");
| linux-master | drivers/regulator/pwm-regulator.c |
// SPDX-License-Identifier: GPL-2.0
//
// Regulators driver for Dialog Semiconductor DA903x
//
// Copyright (C) 2006-2008 Marvell International Ltd.
// Copyright (C) 2008 Compulab Ltd.
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/regulator/driver.h>
#include <linux/regulator/machine.h>
#include <linux/mfd/da903x.h>
/* DA9030 Registers */
#define DA9030_INVAL		(-1)
#define DA9030_LDO1011		(0x10)
#define DA9030_LDO15		(0x11)
#define DA9030_LDO1416		(0x12)
#define DA9030_LDO1819		(0x13)
#define DA9030_LDO17		(0x14)
#define DA9030_BUCK2DVM1	(0x15)
#define DA9030_BUCK2DVM2	(0x16)
#define DA9030_RCTL11		(0x17)
#define DA9030_RCTL21		(0x18)
#define DA9030_LDO1		(0x90)
#define DA9030_LDO23		(0x91)
#define DA9030_LDO45		(0x92)
#define DA9030_LDO6		(0x93)
#define DA9030_LDO78		(0x94)
#define DA9030_LDO912		(0x95)
#define DA9030_BUCK		(0x96)
#define DA9030_RCTL12		(0x97)
#define DA9030_RCTL22		(0x98)
#define DA9030_LDO_UNLOCK	(0xa0)
#define DA9030_LDO_UNLOCK_MASK	(0xe0)

/* DA9034 Registers */
/* NOTE(review): comment relocated — DA9034_OVER1 belongs to the DA9034
 * register set but previously sat under the DA9030 heading. */
#define DA9034_OVER1		(0x10)
#define DA9034_INVAL		(-1)
#define DA9034_OVER2		(0x11)
#define DA9034_OVER3		(0x12)
#define DA9034_LDO643		(0x13)
#define DA9034_LDO987		(0x14)
#define DA9034_LDO1110		(0x15)
#define DA9034_LDO1312		(0x16)
#define DA9034_LDO1514		(0x17)
#define DA9034_VCC1		(0x20)
#define DA9034_ADTV1		(0x23)
#define DA9034_ADTV2		(0x24)
#define DA9034_AVRC		(0x25)
#define DA9034_CDTV1		(0x26)
#define DA9034_CDTV2		(0x27)
#define DA9034_CVRC		(0x28)
#define DA9034_SDTV1		(0x29)
#define DA9034_SDTV2		(0x2a)
#define DA9034_SVRC		(0x2b)
#define DA9034_MDTV1		(0x32)
#define DA9034_MDTV2		(0x33)
#define DA9034_MVRC		(0x34)

/* DA9035 Registers. DA9034 Registers are comptabile to DA9035. */
#define DA9035_OVER3		(0x12)
#define DA9035_VCC2		(0x1f)
#define DA9035_3DTV1		(0x2c)
#define DA9035_3DTV2		(0x2d)
#define DA9035_3VRC		(0x2e)
#define DA9035_AUTOSKIP		(0x2f)
struct da903x_regulator_info {
struct regulator_desc desc;
int max_uV;
int vol_reg;
int vol_shift;
int vol_nbits;
int update_reg;
int update_bit;
int enable_reg;
int enable_bit;
};
/*
 * Walk from the regulator class device up to the DA903x MFD core device,
 * which is what the da903x_read/write/update helpers operate on.
 * (NOTE(review): assumes rdev's dev->parent is the regulator platform
 * device and its parent is the PMIC core device — matches the MFD layout.)
 */
static inline struct device *to_da903x_dev(struct regulator_dev *rdev)
{
	return rdev_get_dev(rdev)->parent->parent;
}
/*
 * Validate that the requested minimum voltage lies within the regulator's
 * supported window.  Returns 0 when acceptable, -EINVAL otherwise.  Note
 * only min_uV is range-checked; max_uV is accepted as-is.
 */
static inline int check_range(struct da903x_regulator_info *info,
			      int min_uV, int max_uV)
{
	int lo = info->desc.min_uV;
	int hi = info->max_uV;

	if (min_uV >= lo && min_uV <= hi)
		return 0;

	return -EINVAL;
}
/* DA9030/DA9034 common operations */

/*
 * Program the voltage selector field of the regulator's voltage register.
 * Fixed-voltage regulators (n_voltages == 1) cannot be set: -EINVAL.
 */
static int da903x_set_voltage_sel(struct regulator_dev *rdev, unsigned selector)
{
	struct da903x_regulator_info *info = rdev_get_drvdata(rdev);
	struct device *pmic_dev = to_da903x_dev(rdev);
	uint8_t field_val, field_mask;

	if (rdev->desc->n_voltages == 1)
		return -EINVAL;

	field_mask = ((1 << info->vol_nbits) - 1) << info->vol_shift;
	field_val = selector << info->vol_shift;

	return da903x_update(pmic_dev, info->vol_reg, field_val, field_mask);
}
/*
 * Read back the current voltage selector from the voltage register.
 * Fixed-voltage regulators (n_voltages == 1) always report selector 0.
 * Returns the selector or a negative error code from the register read.
 */
static int da903x_get_voltage_sel(struct regulator_dev *rdev)
{
	struct da903x_regulator_info *info = rdev_get_drvdata(rdev);
	struct device *pmic_dev = to_da903x_dev(rdev);
	uint8_t regval, field_mask;
	int err;

	if (rdev->desc->n_voltages == 1)
		return 0;

	err = da903x_read(pmic_dev, info->vol_reg, &regval);
	if (err)
		return err;

	field_mask = ((1 << info->vol_nbits) - 1) << info->vol_shift;

	return (regval & field_mask) >> info->vol_shift;
}
/* Turn the regulator on by setting its enable bit. */
static int da903x_enable(struct regulator_dev *rdev)
{
	struct da903x_regulator_info *info = rdev_get_drvdata(rdev);
	struct device *pmic_dev = to_da903x_dev(rdev);

	return da903x_set_bits(pmic_dev, info->enable_reg,
			       1 << info->enable_bit);
}
/* Turn the regulator off by clearing its enable bit. */
static int da903x_disable(struct regulator_dev *rdev)
{
	struct da903x_regulator_info *info = rdev_get_drvdata(rdev);
	struct device *pmic_dev = to_da903x_dev(rdev);

	return da903x_clr_bits(pmic_dev, info->enable_reg,
			       1 << info->enable_bit);
}
/*
 * Report whether the regulator is currently enabled.
 *
 * Reads the enable register and tests the regulator's enable bit.
 * Returns 1 if enabled, 0 if disabled, or a negative error code when the
 * register read fails.
 */
static int da903x_is_enabled(struct regulator_dev *rdev)
{
	struct da903x_regulator_info *info = rdev_get_drvdata(rdev);
	struct device *da9034_dev = to_da903x_dev(rdev);
	uint8_t reg_val;
	int ret;

	/* Fix: "&reg_val" had been mojibake-mangled into the "®" glyph. */
	ret = da903x_read(da9034_dev, info->enable_reg, &reg_val);
	if (ret)
		return ret;

	return !!(reg_val & (1 << info->enable_bit));
}
/* DA9030 specific operations */

/*
 * Set the voltage selector for DA9030 LDO1/LDO15, which are write-locked:
 * the UNLOCK bit pattern must be written together with the new selector,
 * and the value is written twice (per the original "write twice" note —
 * presumably a hardware requirement; confirm against the datasheet).
 */
static int da9030_set_ldo1_15_voltage_sel(struct regulator_dev *rdev,
					  unsigned selector)
{
	struct da903x_regulator_info *info = rdev_get_drvdata(rdev);
	struct device *da903x_dev = to_da903x_dev(rdev);
	uint8_t val, mask;
	int ret;

	val = selector << info->vol_shift;
	mask = ((1 << info->vol_nbits) - 1) << info->vol_shift;
	val |= DA9030_LDO_UNLOCK; /* have to set UNLOCK bits */
	mask |= DA9030_LDO_UNLOCK_MASK;

	/* write twice */
	ret = da903x_update(da903x_dev, info->vol_reg, val, mask);
	if (ret)
		return ret;

	return da903x_update(da903x_dev, info->vol_reg, val, mask);
}
/*
 * Map a requested voltage range onto a DA9030 LDO14 selector.  LDO14's
 * selector space is split at the midpoint of its voltage window: selectors
 * with bit 2 set address the lower half, those with bit 2 clear count up
 * from the midpoint (da9030_list_ldo14_voltage is the inverse mapping).
 * Returns a selector, or -EINVAL for an unsupported range.
 */
static int da9030_map_ldo14_voltage(struct regulator_dev *rdev,
				    int min_uV, int max_uV)
{
	struct da903x_regulator_info *info = rdev_get_drvdata(rdev);
	int thresh, sel;

	if (check_range(info, min_uV, max_uV)) {
		pr_err("invalid voltage range (%d, %d) uV\n", min_uV, max_uV);
		return -EINVAL;
	}

	/* Midpoint of the supported window splits the two selector halves. */
	thresh = (info->max_uV + info->desc.min_uV) / 2;
	if (min_uV < thresh) {
		sel = DIV_ROUND_UP(thresh - min_uV, info->desc.uV_step);
		sel |= 0x4; /* bit 2 flags the lower half */
	} else {
		sel = DIV_ROUND_UP(min_uV - thresh, info->desc.uV_step);
	}

	return sel;
}
/*
 * Inverse of da9030_map_ldo14_voltage: convert an LDO14 selector back into
 * a voltage in uV.  Bit 2 selects the lower half of the range (counting
 * down from index 3); without it the selector counts up from the window
 * midpoint.  Returns -EINVAL when the result exceeds the supported maximum.
 */
static int da9030_list_ldo14_voltage(struct regulator_dev *rdev,
				     unsigned selector)
{
	struct da903x_regulator_info *info = rdev_get_drvdata(rdev);
	int volt;

	if (selector & 0x4)
		volt = rdev->desc->min_uV +
		       rdev->desc->uV_step * (3 - (selector & ~0x4));
	else
		volt = (info->max_uV + rdev->desc->min_uV) / 2 +
		       rdev->desc->uV_step * (selector & ~0x4);

	if (volt > info->max_uV)
		return -EINVAL;

	return volt;
}
/* DA9034 specific operations */

/*
 * Set a DVC (dynamic voltage change) supply: write the selector into the
 * voltage register, then set the update bit so the new value takes effect.
 */
static int da9034_set_dvc_voltage_sel(struct regulator_dev *rdev,
				      unsigned selector)
{
	struct da903x_regulator_info *info = rdev_get_drvdata(rdev);
	struct device *pmic_dev = to_da903x_dev(rdev);
	uint8_t field_mask = ((1 << info->vol_nbits) - 1) << info->vol_shift;
	int err;

	err = da903x_update(pmic_dev, info->vol_reg,
			    selector << info->vol_shift, field_mask);
	if (err)
		return err;

	return da903x_set_bits(pmic_dev, info->update_reg,
			       1 << info->update_bit);
}
/*
 * LDO12 has a discontiguous voltage map: 1.70-2.05 V in 50 mV steps for
 * selectors 0-7, then 2.70-3.05 V for selectors 8-15.
 */
static const struct linear_range da9034_ldo12_ranges[] = {
	REGULATOR_LINEAR_RANGE(1700000, 0, 7, 50000),
	REGULATOR_LINEAR_RANGE(2700000, 8, 15, 50000),
};

/* Default ops for linear-range LDOs on both DA9030 and DA9034. */
static const struct regulator_ops da903x_regulator_ldo_ops = {
	.set_voltage_sel = da903x_set_voltage_sel,
	.get_voltage_sel = da903x_get_voltage_sel,
	.list_voltage = regulator_list_voltage_linear,
	.map_voltage = regulator_map_voltage_linear,
	.enable = da903x_enable,
	.disable = da903x_disable,
	.is_enabled = da903x_is_enabled,
};

/* NOTE: this is dedicated for the insane DA9030 LDO14 */
static const struct regulator_ops da9030_regulator_ldo14_ops = {
	.set_voltage_sel = da903x_set_voltage_sel,
	.get_voltage_sel = da903x_get_voltage_sel,
	.list_voltage = da9030_list_ldo14_voltage,
	.map_voltage = da9030_map_ldo14_voltage,
	.enable = da903x_enable,
	.disable = da903x_disable,
	.is_enabled = da903x_is_enabled,
};

/* NOTE: this is dedicated for the DA9030 LDO1 and LDO15 that have locks */
static const struct regulator_ops da9030_regulator_ldo1_15_ops = {
	.set_voltage_sel = da9030_set_ldo1_15_voltage_sel,
	.get_voltage_sel = da903x_get_voltage_sel,
	.list_voltage = regulator_list_voltage_linear,
	.map_voltage = regulator_map_voltage_linear,
	.enable = da903x_enable,
	.disable = da903x_disable,
	.is_enabled = da903x_is_enabled,
};

/* Ops for DVC supplies, which need the extra update-bit write. */
static const struct regulator_ops da9034_regulator_dvc_ops = {
	.set_voltage_sel = da9034_set_dvc_voltage_sel,
	.get_voltage_sel = da903x_get_voltage_sel,
	.list_voltage = regulator_list_voltage_linear,
	.map_voltage = regulator_map_voltage_linear,
	.enable = da903x_enable,
	.disable = da903x_disable,
	.is_enabled = da903x_is_enabled,
};

/* NOTE: this is dedicated for the insane LDO12 */
static const struct regulator_ops da9034_regulator_ldo12_ops = {
	.set_voltage_sel = da903x_set_voltage_sel,
	.get_voltage_sel = da903x_get_voltage_sel,
	.list_voltage = regulator_list_voltage_linear_range,
	.map_voltage = regulator_map_voltage_linear_range,
	.enable = da903x_enable,
	.disable = da903x_disable,
	.is_enabled = da903x_is_enabled,
};
/*
 * Build a da903x_regulator_info entry for a plain LDO.  Voltages are given
 * in mV; step == 0 denotes a fixed-voltage regulator (n_voltages == 1).
 */
#define DA903x_LDO(_pmic, _id, min, max, step, vreg, shift, nbits, ereg, ebit) \
{ \
	.desc = { \
		.name = "LDO" #_id, \
		.ops = &da903x_regulator_ldo_ops, \
		.type = REGULATOR_VOLTAGE, \
		.id = _pmic##_ID_LDO##_id, \
		.n_voltages = (step) ? ((max - min) / step + 1) : 1, \
		.owner = THIS_MODULE, \
		.min_uV = (min) * 1000, \
		.uV_step = (step) * 1000, \
	}, \
	.max_uV = (max) * 1000, \
	.vol_reg = _pmic##_##vreg, \
	.vol_shift = (shift), \
	.vol_nbits = (nbits), \
	.enable_reg = _pmic##_##ereg, \
	.enable_bit = (ebit), \
}

/*
 * Build an entry for a DVC (dynamic voltage change) supply; these carry an
 * additional update register/bit that must be pulsed after a voltage write.
 */
#define DA903x_DVC(_pmic, _id, min, max, step, vreg, nbits, ureg, ubit, ereg, ebit) \
{ \
	.desc = { \
		.name = #_id, \
		.ops = &da9034_regulator_dvc_ops, \
		.type = REGULATOR_VOLTAGE, \
		.id = _pmic##_ID_##_id, \
		.n_voltages = (step) ? ((max - min) / step + 1) : 1, \
		.owner = THIS_MODULE, \
		.min_uV = (min) * 1000, \
		.uV_step = (step) * 1000, \
	}, \
	.max_uV = (max) * 1000, \
	.vol_reg = _pmic##_##vreg, \
	.vol_shift = (0), \
	.vol_nbits = (nbits), \
	.update_reg = _pmic##_##ureg, \
	.update_bit = (ubit), \
	.enable_reg = _pmic##_##ereg, \
	.enable_bit = (ebit), \
}

/* Per-chip convenience wrappers that prefix the register/id namespaces. */
#define DA9034_LDO(_id, min, max, step, vreg, shift, nbits, ereg, ebit) \
	DA903x_LDO(DA9034, _id, min, max, step, vreg, shift, nbits, ereg, ebit)

#define DA9030_LDO(_id, min, max, step, vreg, shift, nbits, ereg, ebit) \
	DA903x_LDO(DA9030, _id, min, max, step, vreg, shift, nbits, ereg, ebit)

#define DA9030_DVC(_id, min, max, step, vreg, nbits, ureg, ubit, ereg, ebit) \
	DA903x_DVC(DA9030, _id, min, max, step, vreg, nbits, ureg, ubit, \
		   ereg, ebit)

#define DA9034_DVC(_id, min, max, step, vreg, nbits, ureg, ubit, ereg, ebit) \
	DA903x_DVC(DA9034, _id, min, max, step, vreg, nbits, ureg, ubit, \
		   ereg, ebit)

#define DA9035_DVC(_id, min, max, step, vreg, nbits, ureg, ubit, ereg, ebit) \
	DA903x_DVC(DA9035, _id, min, max, step, vreg, nbits, ureg, ubit, \
		   ereg, ebit)
/*
 * Static description of every supported regulator on DA9030/DA9034/DA9035.
 * Looked up by platform device id in find_regulator_info(); not const
 * because probe() patches ops for a few quirky entries (LDO12/LDO14/LDO1/
 * LDO15).  Voltages in the macros are expressed in mV.
 */
static struct da903x_regulator_info da903x_regulator_info[] = {
	/* DA9030 */
	DA9030_DVC(BUCK2, 850, 1625, 25, BUCK2DVM1, 5, BUCK2DVM1, 7, RCTL11, 0),
	DA9030_LDO( 1, 1200, 3200, 100, LDO1, 0, 5, RCTL12, 1),
	DA9030_LDO( 2, 1800, 3200, 100, LDO23, 0, 4, RCTL12, 2),
	DA9030_LDO( 3, 1800, 3200, 100, LDO23, 4, 4, RCTL12, 3),
	DA9030_LDO( 4, 1800, 3200, 100, LDO45, 0, 4, RCTL12, 4),
	DA9030_LDO( 5, 1800, 3200, 100, LDO45, 4, 4, RCTL12, 5),
	DA9030_LDO( 6, 1800, 3200, 100, LDO6, 0, 4, RCTL12, 6),
	DA9030_LDO( 7, 1800, 3200, 100, LDO78, 0, 4, RCTL12, 7),
	DA9030_LDO( 8, 1800, 3200, 100, LDO78, 4, 4, RCTL22, 0),
	DA9030_LDO( 9, 1800, 3200, 100, LDO912, 0, 4, RCTL22, 1),
	DA9030_LDO(10, 1800, 3200, 100, LDO1011, 0, 4, RCTL22, 2),
	DA9030_LDO(11, 1800, 3200, 100, LDO1011, 4, 4, RCTL22, 3),
	DA9030_LDO(12, 1800, 3200, 100, LDO912, 4, 4, RCTL22, 4),
	DA9030_LDO(14, 2760, 2940, 30, LDO1416, 0, 3, RCTL11, 4),
	DA9030_LDO(15, 1100, 2650, 50, LDO15, 0, 5, RCTL11, 5),
	DA9030_LDO(16, 1100, 2650, 50, LDO1416, 3, 5, RCTL11, 6),
	DA9030_LDO(17, 1800, 3200, 100, LDO17, 0, 4, RCTL11, 7),
	DA9030_LDO(18, 1800, 3200, 100, LDO1819, 0, 4, RCTL21, 2),
	DA9030_LDO(19, 1800, 3200, 100, LDO1819, 4, 4, RCTL21, 1),
	DA9030_LDO(13, 2100, 2100, 0, INVAL, 0, 0, RCTL11, 3), /* fixed @2.1V */

	/* DA9034 */
	DA9034_DVC(BUCK1, 725, 1500, 25, ADTV2, 5, VCC1, 0, OVER1, 0),
	DA9034_DVC(BUCK2, 725, 1500, 25, CDTV2, 5, VCC1, 2, OVER1, 1),
	DA9034_DVC(LDO2, 725, 1500, 25, SDTV2, 5, VCC1, 4, OVER1, 2),
	DA9034_DVC(LDO1, 1700, 2075, 25, MDTV1, 4, VCC1, 6, OVER3, 4),
	DA9034_LDO( 3, 1800, 3300, 100, LDO643, 0, 4, OVER3, 5),
	DA9034_LDO( 4, 1800, 2900,1100, LDO643, 4, 1, OVER3, 6),
	DA9034_LDO( 6, 2500, 2850, 50, LDO643, 5, 3, OVER2, 0),
	DA9034_LDO( 7, 2700, 3050, 50, LDO987, 0, 3, OVER2, 1),
	DA9034_LDO( 8, 2700, 2850, 50, LDO987, 3, 2, OVER2, 2),
	DA9034_LDO( 9, 2700, 3050, 50, LDO987, 5, 3, OVER2, 3),
	DA9034_LDO(10, 2700, 3050, 50, LDO1110, 0, 3, OVER2, 4),
	DA9034_LDO(11, 1800, 3300, 100, LDO1110, 4, 4, OVER2, 5),
	DA9034_LDO(12, 1700, 3050, 50, LDO1312, 0, 4, OVER3, 6),
	DA9034_LDO(13, 1800, 3300, 100, LDO1312, 4, 4, OVER2, 7),
	DA9034_LDO(14, 1800, 3300, 100, LDO1514, 0, 4, OVER3, 0),
	DA9034_LDO(15, 1800, 3300, 100, LDO1514, 4, 4, OVER3, 1),
	DA9034_LDO(5, 3100, 3100, 0, INVAL, 0, 0, OVER3, 7), /* fixed @3.1V */

	/* DA9035 */
	DA9035_DVC(BUCK3, 1800, 2200, 100, 3DTV1, 3, VCC2, 0, OVER3, 3),
};
/*
 * Look up the static descriptor whose regulator id matches the platform
 * device id.  Returns NULL when no entry matches.
 */
static inline struct da903x_regulator_info *find_regulator_info(int id)
{
	int idx;

	for (idx = 0; idx < ARRAY_SIZE(da903x_regulator_info); idx++) {
		struct da903x_regulator_info *cand =
			&da903x_regulator_info[idx];

		if (cand->desc.id == id)
			return cand;
	}

	return NULL;
}
/*
 * Probe one DA903x regulator platform device.  pdev->id selects the entry
 * in da903x_regulator_info[]; a few quirky regulators get their ops (and,
 * for LDO12, the voltage table) patched in before registration.
 */
static int da903x_regulator_probe(struct platform_device *pdev)
{
	struct da903x_regulator_info *ri = NULL;
	struct regulator_dev *rdev;
	struct regulator_config config = { };

	ri = find_regulator_info(pdev->id);
	if (ri == NULL) {
		dev_err(&pdev->dev, "invalid regulator ID specified\n");
		return -EINVAL;
	}

	/* Workaround for the weird LDO12 voltage setting */
	if (ri->desc.id == DA9034_ID_LDO12) {
		ri->desc.ops = &da9034_regulator_ldo12_ops;
		ri->desc.n_voltages = 16;
		ri->desc.linear_ranges = da9034_ldo12_ranges;
		ri->desc.n_linear_ranges = ARRAY_SIZE(da9034_ldo12_ranges);
	}

	/* LDO14's split selector space needs dedicated list/map ops. */
	if (ri->desc.id == DA9030_ID_LDO14)
		ri->desc.ops = &da9030_regulator_ldo14_ops;

	/* LDO1/LDO15 need the unlock-and-write-twice sequence. */
	if (ri->desc.id == DA9030_ID_LDO1 || ri->desc.id == DA9030_ID_LDO15)
		ri->desc.ops = &da9030_regulator_ldo1_15_ops;

	config.dev = &pdev->dev;
	config.init_data = dev_get_platdata(&pdev->dev);
	config.driver_data = ri;

	rdev = devm_regulator_register(&pdev->dev, &ri->desc, &config);
	if (IS_ERR(rdev)) {
		dev_err(&pdev->dev, "failed to register regulator %s\n",
			ri->desc.name);
		return PTR_ERR(rdev);
	}

	platform_set_drvdata(pdev, rdev);
	return 0;
}
static struct platform_driver da903x_regulator_driver = {
	.driver = {
		.name = "da903x-regulator",
		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
	},
	.probe = da903x_regulator_probe,
};

/*
 * Register at subsys_initcall time so supplies are available before the
 * drivers that consume them probe.
 */
static int __init da903x_regulator_init(void)
{
	return platform_driver_register(&da903x_regulator_driver);
}
subsys_initcall(da903x_regulator_init);

static void __exit da903x_regulator_exit(void)
{
	platform_driver_unregister(&da903x_regulator_driver);
}
module_exit(da903x_regulator_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Eric Miao <[email protected]>"
	      "Mike Rapoport <[email protected]>");
MODULE_DESCRIPTION("Regulator Driver for Dialog Semiconductor DA903X PMIC");
MODULE_ALIAS("platform:da903x-regulator");
| linux-master | drivers/regulator/da903x-regulator.c |
// SPDX-License-Identifier: GPL-2.0
/*
* AS3711 PMIC regulator driver, using DCDC Step Down and LDO supplies
*
* Copyright (C) 2012 Renesas Electronics Corporation
* Author: Guennadi Liakhovetski, <[email protected]>
*/
#include <linux/err.h>
#include <linux/init.h>
#include <linux/mfd/as3711.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/regulator/driver.h>
#include <linux/regulator/of_regulator.h>
#include <linux/slab.h>
/*
* The regulator API supports 4 modes of operataion: FAST, NORMAL, IDLE and
* STANDBY. We map them in the following way to AS3711 SD1-4 DCDC modes:
* FAST: sdX_fast=1
* NORMAL: low_noise=1
* IDLE: low_noise=0
*/
/*
 * Map a regulator operating mode onto the SD1-4 control bits in
 * AS3711_SD_CONTROL_1: FAST sets both the sdX_fast and low_noise bits,
 * NORMAL sets only low_noise, IDLE clears both.  Unknown modes: -EINVAL.
 */
static int as3711_set_mode_sd(struct regulator_dev *rdev, unsigned int mode)
{
	unsigned int fast_bit = rdev->desc->enable_mask;
	unsigned int low_noise_bit = fast_bit << 4;
	u8 bits;

	switch (mode) {
	case REGULATOR_MODE_FAST:
		bits = fast_bit | low_noise_bit;
		break;
	case REGULATOR_MODE_NORMAL:
		bits = low_noise_bit;
		break;
	case REGULATOR_MODE_IDLE:
		bits = 0;
		break;
	default:
		return -EINVAL;
	}

	return regmap_update_bits(rdev->regmap, AS3711_SD_CONTROL_1,
				  fast_bit | low_noise_bit, bits);
}
/*
 * Translate the SD control bits back into a regulator mode constant.
 * Returns a REGULATOR_MODE_* value, a negative regmap error, or -EINVAL
 * for the (unexpected) fast-without-low-noise combination.
 */
static unsigned int as3711_get_mode_sd(struct regulator_dev *rdev)
{
	unsigned int fast_bit = rdev->desc->enable_mask;
	unsigned int low_noise_bit = fast_bit << 4;
	unsigned int mask = fast_bit | low_noise_bit;
	unsigned int val;
	int err;

	err = regmap_read(rdev->regmap, AS3711_SD_CONTROL_1, &val);
	if (err < 0)
		return err;

	val &= mask;
	if (val == mask)
		return REGULATOR_MODE_FAST;
	if (val == low_noise_bit)
		return REGULATOR_MODE_NORMAL;
	if (val == 0)
		return REGULATOR_MODE_IDLE;

	return -EINVAL;
}
/* Step-down (SD1-4) ops: standard regmap helpers plus mode control. */
static const struct regulator_ops as3711_sd_ops = {
	.is_enabled = regulator_is_enabled_regmap,
	.enable = regulator_enable_regmap,
	.disable = regulator_disable_regmap,
	.get_voltage_sel = regulator_get_voltage_sel_regmap,
	.set_voltage_sel = regulator_set_voltage_sel_regmap,
	.list_voltage = regulator_list_voltage_linear_range,
	.map_voltage = regulator_map_voltage_linear_range,
	.get_mode = as3711_get_mode_sd,
	.set_mode = as3711_set_mode_sd,
};

/* Analog LDO (LDO1/LDO2) ops: regmap helpers only, no mode control. */
static const struct regulator_ops as3711_aldo_ops = {
	.is_enabled = regulator_is_enabled_regmap,
	.enable = regulator_enable_regmap,
	.disable = regulator_disable_regmap,
	.get_voltage_sel = regulator_get_voltage_sel_regmap,
	.set_voltage_sel = regulator_set_voltage_sel_regmap,
	.list_voltage = regulator_list_voltage_linear_range,
	.map_voltage = regulator_map_voltage_linear_range,
};

/* Digital LDO (LDO3-8) ops: identical structure, different voltage ranges. */
static const struct regulator_ops as3711_dldo_ops = {
	.is_enabled = regulator_is_enabled_regmap,
	.enable = regulator_enable_regmap,
	.disable = regulator_disable_regmap,
	.get_voltage_sel = regulator_get_voltage_sel_regmap,
	.set_voltage_sel = regulator_set_voltage_sel_regmap,
	.list_voltage = regulator_list_voltage_linear_range,
	.map_voltage = regulator_map_voltage_linear_range,
};

/* SD selector map; note selector 0 is not part of any range (power down). */
static const struct linear_range as3711_sd_ranges[] = {
	REGULATOR_LINEAR_RANGE(612500, 0x1, 0x40, 12500),
	REGULATOR_LINEAR_RANGE(1425000, 0x41, 0x70, 25000),
	REGULATOR_LINEAR_RANGE(2650000, 0x71, 0x7f, 50000),
};

static const struct linear_range as3711_aldo_ranges[] = {
	REGULATOR_LINEAR_RANGE(1200000, 0, 0xf, 50000),
	REGULATOR_LINEAR_RANGE(1800000, 0x10, 0x1f, 100000),
};

/* DLDO map has a hole: selectors 0x11-0x1f are not valid. */
static const struct linear_range as3711_dldo_ranges[] = {
	REGULATOR_LINEAR_RANGE(900000, 0, 0x10, 50000),
	REGULATOR_LINEAR_RANGE(1750000, 0x20, 0x3f, 50000),
};
/*
 * Build one regulator_desc entry: _sfx selects the ops/ranges family
 * (sd/aldo/dldo), _vmask is the selector mask within the voltage register.
 */
#define AS3711_REG(_id, _en_reg, _en_bit, _vmask, _sfx)			\
	[AS3711_REGULATOR_ ## _id] = {					\
	.name = "as3711-regulator-" # _id,				\
	.id = AS3711_REGULATOR_ ## _id,					\
	.n_voltages = (_vmask + 1),					\
	.ops = &as3711_ ## _sfx ## _ops,				\
	.type = REGULATOR_VOLTAGE,					\
	.owner = THIS_MODULE,						\
	.vsel_reg = AS3711_ ## _id ## _VOLTAGE,				\
	.vsel_mask = _vmask,						\
	.enable_reg = AS3711_ ## _en_reg,				\
	.enable_mask = BIT(_en_bit),					\
	.linear_ranges = as3711_ ## _sfx ## _ranges,			\
	.n_linear_ranges = ARRAY_SIZE(as3711_ ## _sfx ## _ranges),	\
}

static const struct regulator_desc as3711_reg_desc[] = {
	AS3711_REG(SD_1, SD_CONTROL, 0, 0x7f, sd),
	AS3711_REG(SD_2, SD_CONTROL, 1, 0x7f, sd),
	AS3711_REG(SD_3, SD_CONTROL, 2, 0x7f, sd),
	AS3711_REG(SD_4, SD_CONTROL, 3, 0x7f, sd),
	AS3711_REG(LDO_1, LDO_1_VOLTAGE, 7, 0x1f, aldo),
	AS3711_REG(LDO_2, LDO_2_VOLTAGE, 7, 0x1f, aldo),
	AS3711_REG(LDO_3, LDO_3_VOLTAGE, 7, 0x3f, dldo),
	AS3711_REG(LDO_4, LDO_4_VOLTAGE, 7, 0x3f, dldo),
	AS3711_REG(LDO_5, LDO_5_VOLTAGE, 7, 0x3f, dldo),
	AS3711_REG(LDO_6, LDO_6_VOLTAGE, 7, 0x3f, dldo),
	AS3711_REG(LDO_7, LDO_7_VOLTAGE, 7, 0x3f, dldo),
	AS3711_REG(LDO_8, LDO_8_VOLTAGE, 7, 0x3f, dldo),
	/* StepUp output voltage depends on supplying regulator */
};

#define AS3711_REGULATOR_NUM ARRAY_SIZE(as3711_reg_desc)

/* DT subnode names, index-aligned with as3711_reg_desc[]. */
static struct of_regulator_match
as3711_regulator_matches[AS3711_REGULATOR_NUM] = {
	[AS3711_REGULATOR_SD_1] = { .name = "sd1" },
	[AS3711_REGULATOR_SD_2] = { .name = "sd2" },
	[AS3711_REGULATOR_SD_3] = { .name = "sd3" },
	[AS3711_REGULATOR_SD_4] = { .name = "sd4" },
	[AS3711_REGULATOR_LDO_1] = { .name = "ldo1" },
	[AS3711_REGULATOR_LDO_2] = { .name = "ldo2" },
	[AS3711_REGULATOR_LDO_3] = { .name = "ldo3" },
	[AS3711_REGULATOR_LDO_4] = { .name = "ldo4" },
	[AS3711_REGULATOR_LDO_5] = { .name = "ldo5" },
	[AS3711_REGULATOR_LDO_6] = { .name = "ldo6" },
	[AS3711_REGULATOR_LDO_7] = { .name = "ldo7" },
	[AS3711_REGULATOR_LDO_8] = { .name = "ldo8" },
};
/*
 * Parse the "regulators" child of the MFD parent's DT node, filling the
 * platform data's init_data and the caller's of_node array for every
 * regulator that matched a subnode.  Returns 0 or a negative error.
 */
static int as3711_regulator_parse_dt(struct device *dev,
		struct device_node **of_node, const int count)
{
	struct as3711_regulator_pdata *pdata = dev_get_platdata(dev);
	struct device_node *regs_np;
	int err, idx;

	regs_np = of_get_child_by_name(dev->parent->of_node, "regulators");
	if (!regs_np) {
		dev_err(dev, "regulator node not found\n");
		return -ENODEV;
	}

	err = of_regulator_match(dev->parent, regs_np,
				 as3711_regulator_matches, count);
	of_node_put(regs_np);
	if (err < 0) {
		dev_err(dev, "Error parsing regulator init data: %d\n", err);
		return err;
	}

	for (idx = 0; idx < count; idx++) {
		struct of_regulator_match *m = &as3711_regulator_matches[idx];

		if (!m->of_node)
			continue;

		pdata->init_data[idx] = m->init_data;
		of_node[idx] = m->of_node;
	}

	return 0;
}
/*
 * Probe the AS3711 regulator subdevice: platform data is mandatory; DT
 * init data (if the parent has an of_node) overrides it per regulator.
 * All regulators in as3711_reg_desc[] are registered unconditionally.
 */
static int as3711_regulator_probe(struct platform_device *pdev)
{
	struct as3711_regulator_pdata *pdata = dev_get_platdata(&pdev->dev);
	struct as3711 *as3711 = dev_get_drvdata(pdev->dev.parent);
	struct regulator_config config = {.dev = &pdev->dev,};
	struct device_node *of_node[AS3711_REGULATOR_NUM] = {};
	struct regulator_dev *rdev;
	int ret;
	int id;

	if (!pdata) {
		dev_err(&pdev->dev, "No platform data...\n");
		return -ENODEV;
	}

	if (pdev->dev.parent->of_node) {
		ret = as3711_regulator_parse_dt(&pdev->dev, of_node, AS3711_REGULATOR_NUM);
		if (ret < 0) {
			dev_err(&pdev->dev, "DT parsing failed: %d\n", ret);
			return ret;
		}
	}

	for (id = 0; id < AS3711_REGULATOR_NUM; id++) {
		config.init_data = pdata->init_data[id];
		config.regmap = as3711->regmap;
		config.of_node = of_node[id];

		rdev = devm_regulator_register(&pdev->dev, &as3711_reg_desc[id],
					       &config);
		if (IS_ERR(rdev)) {
			dev_err(&pdev->dev, "Failed to register regulator %s\n",
				as3711_reg_desc[id].name);
			return PTR_ERR(rdev);
		}
	}

	return 0;
}
static struct platform_driver as3711_regulator_driver = {
	.driver = {
		.name = "as3711-regulator",
		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
	},
	.probe = as3711_regulator_probe,
};

/*
 * Register at subsys_initcall time so supplies come up before their
 * consumers probe.
 */
static int __init as3711_regulator_init(void)
{
	return platform_driver_register(&as3711_regulator_driver);
}
subsys_initcall(as3711_regulator_init);

static void __exit as3711_regulator_exit(void)
{
	platform_driver_unregister(&as3711_regulator_driver);
}
module_exit(as3711_regulator_exit);

MODULE_AUTHOR("Guennadi Liakhovetski <[email protected]>")
MODULE_DESCRIPTION("AS3711 regulator driver");
MODULE_ALIAS("platform:as3711-regulator");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/regulator/as3711-regulator.c |
// SPDX-License-Identifier: GPL-2.0+
//
// Copyright (C) 2011 Freescale Semiconductor, Inc. All Rights Reserved.
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/mfd/syscon.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/regmap.h>
#include <linux/regulator/driver.h>
#include <linux/regulator/of_regulator.h>
#include <linux/regulator/machine.h>
#define LDO_RAMP_UP_UNIT_IN_CYCLES 64 /* 64 cycles per step */
#define LDO_RAMP_UP_FREQ_IN_MHZ 24 /* cycle based on 24M OSC */

/* Special selector values: 0 gates the pass FET off, 0x1f bypasses it. */
#define LDO_POWER_GATE 0x00
#define LDO_FET_FULL_ON 0x1f

/* Per-regulator driver state, parsed from DT in probe(). */
struct anatop_regulator {
	u32 delay_reg;		/* register holding the ramp slew field */
	int delay_bit_shift;	/* position of the slew field */
	int delay_bit_width;	/* width of the slew field; 0 = no delay */
	struct regulator_desc rdesc;
	bool bypass;		/* true while FET is in full-on bypass */
	int sel;		/* cached selector while bypassed/gated */
};
/*
 * Compute the LDO ramp-up delay in microseconds for a selector change.
 * Each selector step takes (LDO_RAMP_UP_UNIT_IN_CYCLES << slew) cycles of
 * the 24 MHz oscillator, where 'slew' is read from the delay register.
 * No delay is needed when stepping down or when the regulator has no
 * delay field (delay_bit_width == 0).
 *
 * Returns the delay in us (>= 0), or a negative error code if the delay
 * register cannot be read (fix: the original ignored the regmap_read()
 * result and then used an uninitialized value on failure).
 */
static int anatop_regmap_set_voltage_time_sel(struct regulator_dev *reg,
					      unsigned int old_sel,
					      unsigned int new_sel)
{
	struct anatop_regulator *anatop_reg = rdev_get_drvdata(reg);
	u32 val;
	int ret;

	/* check whether need to care about LDO ramp up speed */
	if (!anatop_reg->delay_bit_width || new_sel <= old_sel)
		return 0;

	/*
	 * the delay for LDO ramp up time is
	 * based on the register setting, we need
	 * to calculate how many steps LDO need to
	 * ramp up, and how much delay needed. (us)
	 */
	ret = regmap_read(reg->regmap, anatop_reg->delay_reg, &val);
	if (ret)
		return ret;	/* don't consume an uninitialized 'val' */

	val = (val >> anatop_reg->delay_bit_shift) &
	      ((1 << anatop_reg->delay_bit_width) - 1);

	return (new_sel - old_sel) * (LDO_RAMP_UP_UNIT_IN_CYCLES << val) /
	       LDO_RAMP_UP_FREQ_IN_MHZ + 1;
}
/*
 * Enable the LDO by restoring the cached voltage selector, or the full-on
 * selector when the regulator is in bypass mode.
 */
static int anatop_regmap_enable(struct regulator_dev *reg)
{
	struct anatop_regulator *areg = rdev_get_drvdata(reg);
	int target;

	if (areg->bypass)
		target = LDO_FET_FULL_ON;
	else
		target = areg->sel;

	return regulator_set_voltage_sel_regmap(reg, target);
}
/*
 * "Disable" the LDO by writing the power-gate selector, which turns the
 * pass FET off entirely.
 */
static int anatop_regmap_disable(struct regulator_dev *reg)
{
	return regulator_set_voltage_sel_regmap(reg, LDO_POWER_GATE);
}

/* Enabled means any selector other than the power-gate value. */
static int anatop_regmap_is_enabled(struct regulator_dev *reg)
{
	return regulator_get_voltage_sel_regmap(reg) != LDO_POWER_GATE;
}
/*
 * Set the selector for a core regulator.  While bypassed or gated the
 * selector is only cached (the hardware field is in use for the bypass /
 * power-gate value); otherwise it is written to hardware and cached on
 * success.
 */
static int anatop_regmap_core_set_voltage_sel(struct regulator_dev *reg,
					      unsigned selector)
{
	struct anatop_regulator *areg = rdev_get_drvdata(reg);
	int err;

	if (areg->bypass || !anatop_regmap_is_enabled(reg)) {
		areg->sel = selector;
		return 0;
	}

	err = regulator_set_voltage_sel_regmap(reg, selector);
	if (err)
		return err;

	areg->sel = selector;
	return 0;
}
static int anatop_regmap_core_get_voltage_sel(struct regulator_dev *reg)
{
struct anatop_regulator *anatop_reg = rdev_get_drvdata(reg);
if (anatop_reg->bypass || !anatop_regmap_is_enabled(reg))
return anatop_reg->sel;
return regulator_get_voltage_sel_regmap(reg);
}
/*
 * Report the cached bypass state.  The hardware selector is read back only
 * to sanity-check the cache: the full-on selector implies bypass, any
 * other non-gated selector implies regulation.
 */
static int anatop_regmap_get_bypass(struct regulator_dev *reg, bool *enable)
{
	struct anatop_regulator *anatop_reg = rdev_get_drvdata(reg);
	int sel;

	sel = regulator_get_voltage_sel_regmap(reg);
	if (sel == LDO_FET_FULL_ON)
		WARN_ON(!anatop_reg->bypass);
	else if (sel != LDO_POWER_GATE)
		WARN_ON(anatop_reg->bypass);

	*enable = anatop_reg->bypass;

	return 0;
}
/*
 * Switch between bypass (FET full on) and regulation at the cached
 * selector.  A request matching the current state is a no-op.
 */
static int anatop_regmap_set_bypass(struct regulator_dev *reg, bool enable)
{
	struct anatop_regulator *areg = rdev_get_drvdata(reg);
	int target;

	if (enable == areg->bypass)
		return 0;

	target = enable ? LDO_FET_FULL_ON : areg->sel;
	areg->bypass = enable;

	return regulator_set_voltage_sel_regmap(reg, target);
}
/*
 * Ops for simple (non-core) anatop regulators.  Deliberately NOT const:
 * probe() patches in enable/disable/is_enabled when the DT supplies an
 * "anatop-enable-bit" property.
 */
static struct regulator_ops anatop_rops = {
	.set_voltage_sel = regulator_set_voltage_sel_regmap,
	.get_voltage_sel = regulator_get_voltage_sel_regmap,
	.list_voltage = regulator_list_voltage_linear,
	.map_voltage = regulator_map_voltage_linear,
};

/*
 * Ops for core regulators: software-cached selector, ramp-delay handling
 * and bypass support on top of the regmap helpers.
 */
static const struct regulator_ops anatop_core_rops = {
	.enable = anatop_regmap_enable,
	.disable = anatop_regmap_disable,
	.is_enabled = anatop_regmap_is_enabled,
	.set_voltage_sel = anatop_regmap_core_set_voltage_sel,
	.set_voltage_time_sel = anatop_regmap_set_voltage_time_sel,
	.get_voltage_sel = anatop_regmap_core_get_voltage_sel,
	.list_voltage = regulator_list_voltage_linear,
	.map_voltage = regulator_map_voltage_linear,
	.get_bypass = anatop_regmap_get_bypass,
	.set_bypass = anatop_regmap_set_bypass,
};
/*
 * Probe one anatop regulator DT node: build a regulator_desc entirely from
 * DT properties (register offset, selector field geometry, voltage window),
 * pick core vs. simple ops depending on whether a ramp-delay field exists,
 * and register the regulator against the parent syscon regmap.
 */
static int anatop_regulator_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct device_node *np = dev->of_node;
	struct device_node *anatop_np;
	struct regulator_desc *rdesc;
	struct regulator_dev *rdev;
	struct anatop_regulator *sreg;
	struct regulator_init_data *initdata;
	struct regulator_config config = { };
	struct regmap *regmap;
	u32 control_reg;
	u32 vol_bit_shift;
	u32 vol_bit_width;
	u32 min_bit_val;
	u32 min_voltage;
	u32 max_voltage;
	int ret = 0;
	u32 val;

	sreg = devm_kzalloc(dev, sizeof(*sreg), GFP_KERNEL);
	if (!sreg)
		return -ENOMEM;
	rdesc = &sreg->rdesc;
	rdesc->type = REGULATOR_VOLTAGE;
	rdesc->owner = THIS_MODULE;

	of_property_read_string(np, "regulator-name", &rdesc->name);
	if (!rdesc->name) {
		dev_err(dev, "failed to get a regulator-name\n");
		return -EINVAL;
	}

	initdata = of_get_regulator_init_data(dev, np, rdesc);
	if (!initdata)
		return -ENOMEM;

	initdata->supply_regulator = "vin";

	/* The parent node is the anatop syscon providing the regmap. */
	anatop_np = of_get_parent(np);
	if (!anatop_np)
		return -ENODEV;
	regmap = syscon_node_to_regmap(anatop_np);
	of_node_put(anatop_np);
	if (IS_ERR(regmap))
		return PTR_ERR(regmap);

	/* Mandatory properties describing the selector field. */
	ret = of_property_read_u32(np, "anatop-reg-offset", &control_reg);
	if (ret) {
		dev_err(dev, "no anatop-reg-offset property set\n");
		return ret;
	}
	ret = of_property_read_u32(np, "anatop-vol-bit-width", &vol_bit_width);
	if (ret) {
		dev_err(dev, "no anatop-vol-bit-width property set\n");
		return ret;
	}
	ret = of_property_read_u32(np, "anatop-vol-bit-shift", &vol_bit_shift);
	if (ret) {
		dev_err(dev, "no anatop-vol-bit-shift property set\n");
		return ret;
	}
	ret = of_property_read_u32(np, "anatop-min-bit-val", &min_bit_val);
	if (ret) {
		dev_err(dev, "no anatop-min-bit-val property set\n");
		return ret;
	}
	ret = of_property_read_u32(np, "anatop-min-voltage", &min_voltage);
	if (ret) {
		dev_err(dev, "no anatop-min-voltage property set\n");
		return ret;
	}
	ret = of_property_read_u32(np, "anatop-max-voltage", &max_voltage);
	if (ret) {
		dev_err(dev, "no anatop-max-voltage property set\n");
		return ret;
	}

	/* read LDO ramp up setting, only for core reg */
	of_property_read_u32(np, "anatop-delay-reg-offset",
			     &sreg->delay_reg);
	of_property_read_u32(np, "anatop-delay-bit-width",
			     &sreg->delay_bit_width);
	of_property_read_u32(np, "anatop-delay-bit-shift",
			     &sreg->delay_bit_shift);

	/* 25 mV per selector step above the minimum selector. */
	rdesc->n_voltages = (max_voltage - min_voltage) / 25000 + 1
			    + min_bit_val;
	rdesc->min_uV = min_voltage;
	rdesc->uV_step = 25000;
	rdesc->linear_min_sel = min_bit_val;
	rdesc->vsel_reg = control_reg;
	rdesc->vsel_mask = ((1 << vol_bit_width) - 1) << vol_bit_shift;
	rdesc->min_dropout_uV = 125000;

	config.dev = &pdev->dev;
	config.init_data = initdata;
	config.driver_data = sreg;
	config.of_node = pdev->dev.of_node;
	config.regmap = regmap;

	/* Only core regulators have the ramp up delay configuration. */
	if (control_reg && sreg->delay_bit_width) {
		rdesc->ops = &anatop_core_rops;

		/* Seed the software selector cache from hardware. */
		ret = regmap_read(config.regmap, rdesc->vsel_reg, &val);
		if (ret) {
			dev_err(dev, "failed to read initial state\n");
			return ret;
		}

		sreg->sel = (val & rdesc->vsel_mask) >> vol_bit_shift;
		if (sreg->sel == LDO_FET_FULL_ON) {
			sreg->sel = 0;
			sreg->bypass = true;
		}

		/*
		 * In case vddpu was disabled by the bootloader, we need to set
		 * a sane default until imx6-cpufreq was probed and changes the
		 * voltage to the correct value. In this case we set 1.25V.
		 */
		if (!sreg->sel && !strcmp(rdesc->name, "vddpu"))
			sreg->sel = 22;

		/* set the default voltage of the pcie phy to be 1.100v */
		if (!sreg->sel && !strcmp(rdesc->name, "vddpcie"))
			sreg->sel = 0x10;

		if (!sreg->bypass && !sreg->sel) {
			dev_err(&pdev->dev, "Failed to read a valid default voltage selector.\n");
			return -EINVAL;
		}
	} else {
		u32 enable_bit;

		rdesc->ops = &anatop_rops;

		/* Optional enable bit turns the simple ops into switchable. */
		if (!of_property_read_u32(np, "anatop-enable-bit",
					  &enable_bit)) {
			anatop_rops.enable  = regulator_enable_regmap;
			anatop_rops.disable = regulator_disable_regmap;
			anatop_rops.is_enabled = regulator_is_enabled_regmap;

			rdesc->enable_reg = control_reg;
			rdesc->enable_mask = BIT(enable_bit);
		}
	}

	/* register regulator */
	rdev = devm_regulator_register(dev, rdesc, &config);
	if (IS_ERR(rdev)) {
		ret = PTR_ERR(rdev);
		if (ret == -EPROBE_DEFER)
			dev_dbg(dev, "failed to register %s, deferring...\n",
				rdesc->name);
		else
			dev_err(dev, "failed to register %s\n", rdesc->name);
		return ret;
	}

	platform_set_drvdata(pdev, rdev);

	return 0;
}
static const struct of_device_id of_anatop_regulator_match_tbl[] = {
	{ .compatible = "fsl,anatop-regulator", },
	{ /* end */ }
};
MODULE_DEVICE_TABLE(of, of_anatop_regulator_match_tbl);

static struct platform_driver anatop_regulator_driver = {
	.driver = {
		.name = "anatop_regulator",
		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
		.of_match_table = of_anatop_regulator_match_tbl,
	},
	.probe = anatop_regulator_probe,
};

/*
 * Registered at postcore_initcall time so the core supplies are available
 * very early, before most other drivers probe.
 */
static int __init anatop_regulator_init(void)
{
	return platform_driver_register(&anatop_regulator_driver);
}
postcore_initcall(anatop_regulator_init);

static void __exit anatop_regulator_exit(void)
{
	platform_driver_unregister(&anatop_regulator_driver);
}
module_exit(anatop_regulator_exit);

MODULE_AUTHOR("Nancy Chen <[email protected]>");
MODULE_AUTHOR("Ying-Chun Liu (PaulLiu) <[email protected]>");
MODULE_DESCRIPTION("ANATOP Regulator driver");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:anatop_regulator");
| linux-master | drivers/regulator/anatop-regulator.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) ST-Ericsson SA 2010
*
* Authors: Sundar Iyer <[email protected]> for ST-Ericsson
* Bengt Jonsson <[email protected]> for ST-Ericsson
*
* UX500 common part of Power domain regulators
*/
#include <linux/kernel.h>
#include <linux/err.h>
#include <linux/regulator/driver.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/module.h>
#include "dbx500-prcmu.h"
/*
 * power state reference count
 */
/* Protected by power_state_active_lock; counts outstanding enables. */
static int power_state_active_cnt; /* will initialize to zero */
static DEFINE_SPINLOCK(power_state_active_lock);

/*
 * Take one reference on the "power state active" count.  Callable from any
 * context that may take a spinlock with interrupts disabled.  Must be
 * balanced by power_state_active_disable().
 */
void power_state_active_enable(void)
{
	unsigned long flags;

	spin_lock_irqsave(&power_state_active_lock, flags);
	power_state_active_cnt++;
	spin_unlock_irqrestore(&power_state_active_lock, flags);
}
/*
 * Drop one reference on the "power state active" count.
 * Returns 0 on success, or -EINVAL (with an error logged) when the count
 * would go negative, i.e. enable/disable calls are unbalanced.
 */
int power_state_active_disable(void)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&power_state_active_lock, flags);
	if (power_state_active_cnt > 0) {
		power_state_active_cnt--;
		ret = 0;
	} else {
		pr_err("power state: unbalanced enable/disable calls\n");
		ret = -EINVAL;
	}
	spin_unlock_irqrestore(&power_state_active_lock, flags);

	return ret;
}
#ifdef CONFIG_REGULATOR_DEBUG
/* Snapshot the current reference count under the lock (debugfs only). */
static int power_state_active_get(void)
{
	unsigned long flags;
	int cnt;

	spin_lock_irqsave(&power_state_active_lock, flags);
	cnt = power_state_active_cnt;
	spin_unlock_irqrestore(&power_state_active_lock, flags);

	return cnt;
}

/* Debugfs bookkeeping: regulator table plus per-regulator suspend state. */
static struct ux500_regulator_debug {
	struct dentry *dir;
	struct dbx500_regulator_info *regulator_array;
	int num_regulators;
	u8 *state_before_suspend;
	u8 *state_after_suspend;
} rdebug;

/* debugfs "power-state-count" file: print the active reference count. */
static int ux500_regulator_power_state_cnt_show(struct seq_file *s, void *p)
{
	/* print power state count */
	seq_printf(s, "ux500-regulator power state count: %i\n",
		   power_state_active_get());

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(ux500_regulator_power_state_cnt);
/*
 * debugfs "status" show: dump, per registered regulator, its current
 * enabled state together with the states snapshotted before and after
 * the last suspend cycle.
 */
static int ux500_regulator_status_show(struct seq_file *s, void *p)
{
	int i;

	/* print dump header */
	seq_puts(s, "ux500-regulator status:\n");
	seq_printf(s, "%31s : %8s : %8s\n", "current", "before", "after");

	for (i = 0; i < rdebug.num_regulators; i++) {
		struct dbx500_regulator_info *info;
		/* Access per-regulator data */
		info = &rdebug.regulator_array[i];

		/* print status */
		seq_printf(s, "%20s : %8s : %8s : %8s\n",
			   info->desc.name,
			   info->is_enabled ? "enabled" : "disabled",
			   rdebug.state_before_suspend[i] ? "enabled" : "disabled",
			   rdebug.state_after_suspend[i] ? "enabled" : "disabled");
	}

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(ux500_regulator_status);
/*
 * ux500_regulator_debug_init - create the "ux500-regulator" debugfs tree
 * @pdev: owning platform device, passed as the files' private data
 * @regulator_info: per-regulator state array, kept by reference
 * @num_regulators: number of entries in @regulator_info
 *
 * Creates the directory plus the "status" and "power-state-count" files
 * and allocates the before/after-suspend snapshot buffers.
 *
 * NOTE(review): the debugfs files are created before the snapshot
 * buffers are allocated; a very early "status" read could observe NULL
 * state_before_suspend/state_after_suspend — confirm whether that
 * window matters in practice.
 *
 * Returns 0 on success, -ENOMEM on allocation failure (the debugfs
 * directory is removed again on the error path).
 */
int
ux500_regulator_debug_init(struct platform_device *pdev,
			   struct dbx500_regulator_info *regulator_info,
			   int num_regulators)
{
	/* create directory */
	rdebug.dir = debugfs_create_dir("ux500-regulator", NULL);

	/* create "status" file */
	debugfs_create_file("status", 0444, rdebug.dir, &pdev->dev,
			    &ux500_regulator_status_fops);

	/* create "power-state-count" file */
	debugfs_create_file("power-state-count", 0444, rdebug.dir,
			    &pdev->dev, &ux500_regulator_power_state_cnt_fops);

	rdebug.regulator_array = regulator_info;
	rdebug.num_regulators = num_regulators;

	rdebug.state_before_suspend = kzalloc(num_regulators, GFP_KERNEL);
	if (!rdebug.state_before_suspend)
		goto exit_destroy_power_state;

	rdebug.state_after_suspend = kzalloc(num_regulators, GFP_KERNEL);
	if (!rdebug.state_after_suspend)
		goto exit_free;

	return 0;

exit_free:
	kfree(rdebug.state_before_suspend);
exit_destroy_power_state:
	debugfs_remove_recursive(rdebug.dir);
	return -ENOMEM;
}
/*
 * Remove the debugfs tree and free the suspend-state snapshot buffers.
 * Always returns 0.
 */
int ux500_regulator_debug_exit(void)
{
	debugfs_remove_recursive(rdebug.dir);
	kfree(rdebug.state_after_suspend);
	kfree(rdebug.state_before_suspend);

	return 0;
}
#endif
| linux-master | drivers/regulator/dbx500-prcmu.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2016 Gateworks Corporation, Inc. All Rights Reserved.
*/
#include <linux/i2c.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/regmap.h>
#include <linux/regulator/driver.h>
#include <linux/regulator/machine.h>
#include <linux/regulator/of_regulator.h>
#define DRIVER_NAME "ltc3676"
/* LTC3676 Registers */
#define LTC3676_BUCK1 0x01
#define LTC3676_BUCK2 0x02
#define LTC3676_BUCK3 0x03
#define LTC3676_BUCK4 0x04
#define LTC3676_LDOA 0x05
#define LTC3676_LDOB 0x06
#define LTC3676_SQD1 0x07
#define LTC3676_SQD2 0x08
#define LTC3676_CNTRL 0x09
#define LTC3676_DVB1A 0x0A
#define LTC3676_DVB1B 0x0B
#define LTC3676_DVB2A 0x0C
#define LTC3676_DVB2B 0x0D
#define LTC3676_DVB3A 0x0E
#define LTC3676_DVB3B 0x0F
#define LTC3676_DVB4A 0x10
#define LTC3676_DVB4B 0x11
#define LTC3676_MSKIRQ 0x12
#define LTC3676_MSKPG 0x13
#define LTC3676_USER 0x14
#define LTC3676_IRQSTAT 0x15
#define LTC3676_PGSTATL 0x16
#define LTC3676_PGSTATRT 0x17
#define LTC3676_HRST 0x1E
#define LTC3676_CLIRQ 0x1F
#define LTC3676_DVBxA_REF_SELECT BIT(5)
#define LTC3676_DVBxB_PGOOD_MASK BIT(5)
#define LTC3676_IRQSTAT_PGOOD_TIMEOUT BIT(3)
#define LTC3676_IRQSTAT_UNDERVOLT_WARN BIT(4)
#define LTC3676_IRQSTAT_UNDERVOLT_FAULT BIT(5)
#define LTC3676_IRQSTAT_THERMAL_WARN BIT(6)
#define LTC3676_IRQSTAT_THERMAL_FAULT BIT(7)
enum ltc3676_reg {
LTC3676_SW1,
LTC3676_SW2,
LTC3676_SW3,
LTC3676_SW4,
LTC3676_LDO1,
LTC3676_LDO2,
LTC3676_LDO3,
LTC3676_LDO4,
LTC3676_NUM_REGULATORS,
};
struct ltc3676 {
struct regmap *regmap;
struct device *dev;
struct regulator_desc regulator_descs[LTC3676_NUM_REGULATORS];
struct regulator_dev *regulators[LTC3676_NUM_REGULATORS];
};
/*
 * Program the standby (suspend) voltage of a buck regulator.
 *
 * The selector is computed against the same linear map as the active
 * voltage, then written to the DVBxB register, which sits immediately
 * after the corresponding DVBxA (vsel) register.
 */
static int ltc3676_set_suspend_voltage(struct regulator_dev *rdev, int uV)
{
	struct ltc3676 *ltc3676 = rdev_get_drvdata(rdev);
	struct device *dev = ltc3676->dev;
	int dcdc = rdev_get_id(rdev);
	int sel;

	dev_dbg(dev, "%s id=%d uV=%d\n", __func__, dcdc, uV);

	/* exact match only: min and max are passed as the same value */
	sel = regulator_map_voltage_linear(rdev, uV, uV);
	if (sel < 0)
		return sel;

	/* DVBB register follows right after the corresponding DVBA register */
	return regmap_update_bits(ltc3676->regmap, rdev->desc->vsel_reg + 1,
				  rdev->desc->vsel_mask, sel);
}
/*
 * Select which reference a buck uses in suspend: STANDBY picks the
 * DVBxA reference, NORMAL picks DVBxB, via the reference-select bit in
 * the DVBxA register. Any other mode is rejected with -EINVAL.
 */
static int ltc3676_set_suspend_mode(struct regulator_dev *rdev,
				    unsigned int mode)
{
	struct ltc3676 *ltc3676 = rdev_get_drvdata(rdev);
	struct device *dev = ltc3676->dev;
	int dcdc = rdev_get_id(rdev);
	int val;

	dev_dbg(dev, "%s id=%d mode=%d\n", __func__, dcdc, mode);

	switch (mode) {
	case REGULATOR_MODE_STANDBY:
		/* select DVBxA */
		val = 0;
		break;
	case REGULATOR_MODE_NORMAL:
		/* select DVBxB */
		val = LTC3676_DVBxA_REF_SELECT;
		break;
	default:
		dev_warn(&rdev->dev, "%s: regulator mode: 0x%x not supported\n",
			 rdev->desc->name, mode);
		return -EINVAL;
	}

	return regmap_update_bits(ltc3676->regmap, rdev->desc->vsel_reg,
				  LTC3676_DVBxA_REF_SELECT, val);
}
/*
 * Set the active voltage selector (DVBxA register).
 *
 * The PGOOD mask bit in the companion DVBxB register (vsel_reg + 1) is
 * set first — presumably to mask power-good while the output slews;
 * confirm against the LTC3676 datasheet — then the selector is written
 * through the standard regmap helper.
 */
static int ltc3676_set_voltage_sel(struct regulator_dev *rdev, unsigned selector)
{
	struct ltc3676 *ltc3676 = rdev_get_drvdata(rdev);
	struct device *dev = ltc3676->dev;
	int ret, dcdc = rdev_get_id(rdev);

	dev_dbg(dev, "%s id=%d selector=%d\n", __func__, dcdc, selector);

	ret = regmap_update_bits(ltc3676->regmap, rdev->desc->vsel_reg + 1,
				 LTC3676_DVBxB_PGOOD_MASK,
				 LTC3676_DVBxB_PGOOD_MASK);
	if (ret)
		return ret;

	return regulator_set_voltage_sel_regmap(rdev, selector);
}
/*
 * Scale a feedback-node voltage to the output-node voltage for an
 * external R1/R2 divider: out = uV * (1 + r1/r2). The product uses
 * 64-bit math (do_div) to avoid 32-bit overflow; a 0 input returns 0
 * so unused desc fields (e.g. fixed_uV on linear regs) stay 0.
 */
static inline unsigned int ltc3676_scale(unsigned int uV, u32 r1, u32 r2)
{
	uint64_t tmp;

	if (uV == 0)
		return 0;

	tmp = (uint64_t)uV * r1;
	do_div(tmp, r2);

	return uV + (unsigned int)tmp;
}
/*
 * DT parse callback: read the per-regulator "lltc,fb-voltage-divider"
 * property (R1, R2) and rescale the writable per-instance copy of the
 * regulator_desc (min_uV / uV_step / fixed_uV) from feedback-node to
 * output-node voltages. LDO3 is fixed internally and has no divider,
 * so it is skipped.
 */
static int ltc3676_of_parse_cb(struct device_node *np,
			       const struct regulator_desc *desc,
			       struct regulator_config *config)
{
	struct ltc3676 *ltc3676 = config->driver_data;
	struct regulator_desc *rdesc = &ltc3676->regulator_descs[desc->id];
	u32 r[2];
	int ret;

	/* LDO3 has a fixed output */
	if (desc->id == LTC3676_LDO3)
		return 0;

	ret = of_property_read_u32_array(np, "lltc,fb-voltage-divider", r, 2);
	if (ret) {
		dev_err(ltc3676->dev, "Failed to parse voltage divider: %d\n",
			ret);
		return ret;
	}

	rdesc->min_uV = ltc3676_scale(desc->min_uV, r[0], r[1]);
	rdesc->uV_step = ltc3676_scale(desc->uV_step, r[0], r[1]);
	rdesc->fixed_uV = ltc3676_scale(desc->fixed_uV, r[0], r[1]);

	return 0;
}
/* SW1, SW2, SW3, SW4 linear 0.8V-3.3V with scalar via R1/R2 feeback res */
static const struct regulator_ops ltc3676_linear_regulator_ops = {
.enable = regulator_enable_regmap,
.disable = regulator_disable_regmap,
.is_enabled = regulator_is_enabled_regmap,
.list_voltage = regulator_list_voltage_linear,
.set_voltage_sel = ltc3676_set_voltage_sel,
.get_voltage_sel = regulator_get_voltage_sel_regmap,
.set_suspend_voltage = ltc3676_set_suspend_voltage,
.set_suspend_mode = ltc3676_set_suspend_mode,
};
/* LDO1 always on fixed 0.8V-3.3V via scalar via R1/R2 feeback res */
static const struct regulator_ops ltc3676_fixed_standby_regulator_ops = {
};
/* LDO2, LDO3 fixed (LDO2 has external scalar via R1/R2 feedback res) */
static const struct regulator_ops ltc3676_fixed_regulator_ops = {
.enable = regulator_enable_regmap,
.disable = regulator_disable_regmap,
.is_enabled = regulator_is_enabled_regmap,
};
#define LTC3676_REG(_id, _name, _ops, en_reg, en_bit, dvba_reg, dvb_mask) \
[LTC3676_ ## _id] = { \
.name = #_name, \
.of_match = of_match_ptr(#_name), \
.regulators_node = of_match_ptr("regulators"), \
.of_parse_cb = ltc3676_of_parse_cb, \
.n_voltages = (dvb_mask) + 1, \
.min_uV = (dvba_reg) ? 412500 : 0, \
.uV_step = (dvba_reg) ? 12500 : 0, \
.ramp_delay = (dvba_reg) ? 800 : 0, \
.fixed_uV = (dvb_mask) ? 0 : 725000, \
.ops = <c3676_ ## _ops ## _regulator_ops, \
.type = REGULATOR_VOLTAGE, \
.id = LTC3676_ ## _id, \
.owner = THIS_MODULE, \
.vsel_reg = (dvba_reg), \
.vsel_mask = (dvb_mask), \
.enable_reg = (en_reg), \
.enable_mask = (1 << en_bit), \
}
#define LTC3676_LINEAR_REG(_id, _name, _en, _dvba) \
LTC3676_REG(_id, _name, linear, \
LTC3676_ ## _en, 7, \
LTC3676_ ## _dvba, 0x1f)
#define LTC3676_FIXED_REG(_id, _name, _en_reg, _en_bit) \
LTC3676_REG(_id, _name, fixed, LTC3676_ ## _en_reg, _en_bit, 0, 0)
static const struct regulator_desc ltc3676_regulators[LTC3676_NUM_REGULATORS] = {
LTC3676_LINEAR_REG(SW1, sw1, BUCK1, DVB1A),
LTC3676_LINEAR_REG(SW2, sw2, BUCK2, DVB2A),
LTC3676_LINEAR_REG(SW3, sw3, BUCK3, DVB3A),
LTC3676_LINEAR_REG(SW4, sw4, BUCK4, DVB4A),
LTC3676_REG(LDO1, ldo1, fixed_standby, 0, 0, 0, 0),
LTC3676_FIXED_REG(LDO2, ldo2, LDOA, 2),
LTC3676_FIXED_REG(LDO3, ldo3, LDOA, 5),
LTC3676_FIXED_REG(LDO4, ldo4, LDOB, 2),
};
/*
 * Registers BUCK1..IRQSTAT (0x01..0x15) plus HRST and CLIRQ are both
 * readable and writeable; everything else is reserved.
 */
static bool ltc3676_readable_writeable_reg(struct device *dev, unsigned int reg)
{
	if (reg >= LTC3676_BUCK1 && reg <= LTC3676_IRQSTAT)
		return true;

	return reg == LTC3676_HRST || reg == LTC3676_CLIRQ;
}
/*
 * IRQSTAT and the power-good status registers change under hardware
 * control, so they must bypass the regmap cache.
 */
static bool ltc3676_volatile_reg(struct device *dev, unsigned int reg)
{
	return reg >= LTC3676_IRQSTAT && reg <= LTC3676_PGSTATRT;
}
static const struct regmap_config ltc3676_regmap_config = {
.reg_bits = 8,
.val_bits = 8,
.writeable_reg = ltc3676_readable_writeable_reg,
.readable_reg = ltc3676_readable_writeable_reg,
.volatile_reg = ltc3676_volatile_reg,
.max_register = LTC3676_CLIRQ,
.use_single_read = true,
.use_single_write = true,
.cache_type = REGCACHE_MAPLE,
};
static irqreturn_t ltc3676_isr(int irq, void *dev_id)
{
struct ltc3676 *ltc3676 = dev_id;
struct device *dev = ltc3676->dev;
unsigned int i, irqstat, event;
regmap_read(ltc3676->regmap, LTC3676_IRQSTAT, &irqstat);
dev_dbg(dev, "irq%d irqstat=0x%02x\n", irq, irqstat);
if (irqstat & LTC3676_IRQSTAT_THERMAL_WARN) {
dev_warn(dev, "Over-temperature Warning\n");
event = REGULATOR_EVENT_OVER_TEMP;
for (i = 0; i < LTC3676_NUM_REGULATORS; i++)
regulator_notifier_call_chain(ltc3676->regulators[i],
event, NULL);
}
if (irqstat & LTC3676_IRQSTAT_UNDERVOLT_WARN) {
dev_info(dev, "Undervoltage Warning\n");
event = REGULATOR_EVENT_UNDER_VOLTAGE;
for (i = 0; i < LTC3676_NUM_REGULATORS; i++)
regulator_notifier_call_chain(ltc3676->regulators[i],
event, NULL);
}
/* Clear warning condition */
regmap_write(ltc3676->regmap, LTC3676_CLIRQ, 0);
return IRQ_HANDLED;
}
/*
 * Probe: copy the static descriptor table into a writable per-instance
 * copy (so of_parse_cb can rescale voltages per board), fix up LDO3's
 * 1.8V output, create the regmap, register all regulators and, when an
 * interrupt line is wired up, install the fault handler.
 */
static int ltc3676_regulator_probe(struct i2c_client *client)
{
	struct device *dev = &client->dev;
	struct regulator_init_data *init_data = dev_get_platdata(dev);
	struct regulator_desc *descs;
	struct ltc3676 *ltc3676;
	int i, ret;

	ltc3676 = devm_kzalloc(dev, sizeof(*ltc3676), GFP_KERNEL);
	if (!ltc3676)
		return -ENOMEM;

	i2c_set_clientdata(client, ltc3676);
	ltc3676->dev = dev;

	descs = ltc3676->regulator_descs;
	memcpy(descs, ltc3676_regulators, sizeof(ltc3676_regulators));
	descs[LTC3676_LDO3].fixed_uV = 1800000; /* LDO3 is fixed 1.8V */

	ltc3676->regmap = devm_regmap_init_i2c(client, &ltc3676_regmap_config);
	if (IS_ERR(ltc3676->regmap)) {
		ret = PTR_ERR(ltc3676->regmap);
		dev_err(dev, "failed to initialize regmap: %d\n", ret);
		return ret;
	}

	for (i = 0; i < LTC3676_NUM_REGULATORS; i++) {
		struct regulator_desc *desc = &ltc3676->regulator_descs[i];
		struct regulator_config config = { };

		/* legacy platform data, when present, is indexed by id */
		if (init_data)
			config.init_data = &init_data[i];

		config.dev = dev;
		config.driver_data = ltc3676;

		ltc3676->regulators[i] = devm_regulator_register(dev, desc,
								 &config);
		if (IS_ERR(ltc3676->regulators[i])) {
			ret = PTR_ERR(ltc3676->regulators[i]);
			dev_err(dev, "failed to register regulator %s: %d\n",
				desc->name, ret);
			return ret;
		}
	}

	/* clear any latched interrupt status before requesting the IRQ */
	regmap_write(ltc3676->regmap, LTC3676_CLIRQ, 0);
	if (client->irq) {
		ret = devm_request_threaded_irq(dev, client->irq, NULL,
						ltc3676_isr,
						IRQF_TRIGGER_LOW | IRQF_ONESHOT,
						client->name, ltc3676);
		if (ret) {
			dev_err(dev, "Failed to request IRQ: %d\n", ret);
			return ret;
		}
	}

	return 0;
}
static const struct i2c_device_id ltc3676_i2c_id[] = {
{ "ltc3676" },
{ }
};
MODULE_DEVICE_TABLE(i2c, ltc3676_i2c_id);
static const struct of_device_id __maybe_unused ltc3676_of_match[] = {
{ .compatible = "lltc,ltc3676" },
{ },
};
MODULE_DEVICE_TABLE(of, ltc3676_of_match);
static struct i2c_driver ltc3676_driver = {
.driver = {
.name = DRIVER_NAME,
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
.of_match_table = of_match_ptr(ltc3676_of_match),
},
.probe = ltc3676_regulator_probe,
.id_table = ltc3676_i2c_id,
};
module_i2c_driver(ltc3676_driver);
MODULE_AUTHOR("Tim Harvey <[email protected]>");
MODULE_DESCRIPTION("Regulator driver for Linear Technology LTC3676");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/regulator/ltc3676.c |
// SPDX-License-Identifier: GPL-2.0
//
// Device driver for regulators in Hi655x IC
//
// Copyright (c) 2016 HiSilicon Ltd.
//
// Authors:
// Chen Feng <[email protected]>
// Fei Wang <[email protected]>
#include <linux/bitops.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/regulator/driver.h>
#include <linux/regulator/machine.h>
#include <linux/regulator/of_regulator.h>
#include <linux/mfd/hi655x-pmic.h>
struct hi655x_regulator {
unsigned int disable_reg;
unsigned int status_reg;
struct regulator_desc rdesc;
};
/* LDO7 & LDO10 */
static const unsigned int ldo7_voltages[] = {
1800000, 1850000, 2850000, 2900000,
3000000, 3100000, 3200000, 3300000,
};
static const unsigned int ldo19_voltages[] = {
1800000, 1850000, 1900000, 1750000,
2800000, 2850000, 2900000, 3000000,
};
static const unsigned int ldo22_voltages[] = {
900000, 1000000, 1050000, 1100000,
1150000, 1175000, 1185000, 1200000,
};
enum hi655x_regulator_id {
HI655X_LDO0,
HI655X_LDO1,
HI655X_LDO2,
HI655X_LDO3,
HI655X_LDO4,
HI655X_LDO5,
HI655X_LDO6,
HI655X_LDO7,
HI655X_LDO8,
HI655X_LDO9,
HI655X_LDO10,
HI655X_LDO11,
HI655X_LDO12,
HI655X_LDO13,
HI655X_LDO14,
HI655X_LDO15,
HI655X_LDO16,
HI655X_LDO17,
HI655X_LDO18,
HI655X_LDO19,
HI655X_LDO20,
HI655X_LDO21,
HI655X_LDO22,
};
/*
 * Report whether the regulator is enabled by reading its status
 * register; nonzero means enabled. A failed register read used to be
 * silently reported as "disabled" (value stayed 0); it is now
 * propagated to the core as a negative errno.
 */
static int hi655x_is_enabled(struct regulator_dev *rdev)
{
	unsigned int value = 0;
	int ret;
	const struct hi655x_regulator *regulator = rdev_get_drvdata(rdev);

	ret = regmap_read(rdev->regmap, regulator->status_reg, &value);
	if (ret)
		return ret;

	return (value & rdev->desc->enable_mask);
}
/*
 * Disable the regulator by writing its enable mask to the separate
 * disable register — the hi655x appears to use distinct set/clear
 * style registers rather than a read-modify-write enable bit.
 */
static int hi655x_disable(struct regulator_dev *rdev)
{
	const struct hi655x_regulator *regulator = rdev_get_drvdata(rdev);

	return regmap_write(rdev->regmap, regulator->disable_reg,
			    rdev->desc->enable_mask);
}
static const struct regulator_ops hi655x_regulator_ops = {
.enable = regulator_enable_regmap,
.disable = hi655x_disable,
.is_enabled = hi655x_is_enabled,
.list_voltage = regulator_list_voltage_table,
.get_voltage_sel = regulator_get_voltage_sel_regmap,
.set_voltage_sel = regulator_set_voltage_sel_regmap,
};
static const struct regulator_ops hi655x_ldo_linear_ops = {
.enable = regulator_enable_regmap,
.disable = hi655x_disable,
.is_enabled = hi655x_is_enabled,
.list_voltage = regulator_list_voltage_linear,
.get_voltage_sel = regulator_get_voltage_sel_regmap,
.set_voltage_sel = regulator_set_voltage_sel_regmap,
};
#define HI655X_LDO(_ID, vreg, vmask, ereg, dreg, \
sreg, cmask, vtable) { \
.rdesc = { \
.name = #_ID, \
.of_match = of_match_ptr(#_ID), \
.ops = &hi655x_regulator_ops, \
.regulators_node = of_match_ptr("regulators"), \
.type = REGULATOR_VOLTAGE, \
.id = HI655X_##_ID, \
.owner = THIS_MODULE, \
.n_voltages = ARRAY_SIZE(vtable), \
.volt_table = vtable, \
.vsel_reg = HI655X_BUS_ADDR(vreg), \
.vsel_mask = vmask, \
.enable_reg = HI655X_BUS_ADDR(ereg), \
.enable_mask = BIT(cmask), \
}, \
.disable_reg = HI655X_BUS_ADDR(dreg), \
.status_reg = HI655X_BUS_ADDR(sreg), \
}
#define HI655X_LDO_LINEAR(_ID, vreg, vmask, ereg, dreg, \
sreg, cmask, minv, nvolt, vstep) { \
.rdesc = { \
.name = #_ID, \
.of_match = of_match_ptr(#_ID), \
.ops = &hi655x_ldo_linear_ops, \
.regulators_node = of_match_ptr("regulators"), \
.type = REGULATOR_VOLTAGE, \
.id = HI655X_##_ID, \
.owner = THIS_MODULE, \
.min_uV = minv, \
.n_voltages = nvolt, \
.uV_step = vstep, \
.vsel_reg = HI655X_BUS_ADDR(vreg), \
.vsel_mask = vmask, \
.enable_reg = HI655X_BUS_ADDR(ereg), \
.enable_mask = BIT(cmask), \
}, \
.disable_reg = HI655X_BUS_ADDR(dreg), \
.status_reg = HI655X_BUS_ADDR(sreg), \
}
static const struct hi655x_regulator regulators[] = {
HI655X_LDO_LINEAR(LDO2, 0x72, 0x07, 0x29, 0x2a, 0x2b, 0x01,
2500000, 8, 100000),
HI655X_LDO(LDO7, 0x78, 0x07, 0x29, 0x2a, 0x2b, 0x06, ldo7_voltages),
HI655X_LDO(LDO10, 0x78, 0x07, 0x29, 0x2a, 0x2b, 0x01, ldo7_voltages),
HI655X_LDO_LINEAR(LDO13, 0x7e, 0x07, 0x2c, 0x2d, 0x2e, 0x04,
1600000, 8, 50000),
HI655X_LDO_LINEAR(LDO14, 0x7f, 0x07, 0x2c, 0x2d, 0x2e, 0x05,
2500000, 8, 100000),
HI655X_LDO_LINEAR(LDO15, 0x80, 0x07, 0x2c, 0x2d, 0x2e, 0x06,
1600000, 8, 50000),
HI655X_LDO_LINEAR(LDO17, 0x82, 0x07, 0x2f, 0x30, 0x31, 0x00,
2500000, 8, 100000),
HI655X_LDO(LDO19, 0x84, 0x07, 0x2f, 0x30, 0x31, 0x02, ldo19_voltages),
HI655X_LDO_LINEAR(LDO21, 0x86, 0x07, 0x2f, 0x30, 0x31, 0x04,
1650000, 8, 50000),
HI655X_LDO(LDO22, 0x87, 0x07, 0x2f, 0x30, 0x31, 0x05, ldo22_voltages),
};
/*
 * Probe: register every regulator in the static regulators[] table
 * against the parent PMIC's regmap. The parent device is used as the
 * config dev so DT lookups resolve from the PMIC node.
 */
static int hi655x_regulator_probe(struct platform_device *pdev)
{
	unsigned int i;
	struct hi655x_pmic *pmic;
	struct regulator_config config = { };
	struct regulator_dev *rdev;

	pmic = dev_get_drvdata(pdev->dev.parent);
	if (!pmic) {
		dev_err(&pdev->dev, "no pmic in the regulator parent node\n");
		return -ENODEV;
	}

	config.dev = pdev->dev.parent;
	config.regmap = pmic->regmap;

	for (i = 0; i < ARRAY_SIZE(regulators); i++) {
		/* cast away const: driver_data is an untyped pointer */
		config.driver_data = (void *) &regulators[i];

		rdev = devm_regulator_register(&pdev->dev,
					       &regulators[i].rdesc,
					       &config);
		if (IS_ERR(rdev)) {
			dev_err(&pdev->dev, "failed to register regulator %s\n",
				regulators[i].rdesc.name);
			return PTR_ERR(rdev);
		}
	}

	return 0;
}
static const struct platform_device_id hi655x_regulator_table[] = {
{ .name = "hi655x-regulator" },
{},
};
MODULE_DEVICE_TABLE(platform, hi655x_regulator_table);
static struct platform_driver hi655x_regulator_driver = {
.id_table = hi655x_regulator_table,
.driver = {
.name = "hi655x-regulator",
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
},
.probe = hi655x_regulator_probe,
};
module_platform_driver(hi655x_regulator_driver);
MODULE_AUTHOR("Chen Feng <[email protected]>");
MODULE_DESCRIPTION("Hisilicon Hi655x regulator driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/regulator/hi655x-regulator.c |
// SPDX-License-Identifier: GPL-2.0
//
// System Control and Management Interface (SCMI) based regulator driver
//
// Copyright (C) 2020-2021 ARM Ltd.
//
// Implements a regulator driver on top of the SCMI Voltage Protocol.
//
// The ARM SCMI Protocol aims in general to hide as much as possible all the
// underlying operational details while providing an abstracted interface for
// its users to operate upon: as a consequence the resulting operational
// capabilities and configurability of this regulator device are much more
// limited than the ones usually available on a standard physical regulator.
//
// The supported SCMI regulator ops are restricted to the bare minimum:
//
// - 'status_ops': enable/disable/is_enabled
// - 'voltage_ops': get_voltage_sel/set_voltage_sel
// list_voltage/map_voltage
//
// Each SCMI regulator instance is associated, through the means of a proper DT
// entry description, to a specific SCMI Voltage Domain.
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/linear_range.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/regulator/driver.h>
#include <linux/regulator/machine.h>
#include <linux/regulator/of_regulator.h>
#include <linux/scmi_protocol.h>
#include <linux/slab.h>
#include <linux/types.h>
static const struct scmi_voltage_proto_ops *voltage_ops;
struct scmi_regulator {
u32 id;
struct scmi_device *sdev;
struct scmi_protocol_handle *ph;
struct regulator_dev *rdev;
struct device_node *of_node;
struct regulator_desc desc;
struct regulator_config conf;
};
struct scmi_regulator_info {
int num_doms;
struct scmi_regulator **sregv;
};
/* Turn the backing SCMI voltage domain ON via the Voltage protocol. */
static int scmi_reg_enable(struct regulator_dev *rdev)
{
	struct scmi_regulator *sreg = rdev_get_drvdata(rdev);

	return voltage_ops->config_set(sreg->ph, sreg->id,
				       SCMI_VOLTAGE_ARCH_STATE_ON);
}
/* Turn the backing SCMI voltage domain OFF via the Voltage protocol. */
static int scmi_reg_disable(struct regulator_dev *rdev)
{
	struct scmi_regulator *sreg = rdev_get_drvdata(rdev);

	return voltage_ops->config_set(sreg->ph, sreg->id,
				       SCMI_VOLTAGE_ARCH_STATE_OFF);
}
/*
 * Query the domain configuration and report whether the architectural
 * ON bit is set; a failed query is logged and returned as a negative
 * errno.
 */
static int scmi_reg_is_enabled(struct regulator_dev *rdev)
{
	int ret;
	u32 config;
	struct scmi_regulator *sreg = rdev_get_drvdata(rdev);

	ret = voltage_ops->config_get(sreg->ph, sreg->id, &config);
	if (ret) {
		dev_err(&sreg->sdev->dev,
			"Error %d reading regulator %s status.\n",
			ret, sreg->desc.name);
		return ret;
	}

	return config & SCMI_VOLTAGE_ARCH_STATE_ON;
}
/*
 * Read the current level in uV from the platform and translate it back
 * to a selector using the desc's own map_voltage (exact match, since
 * min and max are passed as the same value).
 */
static int scmi_reg_get_voltage_sel(struct regulator_dev *rdev)
{
	int ret;
	s32 volt_uV;
	struct scmi_regulator *sreg = rdev_get_drvdata(rdev);

	ret = voltage_ops->level_get(sreg->ph, sreg->id, &volt_uV);
	if (ret)
		return ret;

	return sreg->desc.ops->map_voltage(rdev, volt_uV, volt_uV);
}
/*
 * Translate the selector to uV via the desc's list_voltage and push it
 * to the platform. Non-positive voltages are rejected: negative-volt
 * domains were filtered out at init time.
 */
static int scmi_reg_set_voltage_sel(struct regulator_dev *rdev,
				    unsigned int selector)
{
	s32 volt_uV;
	struct scmi_regulator *sreg = rdev_get_drvdata(rdev);

	volt_uV = sreg->desc.ops->list_voltage(rdev, selector);
	if (volt_uV <= 0)
		return -EINVAL;

	return voltage_ops->level_set(sreg->ph, sreg->id, 0x0, volt_uV);
}
static const struct regulator_ops scmi_reg_fixed_ops = {
.enable = scmi_reg_enable,
.disable = scmi_reg_disable,
.is_enabled = scmi_reg_is_enabled,
};
static const struct regulator_ops scmi_reg_linear_ops = {
.enable = scmi_reg_enable,
.disable = scmi_reg_disable,
.is_enabled = scmi_reg_is_enabled,
.get_voltage_sel = scmi_reg_get_voltage_sel,
.set_voltage_sel = scmi_reg_set_voltage_sel,
.list_voltage = regulator_list_voltage_linear,
.map_voltage = regulator_map_voltage_linear,
};
static const struct regulator_ops scmi_reg_discrete_ops = {
.enable = scmi_reg_enable,
.disable = scmi_reg_disable,
.is_enabled = scmi_reg_is_enabled,
.get_voltage_sel = scmi_reg_get_voltage_sel,
.set_voltage_sel = scmi_reg_set_voltage_sel,
.list_voltage = regulator_list_voltage_table,
.map_voltage = regulator_map_voltage_iterate,
};
/*
 * Configure the regulator_desc for a segmented (linear-range) SCMI
 * voltage domain: a zero low-to-high span degenerates to a fixed
 * single-voltage regulator, otherwise a plain linear min/step mapping
 * is installed. Returns -EINVAL for a negative span reported by fw.
 */
static int
scmi_config_linear_regulator_mappings(struct scmi_regulator *sreg,
				      const struct scmi_voltage_info *vinfo)
{
	s32 delta_uV;

	/*
	 * Note that SCMI voltage domains describable by linear ranges
	 * (segments) {low, high, step} are guaranteed to come in one single
	 * triplet by the SCMI Voltage Domain protocol support itself.
	 */

	delta_uV = (vinfo->levels_uv[SCMI_VOLTAGE_SEGMENT_HIGH] -
		    vinfo->levels_uv[SCMI_VOLTAGE_SEGMENT_LOW]);

	/* Rule out buggy negative-intervals answers from fw */
	if (delta_uV < 0) {
		dev_err(&sreg->sdev->dev,
			"Invalid volt-range %d-%duV for domain %d\n",
			vinfo->levels_uv[SCMI_VOLTAGE_SEGMENT_LOW],
			vinfo->levels_uv[SCMI_VOLTAGE_SEGMENT_HIGH],
			sreg->id);
		return -EINVAL;
	}

	if (!delta_uV) {
		/* Just one fixed voltage exposed by SCMI */
		sreg->desc.fixed_uV =
			vinfo->levels_uv[SCMI_VOLTAGE_SEGMENT_LOW];
		sreg->desc.n_voltages = 1;
		sreg->desc.ops = &scmi_reg_fixed_ops;
	} else {
		/* One simple linear mapping. */
		sreg->desc.min_uV =
			vinfo->levels_uv[SCMI_VOLTAGE_SEGMENT_LOW];
		sreg->desc.uV_step =
			vinfo->levels_uv[SCMI_VOLTAGE_SEGMENT_STEP];
		sreg->desc.linear_min_sel = 0;
		sreg->desc.n_voltages = (delta_uV / sreg->desc.uV_step) + 1;
		sreg->desc.ops = &scmi_reg_linear_ops;
	}

	return 0;
}
/*
 * Configure the regulator_desc for a discrete-levels SCMI voltage
 * domain: multiple levels are exposed through a volt_table, a single
 * level degenerates to a fixed regulator. Always returns 0.
 */
static int
scmi_config_discrete_regulator_mappings(struct scmi_regulator *sreg,
					const struct scmi_voltage_info *vinfo)
{
	/* Discrete non linear levels are mapped to volt_table */
	sreg->desc.n_voltages = vinfo->num_levels;

	if (sreg->desc.n_voltages > 1) {
		sreg->desc.volt_table = (const unsigned int *)vinfo->levels_uv;
		sreg->desc.ops = &scmi_reg_discrete_ops;
	} else {
		sreg->desc.fixed_uV = vinfo->levels_uv[0];
		sreg->desc.ops = &scmi_reg_fixed_ops;
	}

	return 0;
}
/*
 * Fetch the SCMI voltage domain info for this regulator and populate
 * its regulator_desc and regulator_config. Fails with -ENODEV when the
 * domain info cannot be retrieved, -EOPNOTSUPP for domains allowing
 * negative voltages (not representable in the regulator framework),
 * and -ENOMEM on name allocation failure.
 */
static int scmi_regulator_common_init(struct scmi_regulator *sreg)
{
	int ret;
	struct device *dev = &sreg->sdev->dev;
	const struct scmi_voltage_info *vinfo;

	vinfo = voltage_ops->info_get(sreg->ph, sreg->id);
	if (!vinfo) {
		dev_warn(dev, "Failure to get voltage domain %d\n",
			 sreg->id);
		return -ENODEV;
	}

	/*
	 * Regulator framework does not fully support negative voltages
	 * so we discard any voltage domain reported as supporting negative
	 * voltages: as a consequence each levels_uv entry is guaranteed to
	 * be non-negative from here on.
	 */
	if (vinfo->negative_volts_allowed) {
		dev_warn(dev, "Negative voltages NOT supported...skip %s\n",
			 sreg->of_node->full_name);
		return -EOPNOTSUPP;
	}

	sreg->desc.name = devm_kasprintf(dev, GFP_KERNEL, "%s", vinfo->name);
	if (!sreg->desc.name)
		return -ENOMEM;

	sreg->desc.id = sreg->id;
	sreg->desc.type = REGULATOR_VOLTAGE;
	sreg->desc.owner = THIS_MODULE;
	sreg->desc.of_match_full_name = true;
	sreg->desc.of_match = sreg->of_node->full_name;
	sreg->desc.regulators_node = "regulators";
	if (vinfo->segmented)
		ret = scmi_config_linear_regulator_mappings(sreg, vinfo);
	else
		ret = scmi_config_discrete_regulator_mappings(sreg, vinfo);
	if (ret)
		return ret;

	/*
	 * Using the scmi device here to have DT searched from Voltage
	 * protocol node down.
	 */
	sreg->conf.dev = dev;

	/* Store for later retrieval via rdev_get_drvdata() */
	sreg->conf.driver_data = sreg;

	return 0;
}
/*
 * Validate one "regulators" DT child node and, when it maps to a free
 * and in-range SCMI voltage domain, allocate and park a matching
 * struct scmi_regulator in rinfo->sregv[dom] (the node reference is
 * taken here and dropped in remove).
 *
 * Returns 0 on success or a negative errno: missing/bad "reg"
 * property, out-of-range domain, duplicate domain, or -ENOMEM.
 *
 * Fix: ret was declared u32, so the negative error code from
 * of_property_read_u32() round-tripped through an unsigned variable
 * (implementation-defined conversion); declare it int.
 */
static int process_scmi_regulator_of_node(struct scmi_device *sdev,
					  struct scmi_protocol_handle *ph,
					  struct device_node *np,
					  struct scmi_regulator_info *rinfo)
{
	u32 dom;
	int ret;

	ret = of_property_read_u32(np, "reg", &dom);
	if (ret)
		return ret;

	if (dom >= rinfo->num_doms)
		return -ENODEV;

	if (rinfo->sregv[dom]) {
		dev_err(&sdev->dev,
			"SCMI Voltage Domain %d already in use. Skipping: %s\n",
			dom, np->full_name);
		return -EINVAL;
	}

	rinfo->sregv[dom] = devm_kzalloc(&sdev->dev,
					 sizeof(struct scmi_regulator),
					 GFP_KERNEL);
	if (!rinfo->sregv[dom])
		return -ENOMEM;

	rinfo->sregv[dom]->id = dom;
	rinfo->sregv[dom]->sdev = sdev;
	rinfo->sregv[dom]->ph = ph;

	/* get hold of good nodes */
	of_node_get(np);
	rinfo->sregv[dom]->of_node = np;

	dev_dbg(&sdev->dev,
		"Found SCMI Regulator entry -- OF node [%d] -> %s\n",
		dom, np->full_name);

	return 0;
}
/*
 * Probe: acquire the SCMI Voltage protocol, enumerate the available
 * voltage domains, collect DT-described regulators from the
 * "regulators" subnode, and register a regulator for each valid
 * (DT + SCMI) domain. Individual domains that fail init are skipped
 * rather than failing the whole probe; only -ENOMEM aborts.
 */
static int scmi_regulator_probe(struct scmi_device *sdev)
{
	int d, ret, num_doms;
	struct device_node *np, *child;
	const struct scmi_handle *handle = sdev->handle;
	struct scmi_regulator_info *rinfo;
	struct scmi_protocol_handle *ph;

	if (!handle)
		return -ENODEV;

	voltage_ops = handle->devm_protocol_get(sdev,
						SCMI_PROTOCOL_VOLTAGE, &ph);
	if (IS_ERR(voltage_ops))
		return PTR_ERR(voltage_ops);

	num_doms = voltage_ops->num_domains_get(ph);
	if (!num_doms)
		return 0;

	if (num_doms < 0) {
		dev_err(&sdev->dev, "failed to get voltage domains - err:%d\n",
			num_doms);
		return num_doms;
	}

	rinfo = devm_kzalloc(&sdev->dev, sizeof(*rinfo), GFP_KERNEL);
	if (!rinfo)
		return -ENOMEM;

	/* Allocate pointers array for all possible domains */
	rinfo->sregv = devm_kcalloc(&sdev->dev, num_doms,
				    sizeof(void *), GFP_KERNEL);
	if (!rinfo->sregv)
		return -ENOMEM;

	rinfo->num_doms = num_doms;

	/*
	 * Start collecting into rinfo->sregv possibly good SCMI Regulators as
	 * described by a well-formed DT entry and associated with an existing
	 * plausible SCMI Voltage Domain number, all belonging to this SCMI
	 * platform instance node (handle->dev->of_node).
	 */
	of_node_get(handle->dev->of_node);
	np = of_find_node_by_name(handle->dev->of_node, "regulators");
	for_each_child_of_node(np, child) {
		ret = process_scmi_regulator_of_node(sdev, ph, child, rinfo);
		/* abort on any mem issue */
		if (ret == -ENOMEM) {
			of_node_put(child);
			return ret;
		}
	}
	of_node_put(np);

	/*
	 * Register a regulator for each valid regulator-DT-entry that we
	 * can successfully reach via SCMI and has a valid associated voltage
	 * domain.
	 */
	for (d = 0; d < num_doms; d++) {
		struct scmi_regulator *sreg = rinfo->sregv[d];

		/* Skip empty slots */
		if (!sreg)
			continue;

		ret = scmi_regulator_common_init(sreg);
		/* Skip invalid voltage domains */
		if (ret)
			continue;

		sreg->rdev = devm_regulator_register(&sdev->dev, &sreg->desc,
						     &sreg->conf);
		if (IS_ERR(sreg->rdev)) {
			sreg->rdev = NULL;
			continue;
		}

		dev_info(&sdev->dev,
			 "Regulator %s registered for domain [%d]\n",
			 sreg->desc.name, sreg->id);
	}

	dev_set_drvdata(&sdev->dev, rinfo);

	return 0;
}
static void scmi_regulator_remove(struct scmi_device *sdev)
{
int d;
struct scmi_regulator_info *rinfo;
rinfo = dev_get_drvdata(&sdev->dev);
if (!rinfo)
return;
for (d = 0; d < rinfo->num_doms; d++) {
if (!rinfo->sregv[d])
continue;
of_node_put(rinfo->sregv[d]->of_node);
}
}
static const struct scmi_device_id scmi_regulator_id_table[] = {
{ SCMI_PROTOCOL_VOLTAGE, "regulator" },
{ },
};
MODULE_DEVICE_TABLE(scmi, scmi_regulator_id_table);
static struct scmi_driver scmi_drv = {
.name = "scmi-regulator",
.probe = scmi_regulator_probe,
.remove = scmi_regulator_remove,
.id_table = scmi_regulator_id_table,
};
module_scmi_driver(scmi_drv);
MODULE_AUTHOR("Cristian Marussi <[email protected]>");
MODULE_DESCRIPTION("ARM SCMI regulator driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/regulator/scmi-regulator.c |
// SPDX-License-Identifier: GPL-2.0+
//
// mpq7920.c - regulator driver for mps mpq7920
//
// Copyright 2019 Monolithic Power Systems, Inc
//
// Author: Saravanan Sekar <[email protected]>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/err.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/regulator/driver.h>
#include <linux/regulator/of_regulator.h>
#include <linux/i2c.h>
#include <linux/regmap.h>
#include "mpq7920.h"
#define MPQ7920_BUCK_VOLT_RANGE \
((MPQ7920_VOLT_MAX - MPQ7920_BUCK_VOLT_MIN)/MPQ7920_VOLT_STEP + 1)
#define MPQ7920_LDO_VOLT_RANGE \
((MPQ7920_VOLT_MAX - MPQ7920_LDO_VOLT_MIN)/MPQ7920_VOLT_STEP + 1)
#define MPQ7920BUCK(_name, _id, _ilim) \
[MPQ7920_BUCK ## _id] = { \
.id = MPQ7920_BUCK ## _id, \
.name = _name, \
.of_match = _name, \
.regulators_node = "regulators", \
.of_parse_cb = mpq7920_parse_cb, \
.ops = &mpq7920_buck_ops, \
.min_uV = MPQ7920_BUCK_VOLT_MIN, \
.uV_step = MPQ7920_VOLT_STEP, \
.n_voltages = MPQ7920_BUCK_VOLT_RANGE, \
.curr_table = _ilim, \
.n_current_limits = ARRAY_SIZE(_ilim), \
.csel_reg = MPQ7920_BUCK ##_id## _REG_C, \
.csel_mask = MPQ7920_MASK_BUCK_ILIM, \
.enable_reg = MPQ7920_REG_REGULATOR_EN, \
.enable_mask = BIT(MPQ7920_REGULATOR_EN_OFFSET - \
MPQ7920_BUCK ## _id), \
.vsel_reg = MPQ7920_BUCK ##_id## _REG_A, \
.vsel_mask = MPQ7920_MASK_VREF, \
.active_discharge_on = MPQ7920_DISCHARGE_ON, \
.active_discharge_reg = MPQ7920_BUCK ##_id## _REG_B, \
.active_discharge_mask = MPQ7920_MASK_DISCHARGE, \
.soft_start_reg = MPQ7920_BUCK ##_id## _REG_C, \
.soft_start_mask = MPQ7920_MASK_SOFTSTART, \
.owner = THIS_MODULE, \
}
#define MPQ7920LDO(_name, _id, _ops, _ilim, _ilim_sz, _creg, _cmask) \
[MPQ7920_LDO ## _id] = { \
.id = MPQ7920_LDO ## _id, \
.name = _name, \
.of_match = _name, \
.regulators_node = "regulators", \
.ops = _ops, \
.min_uV = MPQ7920_LDO_VOLT_MIN, \
.uV_step = MPQ7920_VOLT_STEP, \
.n_voltages = MPQ7920_LDO_VOLT_RANGE, \
.vsel_reg = MPQ7920_LDO ##_id## _REG_A, \
.vsel_mask = MPQ7920_MASK_VREF, \
.curr_table = _ilim, \
.n_current_limits = _ilim_sz, \
.csel_reg = _creg, \
.csel_mask = _cmask, \
.enable_reg = (_id == 1) ? 0 : MPQ7920_REG_REGULATOR_EN,\
.enable_mask = BIT(MPQ7920_REGULATOR_EN_OFFSET - \
MPQ7920_LDO ##_id + 1), \
.active_discharge_on = MPQ7920_DISCHARGE_ON, \
.active_discharge_mask = MPQ7920_MASK_DISCHARGE, \
.active_discharge_reg = MPQ7920_LDO ##_id## _REG_B, \
.type = REGULATOR_VOLTAGE, \
.owner = THIS_MODULE, \
}
enum mpq7920_regulators {
MPQ7920_BUCK1,
MPQ7920_BUCK2,
MPQ7920_BUCK3,
MPQ7920_BUCK4,
MPQ7920_LDO1, /* LDORTC */
MPQ7920_LDO2,
MPQ7920_LDO3,
MPQ7920_LDO4,
MPQ7920_LDO5,
MPQ7920_MAX_REGULATORS,
};
struct mpq7920_regulator_info {
struct regmap *regmap;
struct regulator_desc *rdesc;
};
static const struct regmap_config mpq7920_regmap_config = {
.reg_bits = 8,
.val_bits = 8,
.max_register = 0x25,
};
/*
 * Over-current limit tables, in uA. The selector index written to the
 * ILIM field picks the matching entry.
 */
/* ILIM1 & ILIM3 (used by buck1 and buck3) */
static const unsigned int mpq7920_I_limits1[] = {
	4600000, 6600000, 7600000, 9300000
};
/* ILIM2 & ILIM4 (used by buck2 and buck4) */
static const unsigned int mpq7920_I_limits2[] = {
	2700000, 3900000, 5100000, 6100000
};
/* LDO4 & LDO5 */
static const unsigned int mpq7920_I_limits3[] = {
	300000, 700000
};

/* Forward declarations: referenced by the ops/desc tables below */
static int mpq7920_set_ramp_delay(struct regulator_dev *rdev, int ramp_delay);
static int mpq7920_parse_cb(struct device_node *np,
				const struct regulator_desc *rdesc,
				struct regulator_config *config);
/* RTCLDO not controllable, always ON: voltage get/set only, no enable */
static const struct regulator_ops mpq7920_ldortc_ops = {
	.list_voltage = regulator_list_voltage_linear,
	.map_voltage = regulator_map_voltage_linear,
	.get_voltage_sel = regulator_get_voltage_sel_regmap,
	.set_voltage_sel = regulator_set_voltage_sel_regmap,
};

/* LDOs with on/off control but no programmable current limit */
static const struct regulator_ops mpq7920_ldo_wo_current_ops = {
	.enable = regulator_enable_regmap,
	.disable = regulator_disable_regmap,
	.is_enabled = regulator_is_enabled_regmap,
	.list_voltage = regulator_list_voltage_linear,
	.map_voltage = regulator_map_voltage_linear,
	.get_voltage_sel = regulator_get_voltage_sel_regmap,
	.set_voltage_sel = regulator_set_voltage_sel_regmap,
	.set_active_discharge = regulator_set_active_discharge_regmap,
};

/* LDOs with on/off control and a programmable current limit (LDO4/LDO5) */
static const struct regulator_ops mpq7920_ldo_ops = {
	.enable = regulator_enable_regmap,
	.disable = regulator_disable_regmap,
	.is_enabled = regulator_is_enabled_regmap,
	.list_voltage = regulator_list_voltage_linear,
	.map_voltage = regulator_map_voltage_linear,
	.get_voltage_sel = regulator_get_voltage_sel_regmap,
	.set_voltage_sel = regulator_set_voltage_sel_regmap,
	.set_active_discharge = regulator_set_active_discharge_regmap,
	.get_current_limit = regulator_get_current_limit_regmap,
	.set_current_limit = regulator_set_current_limit_regmap,
};

/* Bucks additionally support soft-start and a DVS ramp (slew) control */
static const struct regulator_ops mpq7920_buck_ops = {
	.enable = regulator_enable_regmap,
	.disable = regulator_disable_regmap,
	.is_enabled = regulator_is_enabled_regmap,
	.list_voltage = regulator_list_voltage_linear,
	.map_voltage = regulator_map_voltage_linear,
	.get_voltage_sel = regulator_get_voltage_sel_regmap,
	.set_voltage_sel = regulator_set_voltage_sel_regmap,
	.set_active_discharge = regulator_set_active_discharge_regmap,
	.set_soft_start = regulator_set_soft_start_regmap,
	.set_ramp_delay = mpq7920_set_ramp_delay,
};
/* Descriptor table, indexed by enum mpq7920_regulators */
static struct regulator_desc mpq7920_regulators_desc[MPQ7920_MAX_REGULATORS] = {
	MPQ7920BUCK("buck1", 1, mpq7920_I_limits1),
	MPQ7920BUCK("buck2", 2, mpq7920_I_limits2),
	MPQ7920BUCK("buck3", 3, mpq7920_I_limits1),
	MPQ7920BUCK("buck4", 4, mpq7920_I_limits2),
	MPQ7920LDO("ldortc", 1, &mpq7920_ldortc_ops, NULL, 0, 0, 0),
	MPQ7920LDO("ldo2", 2, &mpq7920_ldo_wo_current_ops, NULL, 0, 0, 0),
	MPQ7920LDO("ldo3", 3, &mpq7920_ldo_wo_current_ops, NULL, 0, 0, 0),
	MPQ7920LDO("ldo4", 4, &mpq7920_ldo_ops, mpq7920_I_limits3,
			ARRAY_SIZE(mpq7920_I_limits3), MPQ7920_LDO4_REG_B,
			MPQ7920_MASK_LDO_ILIM),
	MPQ7920LDO("ldo5", 5, &mpq7920_ldo_ops, mpq7920_I_limits3,
			ARRAY_SIZE(mpq7920_I_limits3), MPQ7920_LDO5_REG_B,
			MPQ7920_MASK_LDO_ILIM),
};
/*
 * DVS ramp rate BUCK1 to BUCK4
 * 00-01: Reserved
 * 10: 8mV/us
 * 11: 4mV/us
 */
static int mpq7920_set_ramp_delay(struct regulator_dev *rdev, int ramp_delay)
{
	unsigned int sel;

	/* Only requests in the 0..8000 uV/us window can be honoured */
	if (ramp_delay < 0 || ramp_delay > 8000)
		return -EINVAL;

	/* <= 4000 uV/us -> 4mV/us (0b11), otherwise 8mV/us (0b10) */
	sel = (ramp_delay <= 4000) ? 3 : 2;

	/* Slew-rate field sits at bits 7:6 of CTL0 */
	return regmap_update_bits(rdev->regmap, MPQ7920_REG_CTL0,
				  MPQ7920_MASK_DVS_SLEWRATE, sel << 6);
}
/*
 * Per-regulator DT callback: applies optional buck tuning properties
 * (OVP disable, phase delay, soft-start slew) from the regulator node.
 * Register addresses are derived from the BUCK1 registers plus a 4-byte
 * stride per regulator id.
 */
static int mpq7920_parse_cb(struct device_node *np,
				const struct regulator_desc *desc,
				struct regulator_config *config)
{
	struct mpq7920_regulator_info *info = config->driver_data;
	struct regulator_desc *rdesc = &info->rdesc[desc->id];
	uint8_t val;

	if (of_property_read_bool(np, "mps,buck-ovp-disable"))
		regmap_update_bits(config->regmap,
				   MPQ7920_BUCK1_REG_B + (rdesc->id * 4),
				   MPQ7920_MASK_OVP, MPQ7920_OVP_DISABLE);

	/* 2-bit phase-delay field at bits 5:4 */
	if (!of_property_read_u8(np, "mps,buck-phase-delay", &val))
		regmap_update_bits(config->regmap,
				   MPQ7920_BUCK1_REG_C + (rdesc->id * 4),
				   MPQ7920_MASK_BUCK_PHASE_DEALY,
				   (val & 3) << 4);

	/* Cached in the descriptor; written when soft-start is enabled */
	if (!of_property_read_u8(np, "mps,buck-softstart", &val))
		rdesc->soft_start_val_on = (val & 3) << 2;

	return 0;
}
/*
 * Chip-level DT parsing: reads the optional switching-frequency override
 * from the "regulators" subnode and programs it into CTL0. Missing nodes
 * or properties are simply skipped.
 */
static void mpq7920_parse_dt(struct device *dev,
				 struct mpq7920_regulator_info *info)
{
	struct device_node *regs_np;
	uint8_t freq;

	regs_np = of_get_child_by_name(dev->of_node, "regulators");
	if (!regs_np) {
		dev_err(dev, "missing 'regulators' subnode in DT\n");
		return;
	}

	/* 2-bit switch-frequency field at bits 5:4 of CTL0 */
	if (!of_property_read_u8(regs_np, "mps,switch-freq", &freq))
		regmap_update_bits(info->regmap, MPQ7920_REG_CTL0,
				   MPQ7920_MASK_SWITCH_FREQ,
				   (freq & 3) << 4);

	of_node_put(regs_np);
}
static int mpq7920_i2c_probe(struct i2c_client *client)
{
struct device *dev = &client->dev;
struct mpq7920_regulator_info *info;
struct regulator_config config = { NULL, };
struct regulator_dev *rdev;
struct regmap *regmap;
int i;
info = devm_kzalloc(dev, sizeof(struct mpq7920_regulator_info),
GFP_KERNEL);
if (!info)
return -ENOMEM;
info->rdesc = mpq7920_regulators_desc;
regmap = devm_regmap_init_i2c(client, &mpq7920_regmap_config);
if (IS_ERR(regmap)) {
dev_err(dev, "Failed to allocate regmap!\n");
return PTR_ERR(regmap);
}
i2c_set_clientdata(client, info);
info->regmap = regmap;
if (client->dev.of_node)
mpq7920_parse_dt(&client->dev, info);
config.dev = dev;
config.regmap = regmap;
config.driver_data = info;
for (i = 0; i < MPQ7920_MAX_REGULATORS; i++) {
rdev = devm_regulator_register(dev,
&mpq7920_regulators_desc[i],
&config);
if (IS_ERR(rdev)) {
dev_err(dev, "Failed to register regulator!\n");
return PTR_ERR(rdev);
}
}
return 0;
}
/* Matches the DT compatible string */
static const struct of_device_id mpq7920_of_match[] = {
	{ .compatible = "mps,mpq7920"},
	{},
};
MODULE_DEVICE_TABLE(of, mpq7920_of_match);

/* Legacy I2C id table for non-DT instantiation */
static const struct i2c_device_id mpq7920_id[] = {
	{ "mpq7920", },
	{ },
};
MODULE_DEVICE_TABLE(i2c, mpq7920_id);

static struct i2c_driver mpq7920_regulator_driver = {
	.driver = {
		.name = "mpq7920",
		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
		.of_match_table = mpq7920_of_match,
	},
	.probe = mpq7920_i2c_probe,
	.id_table = mpq7920_id,
};
module_i2c_driver(mpq7920_regulator_driver);

MODULE_AUTHOR("Saravanan Sekar <[email protected]>");
MODULE_DESCRIPTION("MPQ7920 PMIC regulator driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/regulator/mpq7920.c |
// SPDX-License-Identifier: GPL-2.0
//
// Regulator Driver for Freescale MC13892 PMIC
//
// Copyright 2010 Yong Shen <[email protected]>
//
// Based on draft driver from Arnaud Patard <[email protected]>
#include <linux/mfd/mc13892.h>
#include <linux/regulator/machine.h>
#include <linux/regulator/driver.h>
#include <linux/platform_device.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/err.h>
#include <linux/module.h>
#include "mc13xxx.h"
#define MC13892_REVISION 7
#define MC13892_POWERCTL0 13
#define MC13892_POWERCTL0_USEROFFSPI 3
#define MC13892_POWERCTL0_VCOINCELLVSEL 20
#define MC13892_POWERCTL0_VCOINCELLVSEL_M (7<<20)
#define MC13892_POWERCTL0_VCOINCELLEN (1<<23)
#define MC13892_SWITCHERS0_SWxHI (1<<23)
#define MC13892_SWITCHERS0 24
#define MC13892_SWITCHERS0_SW1VSEL 0
#define MC13892_SWITCHERS0_SW1VSEL_M (0x1f<<0)
#define MC13892_SWITCHERS0_SW1HI (1<<23)
#define MC13892_SWITCHERS0_SW1EN 0
#define MC13892_SWITCHERS1 25
#define MC13892_SWITCHERS1_SW2VSEL 0
#define MC13892_SWITCHERS1_SW2VSEL_M (0x1f<<0)
#define MC13892_SWITCHERS1_SW2HI (1<<23)
#define MC13892_SWITCHERS1_SW2EN 0
#define MC13892_SWITCHERS2 26
#define MC13892_SWITCHERS2_SW3VSEL 0
#define MC13892_SWITCHERS2_SW3VSEL_M (0x1f<<0)
#define MC13892_SWITCHERS2_SW3HI (1<<23)
#define MC13892_SWITCHERS2_SW3EN 0
#define MC13892_SWITCHERS3 27
#define MC13892_SWITCHERS3_SW4VSEL 0
#define MC13892_SWITCHERS3_SW4VSEL_M (0x1f<<0)
#define MC13892_SWITCHERS3_SW4HI (1<<23)
#define MC13892_SWITCHERS3_SW4EN 0
#define MC13892_SWITCHERS4 28
#define MC13892_SWITCHERS4_SW1MODE 0
#define MC13892_SWITCHERS4_SW1MODE_AUTO (8<<0)
#define MC13892_SWITCHERS4_SW1MODE_M (0xf<<0)
#define MC13892_SWITCHERS4_SW2MODE 10
#define MC13892_SWITCHERS4_SW2MODE_AUTO (8<<10)
#define MC13892_SWITCHERS4_SW2MODE_M (0xf<<10)
#define MC13892_SWITCHERS5 29
#define MC13892_SWITCHERS5_SW3MODE 0
#define MC13892_SWITCHERS5_SW3MODE_AUTO (8<<0)
#define MC13892_SWITCHERS5_SW3MODE_M (0xf<<0)
#define MC13892_SWITCHERS5_SW4MODE 8
#define MC13892_SWITCHERS5_SW4MODE_AUTO (8<<8)
#define MC13892_SWITCHERS5_SW4MODE_M (0xf<<8)
#define MC13892_SWITCHERS5_SWBSTEN (1<<20)
#define MC13892_REGULATORSETTING0 30
#define MC13892_REGULATORSETTING0_VGEN1VSEL 0
#define MC13892_REGULATORSETTING0_VDIGVSEL 4
#define MC13892_REGULATORSETTING0_VGEN2VSEL 6
#define MC13892_REGULATORSETTING0_VPLLVSEL 9
#define MC13892_REGULATORSETTING0_VUSB2VSEL 11
#define MC13892_REGULATORSETTING0_VGEN3VSEL 14
#define MC13892_REGULATORSETTING0_VCAMVSEL 16
#define MC13892_REGULATORSETTING0_VGEN1VSEL_M (3<<0)
#define MC13892_REGULATORSETTING0_VDIGVSEL_M (3<<4)
#define MC13892_REGULATORSETTING0_VGEN2VSEL_M (7<<6)
#define MC13892_REGULATORSETTING0_VPLLVSEL_M (3<<9)
#define MC13892_REGULATORSETTING0_VUSB2VSEL_M (3<<11)
#define MC13892_REGULATORSETTING0_VGEN3VSEL_M (1<<14)
#define MC13892_REGULATORSETTING0_VCAMVSEL_M (3<<16)
#define MC13892_REGULATORSETTING1 31
#define MC13892_REGULATORSETTING1_VVIDEOVSEL 2
#define MC13892_REGULATORSETTING1_VAUDIOVSEL 4
#define MC13892_REGULATORSETTING1_VSDVSEL 6
#define MC13892_REGULATORSETTING1_VVIDEOVSEL_M (3<<2)
#define MC13892_REGULATORSETTING1_VAUDIOVSEL_M (3<<4)
#define MC13892_REGULATORSETTING1_VSDVSEL_M (7<<6)
#define MC13892_REGULATORMODE0 32
#define MC13892_REGULATORMODE0_VGEN1EN (1<<0)
#define MC13892_REGULATORMODE0_VGEN1STDBY (1<<1)
#define MC13892_REGULATORMODE0_VGEN1MODE (1<<2)
#define MC13892_REGULATORMODE0_VIOHIEN (1<<3)
#define MC13892_REGULATORMODE0_VIOHISTDBY (1<<4)
#define MC13892_REGULATORMODE0_VIOHIMODE (1<<5)
#define MC13892_REGULATORMODE0_VDIGEN (1<<9)
#define MC13892_REGULATORMODE0_VDIGSTDBY (1<<10)
#define MC13892_REGULATORMODE0_VDIGMODE (1<<11)
#define MC13892_REGULATORMODE0_VGEN2EN (1<<12)
#define MC13892_REGULATORMODE0_VGEN2STDBY (1<<13)
#define MC13892_REGULATORMODE0_VGEN2MODE (1<<14)
#define MC13892_REGULATORMODE0_VPLLEN (1<<15)
#define MC13892_REGULATORMODE0_VPLLSTDBY (1<<16)
#define MC13892_REGULATORMODE0_VPLLMODE (1<<17)
#define MC13892_REGULATORMODE0_VUSB2EN (1<<18)
#define MC13892_REGULATORMODE0_VUSB2STDBY (1<<19)
#define MC13892_REGULATORMODE0_VUSB2MODE (1<<20)
#define MC13892_REGULATORMODE1 33
#define MC13892_REGULATORMODE1_VGEN3EN (1<<0)
#define MC13892_REGULATORMODE1_VGEN3STDBY (1<<1)
#define MC13892_REGULATORMODE1_VGEN3MODE (1<<2)
#define MC13892_REGULATORMODE1_VCAMEN (1<<6)
#define MC13892_REGULATORMODE1_VCAMSTDBY (1<<7)
#define MC13892_REGULATORMODE1_VCAMMODE (1<<8)
#define MC13892_REGULATORMODE1_VCAMCONFIGEN (1<<9)
#define MC13892_REGULATORMODE1_VVIDEOEN (1<<12)
#define MC13892_REGULATORMODE1_VVIDEOSTDBY (1<<13)
#define MC13892_REGULATORMODE1_VVIDEOMODE (1<<14)
#define MC13892_REGULATORMODE1_VAUDIOEN (1<<15)
#define MC13892_REGULATORMODE1_VAUDIOSTDBY (1<<16)
#define MC13892_REGULATORMODE1_VAUDIOMODE (1<<17)
#define MC13892_REGULATORMODE1_VSDEN (1<<18)
#define MC13892_REGULATORMODE1_VSDSTDBY (1<<19)
#define MC13892_REGULATORMODE1_VSDMODE (1<<20)
#define MC13892_POWERMISC 34
#define MC13892_POWERMISC_GPO1EN (1<<6)
#define MC13892_POWERMISC_GPO2EN (1<<8)
#define MC13892_POWERMISC_GPO3EN (1<<10)
#define MC13892_POWERMISC_GPO4EN (1<<12)
#define MC13892_POWERMISC_PWGT1SPIEN (1<<15)
#define MC13892_POWERMISC_PWGT2SPIEN (1<<16)
#define MC13892_POWERMISC_GPO4ADINEN (1<<21)
#define MC13892_POWERMISC_PWGTSPI_M (3 << 15)
#define MC13892_USB1 50
#define MC13892_USB1_VUSBEN (1<<3)
/* Voltage tables, in uV, indexed by the regulator's selector value */
static const unsigned int mc13892_vcoincell[] = {
	2500000, 2700000, 2800000, 2900000, 3000000, 3100000,
	3200000, 3300000,
};

/* SW1 supports only the low range (no SWxHI extension) */
static const unsigned int mc13892_sw1[] = {
	600000,   625000,  650000,  675000,  700000,  725000,
	750000,   775000,  800000,  825000,  850000,  875000,
	900000,   925000,  950000,  975000, 1000000, 1025000,
	1050000, 1075000, 1100000, 1125000, 1150000, 1175000,
	1200000, 1225000, 1250000, 1275000, 1300000, 1325000,
	1350000, 1375000
};

/*
 * Note: this table is used to derive SWxVSEL by index into
 * the array. Offset the values by the index of 1100000uV
 * to get the actual register value for that voltage selector
 * if the HI bit is to be set as well.
 */
#define MC13892_SWxHI_SEL_OFFSET		20

static const unsigned int mc13892_sw[] = {
	600000,   625000,  650000,  675000,  700000,  725000,
	750000,   775000,  800000,  825000,  850000,  875000,
	900000,   925000,  950000,  975000, 1000000, 1025000,
	1050000, 1075000, 1100000, 1125000, 1150000, 1175000,
	1200000, 1225000, 1250000, 1275000, 1300000, 1325000,
	1350000, 1375000, 1400000, 1425000, 1450000, 1475000,
	1500000, 1525000, 1550000, 1575000, 1600000, 1625000,
	1650000, 1675000, 1700000, 1725000, 1750000, 1775000,
	1800000, 1825000, 1850000, 1875000
};

static const unsigned int mc13892_swbst[] = {
	5000000,
};

static const unsigned int mc13892_viohi[] = {
	2775000,
};

static const unsigned int mc13892_vpll[] = {
	1050000, 1250000, 1650000, 1800000,
};

static const unsigned int mc13892_vdig[] = {
	1050000, 1250000, 1650000, 1800000,
};

static const unsigned int mc13892_vsd[] = {
	1800000, 2000000, 2600000, 2700000,
	2800000, 2900000, 3000000, 3150000,
};

static const unsigned int mc13892_vusb2[] = {
	2400000, 2600000, 2700000, 2775000,
};

/* Deliberately non-monotonic: matches the hardware selector encoding */
static const unsigned int mc13892_vvideo[] = {
	2700000, 2775000, 2500000, 2600000,
};

static const unsigned int mc13892_vaudio[] = {
	2300000, 2500000, 2775000, 3000000,
};

static const unsigned int mc13892_vcam[] = {
	2500000, 2600000, 2750000, 3000000,
};

static const unsigned int mc13892_vgen1[] = {
	1200000, 1500000, 2775000, 3150000,
};

static const unsigned int mc13892_vgen2[] = {
	1200000, 1500000, 1600000, 1800000,
	2700000, 2800000, 3000000, 3150000,
};

static const unsigned int mc13892_vgen3[] = {
	1800000, 2900000,
};

static const unsigned int mc13892_vusb[] = {
	3300000,
};

static const unsigned int mc13892_gpo[] = {
	2750000,
};

static const unsigned int mc13892_pwgtdrv[] = {
	5000000,
};
/* Defined further below; forward-declared for use in the table macros */
static const struct regulator_ops mc13892_gpo_regulator_ops;
static const struct regulator_ops mc13892_sw_regulator_ops;

/* Shorthands around the generic mc13xxx table-entry macros */
#define MC13892_FIXED_DEFINE(name, node, reg, voltages)			\
	MC13xxx_FIXED_DEFINE(MC13892_, name, node, reg, voltages,	\
			mc13xxx_fixed_regulator_ops)

#define MC13892_GPO_DEFINE(name, node, reg, voltages)			\
	MC13xxx_GPO_DEFINE(MC13892_, name, node, reg, voltages,	\
			mc13892_gpo_regulator_ops)

#define MC13892_SW_DEFINE(name, node, reg, vsel_reg, voltages)		\
	MC13xxx_DEFINE(MC13892_, name, node, reg, vsel_reg, voltages, \
			mc13892_sw_regulator_ops)

#define MC13892_DEFINE_REGU(name, node, reg, vsel_reg, voltages)	\
	MC13xxx_DEFINE(MC13892_, name, node, reg, vsel_reg, voltages, \
			mc13xxx_regulator_ops)

/* Regulator table, indexed by the MC13892_* regulator id */
static struct mc13xxx_regulator mc13892_regulators[] = {
	MC13892_DEFINE_REGU(VCOINCELL, vcoincell, POWERCTL0, POWERCTL0, mc13892_vcoincell),
	MC13892_SW_DEFINE(SW1, sw1, SWITCHERS0, SWITCHERS0, mc13892_sw1),
	MC13892_SW_DEFINE(SW2, sw2, SWITCHERS1, SWITCHERS1, mc13892_sw),
	MC13892_SW_DEFINE(SW3, sw3, SWITCHERS2, SWITCHERS2, mc13892_sw),
	MC13892_SW_DEFINE(SW4, sw4, SWITCHERS3, SWITCHERS3, mc13892_sw),
	MC13892_FIXED_DEFINE(SWBST, swbst, SWITCHERS5, mc13892_swbst),
	MC13892_FIXED_DEFINE(VIOHI, viohi, REGULATORMODE0, mc13892_viohi),
	MC13892_DEFINE_REGU(VPLL, vpll, REGULATORMODE0, REGULATORSETTING0,
			    mc13892_vpll),
	MC13892_DEFINE_REGU(VDIG, vdig, REGULATORMODE0, REGULATORSETTING0,
			    mc13892_vdig),
	MC13892_DEFINE_REGU(VSD, vsd, REGULATORMODE1, REGULATORSETTING1,
			    mc13892_vsd),
	MC13892_DEFINE_REGU(VUSB2, vusb2, REGULATORMODE0, REGULATORSETTING0,
			    mc13892_vusb2),
	MC13892_DEFINE_REGU(VVIDEO, vvideo, REGULATORMODE1, REGULATORSETTING1,
			    mc13892_vvideo),
	MC13892_DEFINE_REGU(VAUDIO, vaudio, REGULATORMODE1, REGULATORSETTING1,
			    mc13892_vaudio),
	MC13892_DEFINE_REGU(VCAM, vcam, REGULATORMODE1, REGULATORSETTING0,
			    mc13892_vcam),
	MC13892_DEFINE_REGU(VGEN1, vgen1, REGULATORMODE0, REGULATORSETTING0,
			    mc13892_vgen1),
	MC13892_DEFINE_REGU(VGEN2, vgen2, REGULATORMODE0, REGULATORSETTING0,
			    mc13892_vgen2),
	MC13892_DEFINE_REGU(VGEN3, vgen3, REGULATORMODE1, REGULATORSETTING0,
			    mc13892_vgen3),
	MC13892_FIXED_DEFINE(VUSB, vusb, USB1, mc13892_vusb),
	MC13892_GPO_DEFINE(GPO1, gpo1, POWERMISC, mc13892_gpo),
	MC13892_GPO_DEFINE(GPO2, gpo2, POWERMISC, mc13892_gpo),
	MC13892_GPO_DEFINE(GPO3, gpo3, POWERMISC, mc13892_gpo),
	MC13892_GPO_DEFINE(GPO4, gpo4, POWERMISC, mc13892_gpo),
	MC13892_GPO_DEFINE(PWGT1SPI, pwgt1spi, POWERMISC, mc13892_pwgtdrv),
	MC13892_GPO_DEFINE(PWGT2SPI, pwgt2spi, POWERMISC, mc13892_pwgtdrv),
};
/*
 * Read-modify-write MC13892_POWERMISC while keeping the locally cached
 * Power Gate (PWGTxSPIEN) state authoritative: the cached state always
 * overrides whatever is currently in the register for those bits.
 * @val must not set bits outside @mask.
 */
static int mc13892_powermisc_rmw(struct mc13xxx_regulator_priv *priv, u32 mask,
				 u32 val)
{
	struct mc13xxx *mc13892 = priv->mc13xxx;
	u32 reg;
	int ret;

	BUG_ON(val & ~mask);

	mc13xxx_lock(mc13892);

	ret = mc13xxx_reg_read(mc13892, MC13892_POWERMISC, &reg);
	if (ret)
		goto unlock;

	/* Fold the request into the cached Power Gate state first... */
	priv->powermisc_pwgt_state =
		((priv->powermisc_pwgt_state & ~mask) | val) &
		MC13892_POWERMISC_PWGTSPI_M;

	/* ...then build the new register value... */
	reg = (reg & ~mask) | val;

	/* ...and force the PWGTxEN bits to the cached version. */
	reg = (reg & ~MC13892_POWERMISC_PWGTSPI_M) |
		priv->powermisc_pwgt_state;

	ret = mc13xxx_reg_write(mc13892, MC13892_POWERMISC, reg);

unlock:
	mc13xxx_unlock(mc13892);

	return ret;
}
/* Enable a GPO/Power Gate output via the shared POWERMISC register */
static int mc13892_gpo_regulator_enable(struct regulator_dev *rdev)
{
	struct mc13xxx_regulator_priv *priv = rdev_get_drvdata(rdev);
	int id = rdev_get_id(rdev);
	u32 mask = mc13892_regulators[id].enable_bit;
	u32 en_val = mask;

	dev_dbg(rdev_get_dev(rdev), "%s id: %d\n", __func__, id);

	/* Power Gates use inverted logic: write 0 to enable */
	if (id == MC13892_PWGT1SPI || id == MC13892_PWGT2SPI)
		en_val = 0;

	/* GPO4 also has an ADIN mode bit, cleared here (mask'd, val 0) */
	if (id == MC13892_GPO4)
		mask |= MC13892_POWERMISC_GPO4ADINEN;

	return mc13892_powermisc_rmw(priv, mask, en_val);
}
/* Disable a GPO/Power Gate output via the shared POWERMISC register */
static int mc13892_gpo_regulator_disable(struct regulator_dev *rdev)
{
	struct mc13xxx_regulator_priv *priv = rdev_get_drvdata(rdev);
	int id = rdev_get_id(rdev);
	u32 mask = mc13892_regulators[id].enable_bit;
	u32 dis_val = 0;

	dev_dbg(rdev_get_dev(rdev), "%s id: %d\n", __func__, id);

	/* Power Gates use inverted logic: write 1 to disable */
	if (id == MC13892_PWGT1SPI || id == MC13892_PWGT2SPI)
		dis_val = mask;

	return mc13892_powermisc_rmw(priv, mask, dis_val);
}
/* Report whether a GPO/Power Gate output is currently enabled */
static int mc13892_gpo_regulator_is_enabled(struct regulator_dev *rdev)
{
	struct mc13xxx_regulator_priv *priv = rdev_get_drvdata(rdev);
	int id = rdev_get_id(rdev);
	unsigned int val;
	int ret;

	mc13xxx_lock(priv->mc13xxx);
	ret = mc13xxx_reg_read(priv->mc13xxx, mc13892_regulators[id].reg, &val);
	mc13xxx_unlock(priv->mc13xxx);
	if (ret)
		return ret;

	/*
	 * The Power Gate state is cached in powermisc_pwgt_state with
	 * negated bit meaning, so substitute the (re-negated) cache for
	 * the PWGT bits read back from the hardware.
	 */
	val = (val & ~MC13892_POWERMISC_PWGTSPI_M) |
	      (priv->powermisc_pwgt_state ^ MC13892_POWERMISC_PWGTSPI_M);

	return (val & mc13892_regulators[id].enable_bit) != 0;
}
/* GPO/Power Gate ops: fixed voltage, custom enable path through POWERMISC */
static const struct regulator_ops mc13892_gpo_regulator_ops = {
	.enable = mc13892_gpo_regulator_enable,
	.disable = mc13892_gpo_regulator_disable,
	.is_enabled = mc13892_gpo_regulator_is_enabled,
	.list_voltage = regulator_list_voltage_table,
	.set_voltage = mc13xxx_fixed_regulator_set_voltage,
};
/*
 * Read the current voltage selector for a switcher, folding the SWxHI
 * range bit into the selector index (see the comment inside).
 */
static int mc13892_sw_regulator_get_voltage_sel(struct regulator_dev *rdev)
{
	struct mc13xxx_regulator_priv *priv = rdev_get_drvdata(rdev);
	int ret, id = rdev_get_id(rdev);
	unsigned int val, selector;

	dev_dbg(rdev_get_dev(rdev), "%s id: %d\n", __func__, id);

	mc13xxx_lock(priv->mc13xxx);
	ret = mc13xxx_reg_read(priv->mc13xxx,
			       mc13892_regulators[id].vsel_reg, &val);
	mc13xxx_unlock(priv->mc13xxx);
	if (ret)
		return ret;

	/*
	 * Figure out if the HI bit is set inside the switcher mode register
	 * since this means the selector value we return is at a different
	 * offset into the selector table.
	 *
	 * According to the MC13892 documentation note 59 (Table 47) the SW1
	 * buck switcher does not support output range programming therefore
	 * the HI bit must always remain 0. So do not do anything strange if
	 * our register is MC13892_SWITCHERS0.
	 */
	selector = val & mc13892_regulators[id].vsel_mask;

	if ((mc13892_regulators[id].vsel_reg != MC13892_SWITCHERS0) &&
	    (val & MC13892_SWITCHERS0_SWxHI)) {
		selector += MC13892_SWxHI_SEL_OFFSET;
	}

	dev_dbg(rdev_get_dev(rdev), "%s id: %d val: 0x%08x selector: %d\n",
			__func__, id, val, selector);

	return selector;
}
/*
 * Program a switcher voltage selector, setting or clearing the SWxHI
 * range bit when the requested voltage crosses the 1.375 V boundary
 * (never for SW1, which has no HI range).
 */
static int mc13892_sw_regulator_set_voltage_sel(struct regulator_dev *rdev,
						unsigned selector)
{
	struct mc13xxx_regulator_priv *priv = rdev_get_drvdata(rdev);
	int volt, mask, id = rdev_get_id(rdev);
	u32 reg_value;
	int ret;

	volt = rdev->desc->volt_table[selector];
	mask = mc13892_regulators[id].vsel_mask;
	reg_value = selector;

	/*
	 * Don't mess with the HI bit or support HI voltage offsets for SW1.
	 *
	 * Since the get_voltage_sel callback has given a fudged value for
	 * the selector offset, we need to back out that offset if HI is
	 * to be set so we write the correct value to the register.
	 *
	 * The HI bit addition and selector offset handling COULD be more
	 * complicated by shifting and masking off the voltage selector part
	 * of the register then logical OR it back in, but since the selector
	 * is at bits 4:0 there is very little point. This makes the whole
	 * thing more readable and we do far less work.
	 */
	if (mc13892_regulators[id].vsel_reg != MC13892_SWITCHERS0) {
		mask |= MC13892_SWITCHERS0_SWxHI;

		if (volt > 1375000) {
			reg_value -= MC13892_SWxHI_SEL_OFFSET;
			reg_value |= MC13892_SWITCHERS0_SWxHI;
		} else {
			reg_value &= ~MC13892_SWITCHERS0_SWxHI;
		}
	}

	mc13xxx_lock(priv->mc13xxx);
	ret = mc13xxx_reg_rmw(priv->mc13xxx, mc13892_regulators[id].vsel_reg,
			      mask, reg_value);
	mc13xxx_unlock(priv->mc13xxx);

	return ret;
}

/* Switcher ops: table-based voltages with custom HI-bit selector handling */
static const struct regulator_ops mc13892_sw_regulator_ops = {
	.list_voltage = regulator_list_voltage_table,
	.map_voltage = regulator_map_voltage_ascend,
	.set_voltage_sel = mc13892_sw_regulator_set_voltage_sel,
	.get_voltage_sel = mc13892_sw_regulator_get_voltage_sel,
};
/*
 * Set the VCAM operating mode: FAST sets the VCAMCONFIG enable bit,
 * any other mode clears it.
 */
static int mc13892_vcam_set_mode(struct regulator_dev *rdev, unsigned int mode)
{
	struct mc13xxx_regulator_priv *priv = rdev_get_drvdata(rdev);
	int id = rdev_get_id(rdev);
	unsigned int en_val;
	int ret;

	en_val = (mode == REGULATOR_MODE_FAST) ?
		 MC13892_REGULATORMODE1_VCAMCONFIGEN : 0;

	mc13xxx_lock(priv->mc13xxx);
	ret = mc13xxx_reg_rmw(priv->mc13xxx, mc13892_regulators[id].reg,
			      MC13892_REGULATORMODE1_VCAMCONFIGEN, en_val);
	mc13xxx_unlock(priv->mc13xxx);

	return ret;
}
/*
 * Report the VCAM operating mode: FAST when the VCAMCONFIG enable bit is
 * set, NORMAL otherwise.
 *
 * NOTE(review): on a failed register read this returns the negative errno
 * through an ops hook declared to return an unsigned int mode — confirm
 * how the regulator core treats that value.
 */
static unsigned int mc13892_vcam_get_mode(struct regulator_dev *rdev)
{
	struct mc13xxx_regulator_priv *priv = rdev_get_drvdata(rdev);
	int ret, id = rdev_get_id(rdev);
	unsigned int val;

	mc13xxx_lock(priv->mc13xxx);
	ret = mc13xxx_reg_read(priv->mc13xxx, mc13892_regulators[id].reg, &val);
	mc13xxx_unlock(priv->mc13xxx);
	if (ret)
		return ret;

	if (val & MC13892_REGULATORMODE1_VCAMCONFIGEN)
		return REGULATOR_MODE_FAST;

	return REGULATOR_MODE_NORMAL;
}

/* Filled in at probe time: generic ops plus get/set_mode for VCAM */
static struct regulator_ops mc13892_vcam_ops;
/*
 * Probe: count the regulators (DT first, platform data as fallback),
 * apply the 2.0A-silicon switcher auto-mode setup, patch the VCAM ops
 * with mode control and register every regulator with the core.
 */
static int mc13892_regulator_probe(struct platform_device *pdev)
{
	struct mc13xxx_regulator_priv *priv;
	struct mc13xxx *mc13892 = dev_get_drvdata(pdev->dev.parent);
	struct mc13xxx_regulator_platform_data *pdata =
		dev_get_platdata(&pdev->dev);
	struct mc13xxx_regulator_init_data *mc13xxx_data;
	struct regulator_config config = { };
	int i, ret;
	int num_regulators = 0;
	u32 val;

	num_regulators = mc13xxx_get_num_regulators_dt(pdev);
	if (num_regulators <= 0 && pdata)
		num_regulators = pdata->num_regulators;
	if (num_regulators <= 0)
		return -EINVAL;

	priv = devm_kzalloc(&pdev->dev,
			    struct_size(priv, regulators, num_regulators),
			    GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->num_regulators = num_regulators;
	priv->mc13xxx_regulators = mc13892_regulators;
	priv->mc13xxx = mc13892;
	platform_set_drvdata(pdev, priv);

	mc13xxx_lock(mc13892);
	ret = mc13xxx_reg_read(mc13892, MC13892_REVISION, &val);
	if (ret)
		goto err_unlock;

	/* enable switch auto mode (on 2.0A silicon only) */
	if ((val & 0x0000FFFF) == 0x45d0) {
		ret = mc13xxx_reg_rmw(mc13892, MC13892_SWITCHERS4,
			MC13892_SWITCHERS4_SW1MODE_M |
			MC13892_SWITCHERS4_SW2MODE_M,
			MC13892_SWITCHERS4_SW1MODE_AUTO |
			MC13892_SWITCHERS4_SW2MODE_AUTO);
		if (ret)
			goto err_unlock;

		ret = mc13xxx_reg_rmw(mc13892, MC13892_SWITCHERS5,
			MC13892_SWITCHERS5_SW3MODE_M |
			MC13892_SWITCHERS5_SW4MODE_M,
			MC13892_SWITCHERS5_SW3MODE_AUTO |
			MC13892_SWITCHERS5_SW4MODE_AUTO);
		if (ret)
			goto err_unlock;
	}
	mc13xxx_unlock(mc13892);

	/* update mc13892_vcam ops: clone the generics, add mode control */
	memcpy(&mc13892_vcam_ops, mc13892_regulators[MC13892_VCAM].desc.ops,
	       sizeof(struct regulator_ops));
	mc13892_vcam_ops.set_mode = mc13892_vcam_set_mode;
	mc13892_vcam_ops.get_mode = mc13892_vcam_get_mode;
	mc13892_regulators[MC13892_VCAM].desc.ops = &mc13892_vcam_ops;

	mc13xxx_data = mc13xxx_parse_regulators_dt(pdev, mc13892_regulators,
					ARRAY_SIZE(mc13892_regulators));

	/*
	 * Without DT data the loop below falls back to platform data;
	 * make sure it actually exists before dereferencing it.
	 */
	if (!mc13xxx_data && !pdata)
		return -EINVAL;

	for (i = 0; i < priv->num_regulators; i++) {
		struct regulator_init_data *init_data;
		struct regulator_desc *desc;
		struct device_node *node = NULL;
		int id;

		if (mc13xxx_data) {
			id = mc13xxx_data[i].id;
			init_data = mc13xxx_data[i].init_data;
			node = mc13xxx_data[i].node;
		} else {
			id = pdata->regulators[i].id;
			init_data = pdata->regulators[i].init_data;
		}
		desc = &mc13892_regulators[id].desc;

		config.dev = &pdev->dev;
		config.init_data = init_data;
		config.driver_data = priv;
		config.of_node = node;

		priv->regulators[i] = devm_regulator_register(&pdev->dev, desc,
							      &config);
		if (IS_ERR(priv->regulators[i])) {
			/*
			 * Report the failing regulator by its table id
			 * (desc), not by the loop index: i follows the
			 * DT/pdata ordering, which need not match the
			 * mc13892_regulators[] layout.
			 */
			dev_err(&pdev->dev, "failed to register regulator %s\n",
				desc->name);
			return PTR_ERR(priv->regulators[i]);
		}
	}

	return 0;

err_unlock:
	mc13xxx_unlock(mc13892);
	return ret;
}
static struct platform_driver mc13892_regulator_driver = {
	.driver = {
		.name	= "mc13892-regulator",
		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
	},
	.probe	= mc13892_regulator_probe,
};

/* Registered at subsys_initcall time so consumers can probe later */
static int __init mc13892_regulator_init(void)
{
	return platform_driver_register(&mc13892_regulator_driver);
}
subsys_initcall(mc13892_regulator_init);

static void __exit mc13892_regulator_exit(void)
{
	platform_driver_unregister(&mc13892_regulator_driver);
}
module_exit(mc13892_regulator_exit);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Yong Shen <[email protected]>");
MODULE_DESCRIPTION("Regulator Driver for Freescale MC13892 PMIC");
MODULE_ALIAS("platform:mc13892-regulator");
| linux-master | drivers/regulator/mc13892-regulator.c |
// SPDX-License-Identifier: GPL-2.0-only
// Copyright (c) 2020, The Linux Foundation. All rights reserved.
#include <linux/module.h>
#include <linux/of_irq.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/regulator/driver.h>
#include <linux/regulator/of_regulator.h>
#define REG_PERPH_TYPE 0x04
#define QCOM_LAB_TYPE 0x24
#define QCOM_IBB_TYPE 0x20
#define PMI8998_LAB_REG_BASE 0xde00
#define PMI8998_IBB_REG_BASE 0xdc00
#define PMI8998_IBB_LAB_REG_OFFSET 0x200
#define REG_LABIBB_STATUS1 0x08
#define LABIBB_STATUS1_SC_BIT BIT(6)
#define LABIBB_STATUS1_VREG_OK_BIT BIT(7)
#define REG_LABIBB_INT_SET_TYPE 0x11
#define REG_LABIBB_INT_POLARITY_HIGH 0x12
#define REG_LABIBB_INT_POLARITY_LOW 0x13
#define REG_LABIBB_INT_LATCHED_CLR 0x14
#define REG_LABIBB_INT_EN_SET 0x15
#define REG_LABIBB_INT_EN_CLR 0x16
#define LABIBB_INT_VREG_OK BIT(0)
#define LABIBB_INT_VREG_TYPE_LEVEL 0
#define REG_LABIBB_VOLTAGE 0x41
#define LABIBB_VOLTAGE_OVERRIDE_EN BIT(7)
#define LAB_VOLTAGE_SET_MASK GENMASK(3, 0)
#define IBB_VOLTAGE_SET_MASK GENMASK(5, 0)
#define REG_LABIBB_ENABLE_CTL 0x46
#define LABIBB_CONTROL_ENABLE BIT(7)
#define REG_LABIBB_PD_CTL 0x47
#define LAB_PD_CTL_MASK GENMASK(1, 0)
#define IBB_PD_CTL_MASK (BIT(0) | BIT(7))
#define LAB_PD_CTL_STRONG_PULL BIT(0)
#define IBB_PD_CTL_HALF_STRENGTH BIT(0)
#define IBB_PD_CTL_EN BIT(7)
#define REG_LABIBB_CURRENT_LIMIT 0x4b
#define LAB_CURRENT_LIMIT_MASK GENMASK(2, 0)
#define IBB_CURRENT_LIMIT_MASK GENMASK(4, 0)
#define LAB_CURRENT_LIMIT_OVERRIDE_EN BIT(3)
#define LABIBB_CURRENT_LIMIT_EN BIT(7)
#define REG_IBB_PWRUP_PWRDN_CTL_1 0x58
#define IBB_CTL_1_DISCHARGE_EN BIT(2)
#define REG_LABIBB_SOFT_START_CTL 0x5f
#define REG_LABIBB_SEC_ACCESS 0xd0
#define LABIBB_SEC_UNLOCK_CODE 0xa5
#define LAB_ENABLE_CTL_MASK BIT(7)
#define IBB_ENABLE_CTL_MASK (BIT(7) | BIT(6))
#define LABIBB_OFF_ON_DELAY 1000
#define LAB_ENABLE_TIME (LABIBB_OFF_ON_DELAY * 2)
#define IBB_ENABLE_TIME (LABIBB_OFF_ON_DELAY * 10)
#define LABIBB_POLL_ENABLED_TIME 1000
#define OCP_RECOVERY_INTERVAL_MS 500
#define SC_RECOVERY_INTERVAL_MS 250
#define LABIBB_MAX_OCP_COUNT 4
#define LABIBB_MAX_SC_COUNT 3
#define LABIBB_MAX_FATAL_COUNT 2
/* Mapping of a current-limit request onto the hardware register encoding */
struct labibb_current_limits {
	u32 uA_min;	/* lowest selectable limit, in uA */
	u32 uA_step;	/* step between consecutive limits, in uA */
	u8 ovr_val;	/* value enabling the current-limit override -- presumably OR'd with the selector; confirm at the setter */
};

/* Per-regulator (LAB or IBB) driver state */
struct labibb_regulator {
	struct regulator_desc		desc;
	struct device			*dev;
	struct regmap			*regmap;
	struct regulator_dev		*rdev;
	struct labibb_current_limits	uA_limits;
	struct delayed_work		ocp_recovery_work; /* over-current recovery */
	struct delayed_work		sc_recovery_work;  /* short-circuit recovery */
	u16				base;		/* register base (LAB or IBB block) */
	u8				type;		/* QCOM_LAB_TYPE or QCOM_IBB_TYPE */
	u8				dischg_sel;	/* pull-down / discharge selection */
	u8				soft_start_sel;	/* soft-start selection */
	int				sc_irq;		/* short-circuit interrupt */
	int				sc_count;	/* consecutive SC events seen */
	int				ocp_irq;	/* over-current interrupt */
	int				ocp_irq_count;	/* consecutive OCP events seen */
	int				fatal_count;	/* failed emergency-disable attempts */
};

/* Static per-variant description used to instantiate a labibb_regulator */
struct labibb_regulator_data {
	const char			*name;
	u8				type;
	u16				base;
	const struct regulator_desc	*desc;
};
static int qcom_labibb_ocp_hw_enable(struct regulator_dev *rdev)
{
struct labibb_regulator *vreg = rdev_get_drvdata(rdev);
int ret;
/* Clear irq latch status to avoid spurious event */
ret = regmap_update_bits(rdev->regmap,
vreg->base + REG_LABIBB_INT_LATCHED_CLR,
LABIBB_INT_VREG_OK, 1);
if (ret)
return ret;
/* Enable OCP HW interrupt */
return regmap_update_bits(rdev->regmap,
vreg->base + REG_LABIBB_INT_EN_SET,
LABIBB_INT_VREG_OK, 1);
}
static int qcom_labibb_ocp_hw_disable(struct regulator_dev *rdev)
{
struct labibb_regulator *vreg = rdev_get_drvdata(rdev);
return regmap_update_bits(rdev->regmap,
vreg->base + REG_LABIBB_INT_EN_CLR,
LABIBB_INT_VREG_OK, 1);
}
/**
 * qcom_labibb_check_ocp_status - Poll the Over-Current Protection state
 * @vreg: Main driver structure
 *
 * Reads STATUS1 and inspects the VREG_OK bit: a set bit means the output
 * is regulating normally, i.e. no over-current event is pending.
 *
 * Returns: Zero if there is no over-current, 1 if in over-current or
 * negative number for error
 */
static int qcom_labibb_check_ocp_status(struct labibb_regulator *vreg)
{
	u32 status;
	int ret;

	ret = regmap_read(vreg->rdev->regmap, vreg->base + REG_LABIBB_STATUS1,
			  &status);
	if (ret)
		return ret;

	return !(status & LABIBB_STATUS1_VREG_OK_BIT);
}
/**
 * qcom_labibb_ocp_recovery_worker - Handle OCP event
 * @work: OCP work structure
 *
 * This is the worker function to handle the Over Current Protection
 * hardware event; This will check if the hardware is still
 * signaling an over-current condition and will eventually stop
 * the regulator if such condition is still signaled after
 * LABIBB_MAX_OCP_COUNT times.
 *
 * If the driver that is consuming the regulator did not take action
 * for the OCP condition, or the hardware did not stabilize, a cut
 * of the LAB and IBB regulators will be forced (regulators will be
 * disabled).
 *
 * As last, if the writes to shut down the LAB/IBB regulators fail
 * for more than LABIBB_MAX_FATAL_COUNT, then a kernel panic will be
 * triggered, as a last resort to protect the hardware from burning;
 * this, however, is expected to never happen, but this is kept to
 * try to further ensure that we protect the hardware at all costs.
 */
static void qcom_labibb_ocp_recovery_worker(struct work_struct *work)
{
	struct labibb_regulator *vreg;
	const struct regulator_ops *ops;
	int ret;

	vreg = container_of(work, struct labibb_regulator,
			    ocp_recovery_work.work);
	ops = vreg->rdev->desc->ops;

	if (vreg->ocp_irq_count >= LABIBB_MAX_OCP_COUNT) {
		/*
		 * If we tried to disable the regulator multiple times but
		 * we kept failing, there's only one last hope to save our
		 * hardware from the death: raise a kernel bug, reboot and
		 * hope that the bootloader kindly saves us. This, though
		 * is done only as paranoid checking, because failing the
		 * regmap write to disable the vreg is almost impossible,
		 * since we got here after multiple regmap R/W.
		 */
		BUG_ON(vreg->fatal_count > LABIBB_MAX_FATAL_COUNT);
		dev_err(&vreg->rdev->dev, "LABIBB: CRITICAL: Disabling regulator\n");

		/* Disable the regulator immediately to avoid damage */
		ret = ops->disable(vreg->rdev);
		if (ret) {
			vreg->fatal_count++;
			goto reschedule;
		}
		/*
		 * NOTE(review): the IRQ is presumably disabled in the OCP
		 * ISR before this worker is scheduled; re-armed here with
		 * ocp_irq_count left as-is — confirm against the ISR.
		 */
		enable_irq(vreg->ocp_irq);
		vreg->fatal_count = 0;
		return;
	}

	ret = qcom_labibb_check_ocp_status(vreg);
	if (ret != 0) {
		/* Still (or again) in over-current: count it and retry */
		vreg->ocp_irq_count++;
		goto reschedule;
	}

	ret = qcom_labibb_ocp_hw_enable(vreg->rdev);
	if (ret) {
		/* We cannot trust it without OCP enabled. */
		dev_err(vreg->dev, "Cannot enable OCP IRQ\n");
		vreg->ocp_irq_count++;
		goto reschedule;
	}

	enable_irq(vreg->ocp_irq);
	/* Everything went fine: reset the OCP count! */
	vreg->ocp_irq_count = 0;
	return;

reschedule:
	/* Poll again after the recovery interval */
	mod_delayed_work(system_wq, &vreg->ocp_recovery_work,
			 msecs_to_jiffies(OCP_RECOVERY_INTERVAL_MS));
}
/**
 * qcom_labibb_ocp_isr - Interrupt routine for OverCurrent Protection
 * @irq: Interrupt number
 * @chip: Main driver structure
 *
 * Over Current Protection (OCP) will signal to the client driver
 * that an over-current event has happened and then will schedule
 * a recovery worker.
 *
 * Disabling and eventually re-enabling the regulator is expected
 * to be done by the driver, as some hardware may be triggering an
 * over-current condition only at first initialization or it may
 * be expected only for a very brief amount of time, after which
 * the attached hardware may be expected to stabilize its current
 * draw.
 *
 * Returns: IRQ_HANDLED for success or IRQ_NONE for failure.
 */
static irqreturn_t qcom_labibb_ocp_isr(int irq, void *chip)
{
	struct labibb_regulator *vreg = chip;
	const struct regulator_ops *ops = vreg->rdev->desc->ops;
	int ret;

	/* If the regulator is not enabled, this is a fake event */
	if (!ops->is_enabled(vreg->rdev))
		return IRQ_HANDLED;

	/* If we tried to recover for too many times it's not getting better */
	if (vreg->ocp_irq_count > LABIBB_MAX_OCP_COUNT)
		return IRQ_NONE;

	/*
	 * If we (unlikely) can't read this register, to prevent hardware
	 * damage at all costs, we assume that the overcurrent event was
	 * real; Moreover, if the status register is not signaling OCP,
	 * it was a spurious event, so it's all ok.
	 */
	ret = qcom_labibb_check_ocp_status(vreg);
	if (ret == 0) {
		/* Spurious event: zero the counter, still kick the worker */
		vreg->ocp_irq_count = 0;
		goto end;
	}
	vreg->ocp_irq_count++;

	/*
	 * Disable the interrupt temporarily, or it will fire continuously;
	 * we will re-enable it in the recovery worker function.
	 */
	disable_irq_nosync(irq);

	/* Warn the user for overcurrent */
	dev_warn(vreg->dev, "Over-Current interrupt fired!\n");

	/* Disable the interrupt to avoid hogging */
	ret = qcom_labibb_ocp_hw_disable(vreg->rdev);
	if (ret)
		goto end;

	/* Signal overcurrent event to drivers */
	regulator_notifier_call_chain(vreg->rdev,
				      REGULATOR_EVENT_OVER_CURRENT, NULL);

end:
	/* Schedule the recovery work */
	schedule_delayed_work(&vreg->ocp_recovery_work,
			      msecs_to_jiffies(OCP_RECOVERY_INTERVAL_MS));
	if (ret)
		return IRQ_NONE;

	return IRQ_HANDLED;
}
/**
 * qcom_labibb_set_ocp - Configure Over-Current Protection
 * @rdev: Regulator device
 * @lim: Requested current limit (must be zero: limit is not settable)
 * @severity: Protection severity (only REGULATOR_SEVERITY_PROT supported)
 * @enable: Whether to enable protection (disabling is not supported)
 *
 * Programs the VREG_OK interrupt as a level interrupt with the polarity
 * appropriate for the regulator type (LAB: trigger-low, IBB: trigger-high),
 * enables the OCP interrupt in hardware and requests the OCP IRQ line.
 *
 * Returns: Zero for success or negative error number
 */
static int qcom_labibb_set_ocp(struct regulator_dev *rdev, int lim,
			       int severity, bool enable)
{
	struct labibb_regulator *vreg = rdev_get_drvdata(rdev);
	char *ocp_irq_name;
	u32 irq_flags = IRQF_ONESHOT;
	int irq_trig_low, ret;

	/*
	 * labibb supports only protection - and does not support setting
	 * limit. Furthermore, we don't support disabling protection.
	 */
	if (lim || severity != REGULATOR_SEVERITY_PROT || !enable)
		return -EINVAL;

	/* If there is no OCP interrupt, there's nothing to set */
	if (vreg->ocp_irq <= 0)
		return -EINVAL;

	ocp_irq_name = devm_kasprintf(vreg->dev, GFP_KERNEL, "%s-over-current",
				      vreg->desc.name);
	if (!ocp_irq_name)
		return -ENOMEM;

	/* IRQ polarities - LAB: trigger-low, IBB: trigger-high */
	switch (vreg->type) {
	case QCOM_LAB_TYPE:
		irq_flags |= IRQF_TRIGGER_LOW;
		irq_trig_low = 1;
		break;
	case QCOM_IBB_TYPE:
		irq_flags |= IRQF_TRIGGER_HIGH;
		irq_trig_low = 0;
		break;
	default:
		return -EINVAL;
	}

	/* Activate OCP HW level interrupt */
	ret = regmap_update_bits(rdev->regmap,
				 vreg->base + REG_LABIBB_INT_SET_TYPE,
				 LABIBB_INT_VREG_OK,
				 LABIBB_INT_VREG_TYPE_LEVEL);
	if (ret)
		return ret;

	/* Set OCP interrupt polarity: the two registers are complementary */
	ret = regmap_update_bits(rdev->regmap,
				 vreg->base + REG_LABIBB_INT_POLARITY_HIGH,
				 LABIBB_INT_VREG_OK, !irq_trig_low);
	if (ret)
		return ret;
	ret = regmap_update_bits(rdev->regmap,
				 vreg->base + REG_LABIBB_INT_POLARITY_LOW,
				 LABIBB_INT_VREG_OK, irq_trig_low);
	if (ret)
		return ret;

	ret = qcom_labibb_ocp_hw_enable(rdev);
	if (ret)
		return ret;

	return devm_request_threaded_irq(vreg->dev, vreg->ocp_irq, NULL,
					 qcom_labibb_ocp_isr, irq_flags,
					 ocp_irq_name, vreg);
}
/**
 * qcom_labibb_check_sc_status - Check the Short Circuit Protection status
 * @vreg: Main driver structure
 *
 * This function checks the STATUS1 register on both LAB and IBB regulators
 * for the ShortCircuit bit: if it is set on *any* of them, then we have
 * experienced a short-circuit event.
 *
 * Returns: Zero if there is no short-circuit, 1 if in short-circuit or
 * negative number for error
 */
static int qcom_labibb_check_sc_status(struct labibb_regulator *vreg)
{
	u32 ibb_status, ibb_reg, lab_status, lab_reg;
	int ret;

	/* We have to work on both regulators due to PBS... */
	lab_reg = ibb_reg = vreg->base + REG_LABIBB_STATUS1;
	/*
	 * Derive the sibling's STATUS1 address from our own base; the IBB
	 * block sits PMI8998_IBB_LAB_REG_OFFSET below the LAB block.
	 */
	if (vreg->type == QCOM_LAB_TYPE)
		ibb_reg -= PMI8998_IBB_LAB_REG_OFFSET;
	else
		lab_reg += PMI8998_IBB_LAB_REG_OFFSET;

	ret = regmap_read(vreg->rdev->regmap, lab_reg, &lab_status);
	if (ret)
		return ret;
	ret = regmap_read(vreg->rdev->regmap, ibb_reg, &ibb_status);
	if (ret)
		return ret;

	/* A short on either regulator counts as a short-circuit event */
	return !!(lab_status & LABIBB_STATUS1_SC_BIT) ||
	       !!(ibb_status & LABIBB_STATUS1_SC_BIT);
}
/**
 * qcom_labibb_sc_recovery_worker - Handle Short Circuit event
 * @work: SC work structure
 *
 * This is the worker function to handle the Short Circuit Protection
 * hardware event; This will check if the hardware is still
 * signaling a short-circuit condition and will eventually never
 * re-enable the regulator if such condition is still signaled after
 * LABIBB_MAX_SC_COUNT times.
 *
 * If the driver that is consuming the regulator did not take action
 * for the SC condition, or the hardware did not stabilize, this
 * worker will stop rescheduling, leaving the regulators disabled
 * as already done by the Portable Batch System (PBS).
 */
static void qcom_labibb_sc_recovery_worker(struct work_struct *work)
{
	struct labibb_regulator *vreg;
	const struct regulator_ops *ops;
	u32 lab_reg, ibb_reg, lab_val, ibb_val, val;
	bool pbs_cut = false;
	int i, sc, ret;

	vreg = container_of(work, struct labibb_regulator,
			    sc_recovery_work.work);
	ops = vreg->rdev->desc->ops;

	/*
	 * If we tried to check the regulator status multiple times but we
	 * kept failing, then just bail out, as the Portable Batch System
	 * (PBS) will disable the vregs for us, preventing hardware damage.
	 */
	if (vreg->fatal_count > LABIBB_MAX_FATAL_COUNT)
		return;

	/* Too many short-circuit events. Throw in the towel. */
	if (vreg->sc_count > LABIBB_MAX_SC_COUNT)
		return;

	/*
	 * The Portable Batch System (PBS) automatically disables LAB
	 * and IBB when a short-circuit event is detected, so we have to
	 * check and work on both of them at the same time.
	 */
	lab_reg = ibb_reg = vreg->base + REG_LABIBB_ENABLE_CTL;
	if (vreg->type == QCOM_LAB_TYPE)
		ibb_reg -= PMI8998_IBB_LAB_REG_OFFSET;
	else
		lab_reg += PMI8998_IBB_LAB_REG_OFFSET;

	sc = qcom_labibb_check_sc_status(vreg);
	if (sc)
		goto reschedule;

	/* Poll both enable registers to see whether the PBS cut power */
	for (i = 0; i < LABIBB_MAX_SC_COUNT; i++) {
		ret = regmap_read(vreg->regmap, lab_reg, &lab_val);
		if (ret) {
			vreg->fatal_count++;
			goto reschedule;
		}

		ret = regmap_read(vreg->regmap, ibb_reg, &ibb_val);
		if (ret) {
			vreg->fatal_count++;
			goto reschedule;
		}
		/* Both must still be enabled, otherwise the PBS kicked in */
		val = lab_val & ibb_val;

		if (!(val & LABIBB_CONTROL_ENABLE)) {
			pbs_cut = true;
			break;
		}
		usleep_range(5000, 6000);
	}
	if (pbs_cut)
		goto reschedule;

	/*
	 * If we have reached this point, we either have successfully
	 * recovered from the SC condition or we had a spurious SC IRQ,
	 * which means that we can re-enable the regulators, if they
	 * have ever been disabled by the PBS.
	 */
	ret = ops->enable(vreg->rdev);
	if (ret)
		goto reschedule;

	/* Everything went fine: reset the SC count! */
	vreg->sc_count = 0;
	enable_irq(vreg->sc_irq);
	return;

reschedule:
	/*
	 * Now that we have done basic handling of the short-circuit,
	 * reschedule this worker in the regular system workqueue, as
	 * taking action is not truly urgent anymore.
	 */
	vreg->sc_count++;
	mod_delayed_work(system_wq, &vreg->sc_recovery_work,
			 msecs_to_jiffies(SC_RECOVERY_INTERVAL_MS));
}
/**
 * qcom_labibb_sc_isr - Interrupt routine for Short Circuit Protection
 * @irq: Interrupt number
 * @chip: Main driver structure
 *
 * Short Circuit Protection (SCP) will signal to the client driver
 * that a regulation-out event has happened and then will schedule
 * a recovery worker.
 *
 * The LAB and IBB regulators will be automatically disabled by the
 * Portable Batch System (PBS) and they will be enabled again by
 * the worker function if the hardware stops signaling the short
 * circuit event.
 *
 * Returns: IRQ_HANDLED for success or IRQ_NONE for failure.
 */
static irqreturn_t qcom_labibb_sc_isr(int irq, void *chip)
{
	struct labibb_regulator *vreg = chip;

	/* Too many SC events already handled: give up (see worker) */
	if (vreg->sc_count > LABIBB_MAX_SC_COUNT)
		return IRQ_NONE;

	/* Warn the user for short circuit */
	dev_warn(vreg->dev, "Short-Circuit interrupt fired!\n");

	/*
	 * Disable the interrupt temporarily, or it will fire continuously;
	 * we will re-enable it in the recovery worker function.
	 */
	disable_irq_nosync(irq);

	/* Signal out of regulation event to drivers */
	regulator_notifier_call_chain(vreg->rdev,
				      REGULATOR_EVENT_REGULATION_OUT, NULL);

	/* Schedule the short-circuit handling as high-priority work */
	mod_delayed_work(system_highpri_wq, &vreg->sc_recovery_work,
			 msecs_to_jiffies(SC_RECOVERY_INTERVAL_MS));
	return IRQ_HANDLED;
}
/*
 * qcom_labibb_set_current_limit - Pick and program a current limit step
 *
 * Selects the current-limit step inside [min_uA, max_uA] and programs it
 * into the CSEL field, enabling the current limiter.
 */
static int qcom_labibb_set_current_limit(struct regulator_dev *rdev,
					 int min_uA, int max_uA)
{
	struct labibb_regulator *vreg = rdev_get_drvdata(rdev);
	struct regulator_desc *desc = &vreg->desc;
	struct labibb_current_limits *lim = &vreg->uA_limits;
	u32 mask, val;
	int i, ret, sel = -1;

	/* Reject requests entirely below the supported range */
	if (min_uA < lim->uA_min || max_uA < lim->uA_min)
		return -EINVAL;

	/*
	 * Scan all selectors without breaking out: the *highest* limit
	 * step that still fits within [min_uA, max_uA] wins.
	 */
	for (i = 0; i < desc->n_current_limits; i++) {
		int uA_limit = (lim->uA_step * i) + lim->uA_min;

		if (max_uA >= uA_limit && min_uA <= uA_limit)
			sel = i;
	}
	if (sel < 0)
		return -EINVAL;

	/* Current limit setting needs secure access */
	ret = regmap_write(vreg->regmap, vreg->base + REG_LABIBB_SEC_ACCESS,
			   LABIBB_SEC_UNLOCK_CODE);
	if (ret)
		return ret;

	/* Program selector, override bit (LAB only) and the limiter enable */
	mask = desc->csel_mask | lim->ovr_val;
	mask |= LABIBB_CURRENT_LIMIT_EN;
	val = (u32)sel | lim->ovr_val;
	val |= LABIBB_CURRENT_LIMIT_EN;

	return regmap_update_bits(vreg->regmap, desc->csel_reg, mask, val);
}
/* Read back the programmed current-limit step and convert it to uA */
static int qcom_labibb_get_current_limit(struct regulator_dev *rdev)
{
	struct labibb_regulator *vreg = rdev_get_drvdata(rdev);
	struct labibb_current_limits *lim = &vreg->uA_limits;
	unsigned int sel;
	int ret;

	ret = regmap_read(vreg->regmap, vreg->desc.csel_reg, &sel);
	if (ret)
		return ret;

	/* The step index lives in the CSEL field of the register */
	sel &= vreg->desc.csel_mask;

	return lim->uA_min + (sel * lim->uA_step);
}
/* Program soft-start (LAB) or discharge-resistor (IBB) selector */
static int qcom_labibb_set_soft_start(struct regulator_dev *rdev)
{
	struct labibb_regulator *vreg = rdev_get_drvdata(rdev);
	u32 sel = (vreg->type == QCOM_IBB_TYPE) ? vreg->dischg_sel :
						  vreg->soft_start_sel;

	return regmap_write(rdev->regmap, rdev->desc->soft_start_reg, sel);
}
/* Map a raw value to its selector index in @table, or -EINVAL if absent */
static int qcom_labibb_get_table_sel(const int *table, int sz, u32 value)
{
	int sel;

	for (sel = 0; sel < sz; sel++) {
		if (table[sel] == value)
			return sel;
	}

	return -EINVAL;
}
/*
 * Selector tables: the array index is the value programmed into the
 * soft-start register; qcom_labibb_get_table_sel() maps a DT-provided
 * value back to its index.
 */
/* IBB discharge resistor values in KOhms */
static const int dischg_resistor_values[] = { 300, 64, 32, 16 };

/* Soft start time in microseconds */
static const int soft_start_values[] = { 200, 400, 600, 800 };
static int qcom_labibb_of_parse_cb(struct device_node *np,
const struct regulator_desc *desc,
struct regulator_config *config)
{
struct labibb_regulator *vreg = config->driver_data;
u32 dischg_kohms, soft_start_time;
int ret;
ret = of_property_read_u32(np, "qcom,discharge-resistor-kohms",
&dischg_kohms);
if (ret)
dischg_kohms = 300;
ret = qcom_labibb_get_table_sel(dischg_resistor_values,
ARRAY_SIZE(dischg_resistor_values),
dischg_kohms);
if (ret < 0)
return ret;
vreg->dischg_sel = (u8)ret;
ret = of_property_read_u32(np, "qcom,soft-start-us",
&soft_start_time);
if (ret)
soft_start_time = 200;
ret = qcom_labibb_get_table_sel(soft_start_values,
ARRAY_SIZE(soft_start_values),
soft_start_time);
if (ret < 0)
return ret;
vreg->soft_start_sel = (u8)ret;
return 0;
}
/* Operations shared by both the LAB and IBB regulators */
static const struct regulator_ops qcom_labibb_ops = {
	.enable			= regulator_enable_regmap,
	.disable		= regulator_disable_regmap,
	.is_enabled		= regulator_is_enabled_regmap,
	.set_voltage_sel	= regulator_set_voltage_sel_regmap,
	.get_voltage_sel	= regulator_get_voltage_sel_regmap,
	.list_voltage		= regulator_list_voltage_linear,
	.map_voltage		= regulator_map_voltage_linear,
	.set_active_discharge	= regulator_set_active_discharge_regmap,
	.set_pull_down		= regulator_set_pull_down_regmap,
	.set_current_limit	= qcom_labibb_set_current_limit,
	.get_current_limit	= qcom_labibb_get_current_limit,
	.set_soft_start		= qcom_labibb_set_soft_start,
	.set_over_current_protection = qcom_labibb_set_ocp,
};
/* PMI8998 LAB descriptor: 4.6V - 6.1V in 100mV steps (16 selectors) */
static const struct regulator_desc pmi8998_lab_desc = {
	.enable_mask		= LAB_ENABLE_CTL_MASK,
	.enable_reg		= (PMI8998_LAB_REG_BASE + REG_LABIBB_ENABLE_CTL),
	.enable_val		= LABIBB_CONTROL_ENABLE,
	.enable_time		= LAB_ENABLE_TIME,
	.poll_enabled_time	= LABIBB_POLL_ENABLED_TIME,
	.soft_start_reg		= (PMI8998_LAB_REG_BASE + REG_LABIBB_SOFT_START_CTL),
	.pull_down_reg		= (PMI8998_LAB_REG_BASE + REG_LABIBB_PD_CTL),
	.pull_down_mask		= LAB_PD_CTL_MASK,
	.pull_down_val_on	= LAB_PD_CTL_STRONG_PULL,
	.vsel_reg		= (PMI8998_LAB_REG_BASE + REG_LABIBB_VOLTAGE),
	.vsel_mask		= LAB_VOLTAGE_SET_MASK,
	.apply_reg		= (PMI8998_LAB_REG_BASE + REG_LABIBB_VOLTAGE),
	.apply_bit		= LABIBB_VOLTAGE_OVERRIDE_EN,
	.csel_reg		= (PMI8998_LAB_REG_BASE + REG_LABIBB_CURRENT_LIMIT),
	.csel_mask		= LAB_CURRENT_LIMIT_MASK,
	.n_current_limits	= 8,
	.off_on_delay		= LABIBB_OFF_ON_DELAY,
	.owner			= THIS_MODULE,
	.type			= REGULATOR_VOLTAGE,
	.min_uV			= 4600000,
	.uV_step		= 100000,
	.n_voltages		= 16,
	.ops			= &qcom_labibb_ops,
	.of_parse_cb		= qcom_labibb_of_parse_cb,
};

/* PMI8998 IBB descriptor: 1.4V - 7.7V in 100mV steps (64 selectors) */
static const struct regulator_desc pmi8998_ibb_desc = {
	.enable_mask		= IBB_ENABLE_CTL_MASK,
	.enable_reg		= (PMI8998_IBB_REG_BASE + REG_LABIBB_ENABLE_CTL),
	.enable_val		= LABIBB_CONTROL_ENABLE,
	.enable_time		= IBB_ENABLE_TIME,
	.poll_enabled_time	= LABIBB_POLL_ENABLED_TIME,
	.soft_start_reg		= (PMI8998_IBB_REG_BASE + REG_LABIBB_SOFT_START_CTL),
	.active_discharge_off	= 0,
	.active_discharge_on	= IBB_CTL_1_DISCHARGE_EN,
	.active_discharge_mask	= IBB_CTL_1_DISCHARGE_EN,
	.active_discharge_reg	= (PMI8998_IBB_REG_BASE + REG_IBB_PWRUP_PWRDN_CTL_1),
	.pull_down_reg		= (PMI8998_IBB_REG_BASE + REG_LABIBB_PD_CTL),
	.pull_down_mask		= IBB_PD_CTL_MASK,
	.pull_down_val_on	= IBB_PD_CTL_HALF_STRENGTH | IBB_PD_CTL_EN,
	.vsel_reg		= (PMI8998_IBB_REG_BASE + REG_LABIBB_VOLTAGE),
	.vsel_mask		= IBB_VOLTAGE_SET_MASK,
	.apply_reg		= (PMI8998_IBB_REG_BASE + REG_LABIBB_VOLTAGE),
	.apply_bit		= LABIBB_VOLTAGE_OVERRIDE_EN,
	.csel_reg		= (PMI8998_IBB_REG_BASE + REG_LABIBB_CURRENT_LIMIT),
	.csel_mask		= IBB_CURRENT_LIMIT_MASK,
	.n_current_limits	= 32,
	.off_on_delay		= LABIBB_OFF_ON_DELAY,
	.owner			= THIS_MODULE,
	.type			= REGULATOR_VOLTAGE,
	.min_uV			= 1400000,
	.uV_step		= 100000,
	.n_voltages		= 64,
	.ops			= &qcom_labibb_ops,
	.of_parse_cb		= qcom_labibb_of_parse_cb,
};

/* Regulators instantiated by the probe loop; terminated by an empty entry */
static const struct labibb_regulator_data pmi8998_labibb_data[] = {
	{"lab", QCOM_LAB_TYPE, PMI8998_LAB_REG_BASE, &pmi8998_lab_desc},
	{"ibb", QCOM_IBB_TYPE, PMI8998_IBB_REG_BASE, &pmi8998_ibb_desc},
	{ },
};

static const struct of_device_id qcom_labibb_match[] = {
	{ .compatible = "qcom,pmi8998-lab-ibb", .data = &pmi8998_labibb_data},
	{ },
};
MODULE_DEVICE_TABLE(of, qcom_labibb_match);
/*
 * qcom_labibb_regulator_probe - Register the LAB and IBB regulators
 *
 * Walks the per-variant regulator table, validates each peripheral type
 * against the hardware, registers the regulator and requests its critical
 * short-circuit IRQ. The OCP IRQ is only recorded here; it is requested
 * lazily by qcom_labibb_set_ocp().
 */
static int qcom_labibb_regulator_probe(struct platform_device *pdev)
{
	struct labibb_regulator *vreg;
	struct device *dev = &pdev->dev;
	struct regulator_config cfg = {};
	struct device_node *reg_node;
	const struct of_device_id *match;
	const struct labibb_regulator_data *reg_data;
	struct regmap *reg_regmap;
	unsigned int type;
	int ret;

	reg_regmap = dev_get_regmap(pdev->dev.parent, NULL);
	if (!reg_regmap) {
		dev_err(&pdev->dev, "Couldn't get parent's regmap\n");
		return -ENODEV;
	}

	match = of_match_device(qcom_labibb_match, &pdev->dev);
	if (!match)
		return -ENODEV;

	for (reg_data = match->data; reg_data->name; reg_data++) {
		char *sc_irq_name;
		int irq = 0;

		/* Validate if the type of regulator is indeed
		 * what's mentioned in DT.
		 */
		ret = regmap_read(reg_regmap, reg_data->base + REG_PERPH_TYPE,
				  &type);
		if (ret < 0) {
			dev_err(dev,
				"Peripheral type read failed ret=%d\n",
				ret);
			return -EINVAL;
		}

		if (WARN_ON((type != QCOM_LAB_TYPE) && (type != QCOM_IBB_TYPE)) ||
		    WARN_ON(type != reg_data->type))
			return -EINVAL;

		vreg = devm_kzalloc(&pdev->dev, sizeof(*vreg),
				    GFP_KERNEL);
		if (!vreg)
			return -ENOMEM;

		sc_irq_name = devm_kasprintf(dev, GFP_KERNEL,
					     "%s-short-circuit",
					     reg_data->name);
		if (!sc_irq_name)
			return -ENOMEM;

		reg_node = of_get_child_by_name(pdev->dev.of_node,
						reg_data->name);
		if (!reg_node)
			return -EINVAL;

		/* The Short Circuit interrupt is critical */
		irq = of_irq_get_byname(reg_node, "sc-err");
		if (irq <= 0) {
			if (irq == 0)
				irq = -EINVAL;

			of_node_put(reg_node);
			/*
			 * Use the platform device here: vreg->dev has not
			 * been assigned yet (vreg is zero-initialized), so
			 * it would pass a NULL device to dev_err_probe().
			 */
			return dev_err_probe(dev, irq,
					     "Short-circuit irq not found.\n");
		}
		vreg->sc_irq = irq;

		/* OverCurrent Protection IRQ is optional */
		irq = of_irq_get_byname(reg_node, "ocp");
		vreg->ocp_irq = irq;
		vreg->ocp_irq_count = 0;
		of_node_put(reg_node);

		vreg->regmap = reg_regmap;
		vreg->dev = dev;
		vreg->base = reg_data->base;
		vreg->type = reg_data->type;
		INIT_DELAYED_WORK(&vreg->sc_recovery_work,
				  qcom_labibb_sc_recovery_worker);

		if (vreg->ocp_irq > 0)
			INIT_DELAYED_WORK(&vreg->ocp_recovery_work,
					  qcom_labibb_ocp_recovery_worker);

		switch (vreg->type) {
		case QCOM_LAB_TYPE:
			/* LAB Limits: 200-1600mA */
			vreg->uA_limits.uA_min  = 200000;
			vreg->uA_limits.uA_step = 200000;
			vreg->uA_limits.ovr_val = LAB_CURRENT_LIMIT_OVERRIDE_EN;
			break;
		case QCOM_IBB_TYPE:
			/* IBB Limits: 0-1550mA */
			vreg->uA_limits.uA_min  = 0;
			vreg->uA_limits.uA_step = 50000;
			vreg->uA_limits.ovr_val = 0; /* No override bit */
			break;
		default:
			return -EINVAL;
		}

		memcpy(&vreg->desc, reg_data->desc, sizeof(vreg->desc));
		vreg->desc.of_match = reg_data->name;
		vreg->desc.name = reg_data->name;

		cfg.dev = vreg->dev;
		cfg.driver_data = vreg;
		cfg.regmap = vreg->regmap;

		vreg->rdev = devm_regulator_register(vreg->dev, &vreg->desc,
						     &cfg);
		if (IS_ERR(vreg->rdev)) {
			/*
			 * Report the registration error itself: the previous
			 * code printed the stale 'ret' from the earlier
			 * regmap read, which is always zero at this point.
			 */
			ret = PTR_ERR(vreg->rdev);
			dev_err(dev, "qcom_labibb: error registering %s : %d\n",
				reg_data->name, ret);
			return ret;
		}

		ret = devm_request_threaded_irq(vreg->dev, vreg->sc_irq, NULL,
						qcom_labibb_sc_isr,
						IRQF_ONESHOT |
						IRQF_TRIGGER_RISING,
						sc_irq_name, vreg);
		if (ret)
			return ret;
	}

	return 0;
}
/* Platform driver glue: matched via the qcom_labibb_match OF table */
static struct platform_driver qcom_labibb_regulator_driver = {
	.driver	= {
		.name = "qcom-lab-ibb-regulator",
		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
		.of_match_table	= qcom_labibb_match,
	},
	.probe = qcom_labibb_regulator_probe,
};
module_platform_driver(qcom_labibb_regulator_driver);

MODULE_DESCRIPTION("Qualcomm labibb driver");
MODULE_AUTHOR("Nisha Kumari <[email protected]>");
MODULE_AUTHOR("Sumit Semwal <[email protected]>");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/regulator/qcom-labibb-regulator.c |
/*
* pbias-regulator.c
*
* Copyright (C) 2014 Texas Instruments Incorporated - https://www.ti.com/
* Author: Balaji T K <[email protected]>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation version 2.
*
* This program is distributed "as is" WITHOUT ANY WARRANTY of any
* kind, whether express or implied; without even the implied warranty
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/err.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/mfd/syscon.h>
#include <linux/platform_device.h>
#include <linux/regulator/driver.h>
#include <linux/regulator/machine.h>
#include <linux/regulator/of_regulator.h>
#include <linux/regmap.h>
#include <linux/slab.h>
#include <linux/of.h>
/*
 * Per-cell PBIAS description; the fields are copied into the matching
 * regulator_desc members in pbias_regulator_probe().
 */
struct pbias_reg_info {
	u32 enable;		/* value written to enable (enable_val) */
	u32 enable_mask;	/* bits owned by the enable control */
	u32 disable_val;	/* value written on disable */
	u32 vmode;		/* voltage-select bit (becomes vsel_mask) */
	unsigned int enable_time;	/* becomes regulator_desc.enable_time */
	char *name;
	const unsigned int *pbias_volt_table;	/* selectable voltages, in uV */
	int n_voltages;
};

/* Per-SoC match data: PBIAS register offset from the syscon base */
struct pbias_of_data {
	unsigned int offset;
};
/* Two-level voltage tables (uV): low is always 1.8V, high 3.0V or 3.3V */
static const unsigned int pbias_volt_table_3_0V[] = {
	1800000,
	3000000
};

static const unsigned int pbias_volt_table_3_3V[] = {
	1800000,
	3300000
};

/* All PBIAS cells are plain regmap-backed enable/disable + 2-level vsel */
static const struct regulator_ops pbias_regulator_voltage_ops = {
	.list_voltage = regulator_list_voltage_table,
	.get_voltage_sel = regulator_get_voltage_sel_regmap,
	.set_voltage_sel = regulator_set_voltage_sel_regmap,
	.enable = regulator_enable_regmap,
	.disable = regulator_disable_regmap,
	.is_enabled = regulator_is_enabled_regmap,
};
/* Known PBIAS cells; bit positions differ per SoC generation */
static const struct pbias_reg_info pbias_mmc_omap2430 = {
	.enable = BIT(1),
	.enable_mask = BIT(1),
	.vmode = BIT(0),
	.disable_val = 0,
	.enable_time = 100,
	.pbias_volt_table = pbias_volt_table_3_0V,
	.n_voltages = 2,
	.name = "pbias_mmc_omap2430"
};

static const struct pbias_reg_info pbias_sim_omap3 = {
	.enable = BIT(9),
	.enable_mask = BIT(9),
	.vmode = BIT(8),
	.enable_time = 100,
	.pbias_volt_table = pbias_volt_table_3_0V,
	.n_voltages = 2,
	.name = "pbias_sim_omap3"
};

static const struct pbias_reg_info pbias_mmc_omap4 = {
	.enable = BIT(26) | BIT(22),
	.enable_mask = BIT(26) | BIT(25) | BIT(22),
	.disable_val = BIT(25),
	.vmode = BIT(21),
	.enable_time = 100,
	.pbias_volt_table = pbias_volt_table_3_0V,
	.n_voltages = 2,
	.name = "pbias_mmc_omap4"
};

static const struct pbias_reg_info pbias_mmc_omap5 = {
	.enable = BIT(27) | BIT(26),
	.enable_mask = BIT(27) | BIT(25) | BIT(26),
	.disable_val = BIT(25),
	.vmode = BIT(21),
	.enable_time = 100,
	.pbias_volt_table = pbias_volt_table_3_3V,
	.n_voltages = 2,
	.name = "pbias_mmc_omap5"
};

/* DT child-node names matched by of_regulator_match() in probe */
static struct of_regulator_match pbias_matches[] = {
	{ .name = "pbias_mmc_omap2430", .driver_data = (void *)&pbias_mmc_omap2430},
	{ .name = "pbias_sim_omap3", .driver_data = (void *)&pbias_sim_omap3},
	{ .name = "pbias_mmc_omap4", .driver_data = (void *)&pbias_mmc_omap4},
	{ .name = "pbias_mmc_omap5", .driver_data = (void *)&pbias_mmc_omap5},
};
#define PBIAS_NUM_REGS	ARRAY_SIZE(pbias_matches)
/* Offset from SCM general area (and syscon) base */

static const struct pbias_of_data pbias_of_data_omap2 = {
	.offset = 0x230,
};

static const struct pbias_of_data pbias_of_data_omap3 = {
	.offset = 0x2b0,
};

static const struct pbias_of_data pbias_of_data_omap4 = {
	.offset = 0x60,
};

static const struct pbias_of_data pbias_of_data_omap5 = {
	.offset = 0x60,
};

static const struct pbias_of_data pbias_of_data_dra7 = {
	.offset = 0xe00,
};

/*
 * "ti,pbias-omap" is the legacy binding with no match data: the offset
 * is then taken from the node's reg property (see probe).
 */
static const struct of_device_id pbias_of_match[] = {
	{ .compatible = "ti,pbias-omap", },
	{ .compatible = "ti,pbias-omap2", .data = &pbias_of_data_omap2, },
	{ .compatible = "ti,pbias-omap3", .data = &pbias_of_data_omap3, },
	{ .compatible = "ti,pbias-omap4", .data = &pbias_of_data_omap4, },
	{ .compatible = "ti,pbias-omap5", .data = &pbias_of_data_omap5, },
	{ .compatible = "ti,pbias-dra7", .data = &pbias_of_data_dra7, },
	{},
};
MODULE_DEVICE_TABLE(of, pbias_of_match);
/*
 * pbias_regulator_probe - Register every PBIAS cell found in DT
 *
 * Matches the node's children against pbias_matches, builds one
 * regulator_desc per matched cell (all sharing the same syscon register
 * at @offset) and registers them.
 */
static int pbias_regulator_probe(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	struct resource *res;
	struct regulator_config cfg = { };
	struct regulator_desc *desc;
	struct regulator_dev *rdev;
	struct regmap *syscon;
	const struct pbias_reg_info *info;
	int ret, count, idx;
	const struct pbias_of_data *data;
	unsigned int offset;

	/* count = number of DT children that matched a known PBIAS cell */
	count = of_regulator_match(&pdev->dev, np, pbias_matches,
				   PBIAS_NUM_REGS);
	if (count < 0)
		return count;

	desc = devm_kcalloc(&pdev->dev, count, sizeof(*desc), GFP_KERNEL);
	if (!desc)
		return -ENOMEM;

	syscon = syscon_regmap_lookup_by_phandle(np, "syscon");
	if (IS_ERR(syscon))
		return PTR_ERR(syscon);

	data = of_device_get_match_data(&pdev->dev);
	if (data) {
		offset = data->offset;
	} else {
		/* Legacy binding: offset comes from the node's reg property */
		res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
		if (!res)
			return -EINVAL;

		offset = res->start;
		dev_WARN(&pdev->dev,
			 "using legacy dt data for pbias offset\n");
	}

	cfg.regmap = syscon;
	cfg.dev = &pdev->dev;

	/* Walk all possible matches; stop early once 'count' are registered */
	for (idx = 0; idx < PBIAS_NUM_REGS && count; idx++) {
		if (!pbias_matches[idx].init_data ||
		    !pbias_matches[idx].of_node)
			continue;

		info = pbias_matches[idx].driver_data;
		if (!info)
			return -ENODEV;

		/* All cells share the same register; only masks/values differ */
		desc->name = info->name;
		desc->owner = THIS_MODULE;
		desc->type = REGULATOR_VOLTAGE;
		desc->ops = &pbias_regulator_voltage_ops;
		desc->volt_table = info->pbias_volt_table;
		desc->n_voltages = info->n_voltages;
		desc->enable_time = info->enable_time;
		desc->vsel_reg = offset;
		desc->vsel_mask = info->vmode;
		desc->enable_reg = offset;
		desc->enable_mask = info->enable_mask;
		desc->enable_val = info->enable;
		desc->disable_val = info->disable_val;

		cfg.init_data = pbias_matches[idx].init_data;
		cfg.of_node = pbias_matches[idx].of_node;

		rdev = devm_regulator_register(&pdev->dev, desc, &cfg);
		if (IS_ERR(rdev)) {
			ret = PTR_ERR(rdev);
			dev_err(&pdev->dev,
				"Failed to register regulator: %d\n", ret);
			return ret;
		}
		desc++;
		count--;
	}

	return 0;
}
/* Platform driver glue for the PBIAS cells */
static struct platform_driver pbias_regulator_driver = {
	.probe		= pbias_regulator_probe,
	.driver		= {
		.name		= "pbias-regulator",
		.probe_type	= PROBE_PREFER_ASYNCHRONOUS,
		.of_match_table = of_match_ptr(pbias_of_match),
	},
};

module_platform_driver(pbias_regulator_driver);

MODULE_AUTHOR("Balaji T K <[email protected]>");
MODULE_DESCRIPTION("pbias voltage regulator");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:pbias-regulator");
| linux-master | drivers/regulator/pbias-regulator.c |
// SPDX-License-Identifier: GPL-2.0-only
//
// Copyright (C) 2020 MediaTek Inc.
//
// Author: Gene Chen <[email protected]>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/regulator/driver.h>
#include <linux/regulator/machine.h>
#include <dt-bindings/regulator/mediatek,mt6360-regulator.h>
/* MT6360 regulator identifiers (and count via MT6360_REGULATOR_MAX) */
enum {
	MT6360_REGULATOR_BUCK1 = 0,
	MT6360_REGULATOR_BUCK2,
	MT6360_REGULATOR_LDO6,
	MT6360_REGULATOR_LDO7,
	MT6360_REGULATOR_LDO1,
	MT6360_REGULATOR_LDO2,
	MT6360_REGULATOR_LDO3,
	MT6360_REGULATOR_LDO5,
	MT6360_REGULATOR_MAX,
};
/* Maps one named IRQ resource to the handler that services it */
struct mt6360_irq_mapping {
	const char *name;		/* IRQ resource name */
	irq_handler_t handler;
};

/* Static description of one MT6360 regulator, extending regulator_desc */
struct mt6360_regulator_desc {
	const struct regulator_desc desc;
	unsigned int mode_reg;		/* operating-mode register */
	unsigned int mode_mask;		/* mode bits within mode_reg */
	unsigned int state_reg;		/* status register */
	unsigned int state_mask;	/* status bits within state_reg */
	const struct mt6360_irq_mapping *irq_tables;	/* IRQs to request */
	int irq_table_size;		/* number of entries in irq_tables */
};

/* Per-device driver state */
struct mt6360_regulator_data {
	struct device *dev;
	struct regmap *regmap;
};
/* PGB event: report a general regulator failure to consumers */
static irqreturn_t mt6360_pgb_event_handler(int irq, void *data)
{
	regulator_notifier_call_chain(data, REGULATOR_EVENT_FAIL, NULL);
	return IRQ_HANDLED;
}
/* Over-current event: forward REGULATOR_EVENT_OVER_CURRENT to consumers */
static irqreturn_t mt6360_oc_event_handler(int irq, void *data)
{
	regulator_notifier_call_chain(data, REGULATOR_EVENT_OVER_CURRENT, NULL);
	return IRQ_HANDLED;
}
/* Over-voltage event: forward REGULATOR_EVENT_REGULATION_OUT to consumers */
static irqreturn_t mt6360_ov_event_handler(int irq, void *data)
{
	regulator_notifier_call_chain(data, REGULATOR_EVENT_REGULATION_OUT,
				      NULL);
	return IRQ_HANDLED;
}
/* Under-voltage event: forward REGULATOR_EVENT_UNDER_VOLTAGE to consumers */
static irqreturn_t mt6360_uv_event_handler(int irq, void *data)
{
	regulator_notifier_call_chain(data, REGULATOR_EVENT_UNDER_VOLTAGE,
				      NULL);
	return IRQ_HANDLED;
}
/*
 * Per-regulator IRQ tables. Every regulator handles pgb (mapped to
 * REGULATOR_EVENT_FAIL) and oc (over-current) events; the two bucks
 * additionally handle ov (over-voltage) and uv (under-voltage).
 */
static const struct mt6360_irq_mapping buck1_irq_tbls[] = {
	{ "buck1_pgb_evt", mt6360_pgb_event_handler },
	{ "buck1_oc_evt", mt6360_oc_event_handler },
	{ "buck1_ov_evt", mt6360_ov_event_handler },
	{ "buck1_uv_evt", mt6360_uv_event_handler },
};

static const struct mt6360_irq_mapping buck2_irq_tbls[] = {
	{ "buck2_pgb_evt", mt6360_pgb_event_handler },
	{ "buck2_oc_evt", mt6360_oc_event_handler },
	{ "buck2_ov_evt", mt6360_ov_event_handler },
	{ "buck2_uv_evt", mt6360_uv_event_handler },
};

static const struct mt6360_irq_mapping ldo6_irq_tbls[] = {
	{ "ldo6_pgb_evt", mt6360_pgb_event_handler },
	{ "ldo6_oc_evt", mt6360_oc_event_handler },
};

static const struct mt6360_irq_mapping ldo7_irq_tbls[] = {
	{ "ldo7_pgb_evt", mt6360_pgb_event_handler },
	{ "ldo7_oc_evt", mt6360_oc_event_handler },
};

static const struct mt6360_irq_mapping ldo1_irq_tbls[] = {
	{ "ldo1_pgb_evt", mt6360_pgb_event_handler },
	{ "ldo1_oc_evt", mt6360_oc_event_handler },
};

static const struct mt6360_irq_mapping ldo2_irq_tbls[] = {
	{ "ldo2_pgb_evt", mt6360_pgb_event_handler },
	{ "ldo2_oc_evt", mt6360_oc_event_handler },
};

static const struct mt6360_irq_mapping ldo3_irq_tbls[] = {
	{ "ldo3_pgb_evt", mt6360_pgb_event_handler },
	{ "ldo3_oc_evt", mt6360_oc_event_handler },
};

static const struct mt6360_irq_mapping ldo5_irq_tbls[] = {
	{ "ldo5_pgb_evt", mt6360_pgb_event_handler },
	{ "ldo5_oc_evt", mt6360_oc_event_handler },
};
/* BUCK: 300mV..1.295V in 5mV steps; selectors 0xc8-0xff all give 1.3V */
static const struct linear_range buck_vout_ranges[] = {
	REGULATOR_LINEAR_RANGE(300000, 0x00, 0xc7, 5000),
	REGULATOR_LINEAR_RANGE(1300000, 0xc8, 0xff, 0),
};

/*
 * LDO voltage maps: 10mV ramps alternating with zero-step plateaus
 * (several consecutive selectors mapping to the same voltage).
 */
static const struct linear_range ldo_vout_ranges1[] = {
	REGULATOR_LINEAR_RANGE(500000, 0x00, 0x09, 10000),
	REGULATOR_LINEAR_RANGE(600000, 0x0a, 0x10, 0),
	REGULATOR_LINEAR_RANGE(610000, 0x11, 0x19, 10000),
	REGULATOR_LINEAR_RANGE(700000, 0x1a, 0x20, 0),
	REGULATOR_LINEAR_RANGE(710000, 0x21, 0x29, 10000),
	REGULATOR_LINEAR_RANGE(800000, 0x2a, 0x30, 0),
	REGULATOR_LINEAR_RANGE(810000, 0x31, 0x39, 10000),
	REGULATOR_LINEAR_RANGE(900000, 0x3a, 0x40, 0),
	REGULATOR_LINEAR_RANGE(910000, 0x41, 0x49, 10000),
	REGULATOR_LINEAR_RANGE(1000000, 0x4a, 0x50, 0),
	REGULATOR_LINEAR_RANGE(1010000, 0x51, 0x59, 10000),
	REGULATOR_LINEAR_RANGE(1100000, 0x5a, 0x60, 0),
	REGULATOR_LINEAR_RANGE(1110000, 0x61, 0x69, 10000),
	REGULATOR_LINEAR_RANGE(1200000, 0x6a, 0x70, 0),
	REGULATOR_LINEAR_RANGE(1210000, 0x71, 0x79, 10000),
	REGULATOR_LINEAR_RANGE(1300000, 0x7a, 0x80, 0),
	REGULATOR_LINEAR_RANGE(1310000, 0x81, 0x89, 10000),
	REGULATOR_LINEAR_RANGE(1400000, 0x8a, 0x90, 0),
	REGULATOR_LINEAR_RANGE(1410000, 0x91, 0x99, 10000),
	REGULATOR_LINEAR_RANGE(1500000, 0x9a, 0xa0, 0),
	REGULATOR_LINEAR_RANGE(1510000, 0xa1, 0xa9, 10000),
	REGULATOR_LINEAR_RANGE(1600000, 0xaa, 0xb0, 0),
	REGULATOR_LINEAR_RANGE(1610000, 0xb1, 0xb9, 10000),
	REGULATOR_LINEAR_RANGE(1700000, 0xba, 0xc0, 0),
	REGULATOR_LINEAR_RANGE(1710000, 0xc1, 0xc9, 10000),
	REGULATOR_LINEAR_RANGE(1800000, 0xca, 0xd0, 0),
	REGULATOR_LINEAR_RANGE(1810000, 0xd1, 0xd9, 10000),
	REGULATOR_LINEAR_RANGE(1900000, 0xda, 0xe0, 0),
	REGULATOR_LINEAR_RANGE(1910000, 0xe1, 0xe9, 10000),
	REGULATOR_LINEAR_RANGE(2000000, 0xea, 0xf0, 0),
	REGULATOR_LINEAR_RANGE(2010000, 0xf1, 0xf9, 10000),
	REGULATOR_LINEAR_RANGE(2100000, 0xfa, 0xff, 0),
};

static const struct linear_range ldo_vout_ranges2[] = {
	REGULATOR_LINEAR_RANGE(1200000, 0x00, 0x09, 10000),
	REGULATOR_LINEAR_RANGE(1300000, 0x0a, 0x10, 0),
	REGULATOR_LINEAR_RANGE(1310000, 0x11, 0x19, 10000),
	REGULATOR_LINEAR_RANGE(1400000, 0x1a, 0x1f, 0),
	REGULATOR_LINEAR_RANGE(1500000, 0x20, 0x29, 10000),
	REGULATOR_LINEAR_RANGE(1600000, 0x2a, 0x2f, 0),
	REGULATOR_LINEAR_RANGE(1700000, 0x30, 0x39, 10000),
	REGULATOR_LINEAR_RANGE(1800000, 0x3a, 0x40, 0),
	REGULATOR_LINEAR_RANGE(1810000, 0x41, 0x49, 10000),
	REGULATOR_LINEAR_RANGE(1900000, 0x4a, 0x4f, 0),
	REGULATOR_LINEAR_RANGE(2000000, 0x50, 0x59, 10000),
	REGULATOR_LINEAR_RANGE(2100000, 0x5a, 0x60, 0),
	REGULATOR_LINEAR_RANGE(2110000, 0x61, 0x69, 10000),
	REGULATOR_LINEAR_RANGE(2200000, 0x6a, 0x6f, 0),
	REGULATOR_LINEAR_RANGE(2500000, 0x70, 0x79, 10000),
	REGULATOR_LINEAR_RANGE(2600000, 0x7a, 0x7f, 0),
	REGULATOR_LINEAR_RANGE(2700000, 0x80, 0x89, 10000),
	REGULATOR_LINEAR_RANGE(2800000, 0x8a, 0x90, 0),
	REGULATOR_LINEAR_RANGE(2810000, 0x91, 0x99, 10000),
	REGULATOR_LINEAR_RANGE(2900000, 0x9a, 0xa0, 0),
	REGULATOR_LINEAR_RANGE(2910000, 0xa1, 0xa9, 10000),
	REGULATOR_LINEAR_RANGE(3000000, 0xaa, 0xb0, 0),
	REGULATOR_LINEAR_RANGE(3010000, 0xb1, 0xb9, 10000),
	REGULATOR_LINEAR_RANGE(3100000, 0xba, 0xc0, 0),
	REGULATOR_LINEAR_RANGE(3110000, 0xc1, 0xc9, 10000),
	REGULATOR_LINEAR_RANGE(3200000, 0xca, 0xcf, 0),
	REGULATOR_LINEAR_RANGE(3300000, 0xd0, 0xd9, 10000),
	REGULATOR_LINEAR_RANGE(3400000, 0xda, 0xe0, 0),
	REGULATOR_LINEAR_RANGE(3410000, 0xe1, 0xe9, 10000),
	REGULATOR_LINEAR_RANGE(3500000, 0xea, 0xf0, 0),
	REGULATOR_LINEAR_RANGE(3510000, 0xf1, 0xf9, 10000),
	REGULATOR_LINEAR_RANGE(3600000, 0xfa, 0xff, 0),
};
static const struct linear_range ldo_vout_ranges3[] = {
REGULATOR_LINEAR_RANGE(2700000, 0x00, 0x09, 10000),
REGULATOR_LINEAR_RANGE(2800000, 0x0a, 0x10, 0),
REGULATOR_LINEAR_RANGE(2810000, 0x11, 0x19, 10000),
REGULATOR_LINEAR_RANGE(2900000, 0x1a, 0x20, 0),
REGULATOR_LINEAR_RANGE(2910000, 0x21, 0x29, 10000),
REGULATOR_LINEAR_RANGE(3000000, 0x2a, 0x30, 0),
REGULATOR_LINEAR_RANGE(3010000, 0x31, 0x39, 10000),
REGULATOR_LINEAR_RANGE(3100000, 0x3a, 0x40, 0),
REGULATOR_LINEAR_RANGE(3110000, 0x41, 0x49, 10000),
REGULATOR_LINEAR_RANGE(3200000, 0x4a, 0x4f, 0),
REGULATOR_LINEAR_RANGE(3300000, 0x50, 0x59, 10000),
REGULATOR_LINEAR_RANGE(3400000, 0x5a, 0x60, 0),
REGULATOR_LINEAR_RANGE(3410000, 0x61, 0x69, 10000),
REGULATOR_LINEAR_RANGE(3500000, 0x6a, 0x70, 0),
REGULATOR_LINEAR_RANGE(3510000, 0x71, 0x79, 10000),
REGULATOR_LINEAR_RANGE(3600000, 0x7a, 0x7f, 0),
};
/*
 * Program the hardware operating mode (normal/LP/ULP) for one regulator by
 * rewriting the mode field of the descriptor's mode register.
 *
 * Returns 0 on success, -EINVAL for an unsupported mode, or the regmap
 * error code on a failed register write.
 */
static int mt6360_regulator_set_mode(struct regulator_dev *rdev,
				     unsigned int mode)
{
	const struct mt6360_regulator_desc *desc =
		(struct mt6360_regulator_desc *)rdev->desc;
	struct regmap *map = rdev_get_regmap(rdev);
	int lsb = ffs(desc->mode_mask) - 1;	/* bit position of mode field */
	unsigned int opmode;
	int err;

	switch (mode) {
	case REGULATOR_MODE_NORMAL:
		opmode = MT6360_OPMODE_NORMAL;
		break;
	case REGULATOR_MODE_IDLE:
		opmode = MT6360_OPMODE_LP;
		break;
	case REGULATOR_MODE_STANDBY:
		opmode = MT6360_OPMODE_ULP;
		break;
	default:
		return -EINVAL;
	}

	err = regmap_update_bits(map, desc->mode_reg, desc->mode_mask,
				 opmode << lsb);
	if (err)
		dev_err(&rdev->dev, "%s: fail (%d)\n", __func__, err);

	return err;
}
static unsigned int mt6360_regulator_get_mode(struct regulator_dev *rdev)
{
const struct mt6360_regulator_desc *rdesc = (struct mt6360_regulator_desc *)rdev->desc;
struct regmap *regmap = rdev_get_regmap(rdev);
int shift = ffs(rdesc->mode_mask) - 1;
unsigned int val;
int ret;
ret = regmap_read(regmap, rdesc->mode_reg, &val);
if (ret)
return ret;
val &= rdesc->mode_mask;
val >>= shift;
switch (val) {
case MT6360_OPMODE_LP:
return REGULATOR_MODE_IDLE;
case MT6360_OPMODE_ULP:
return REGULATOR_MODE_STANDBY;
case MT6360_OPMODE_NORMAL:
return REGULATOR_MODE_NORMAL;
default:
return -EINVAL;
}
}
/*
 * Report whether the regulator output is actually up, based on the state
 * bit in the descriptor's state register.
 */
static int mt6360_regulator_get_status(struct regulator_dev *rdev)
{
	const struct mt6360_regulator_desc *desc =
		(struct mt6360_regulator_desc *)rdev->desc;
	unsigned int state;
	int err;

	err = regmap_read(rdev_get_regmap(rdev), desc->state_reg, &state);
	if (err)
		return err;

	return (state & desc->state_mask) ? REGULATOR_STATUS_ON
					  : REGULATOR_STATUS_OFF;
}
/*
 * Voltage selection and enable/disable go through the generic regmap
 * helpers (driven by the fields filled in MT6360_REGULATOR_DESC); mode
 * and status handling are chip specific.
 */
static const struct regulator_ops mt6360_regulator_ops = {
	.list_voltage = regulator_list_voltage_linear_range,
	.enable = regulator_enable_regmap,
	.disable = regulator_disable_regmap,
	.is_enabled = regulator_is_enabled_regmap,
	.set_voltage_sel = regulator_set_voltage_sel_regmap,
	.get_voltage_sel = regulator_get_voltage_sel_regmap,
	.set_mode = mt6360_regulator_set_mode,
	.get_mode = mt6360_regulator_get_mode,
	.get_status = mt6360_regulator_get_status,
};
/*
 * Translate a raw MT6360 opmode value from the device tree into the
 * corresponding framework REGULATOR_MODE_* constant.
 */
static unsigned int mt6360_regulator_of_map_mode(unsigned int hw_mode)
{
	switch (hw_mode) {
	case MT6360_OPMODE_ULP:
		return REGULATOR_MODE_STANDBY;
	case MT6360_OPMODE_LP:
		return REGULATOR_MODE_IDLE;
	case MT6360_OPMODE_NORMAL:
		return REGULATOR_MODE_NORMAL;
	default:
		return REGULATOR_MODE_INVALID;
	}
}
/*
 * Build one mt6360_regulator_desc entry: the embedded regulator_desc is
 * wired to the generic regmap ops above, while mode/state register info
 * and the per-regulator IRQ table are kept in the outer wrapper struct.
 */
#define MT6360_REGULATOR_DESC(_name, _sname, ereg, emask, vreg, vmask, \
			      mreg, mmask, streg, stmask, vranges, \
			      vcnts, offon_delay, irq_tbls) \
{ \
	.desc = { \
		.name = #_name, \
		.supply_name = #_sname, \
		.id = MT6360_REGULATOR_##_name, \
		.of_match = of_match_ptr(#_name), \
		.regulators_node = of_match_ptr("regulator"), \
		.of_map_mode = mt6360_regulator_of_map_mode, \
		.owner = THIS_MODULE, \
		.ops = &mt6360_regulator_ops, \
		.type = REGULATOR_VOLTAGE, \
		.vsel_reg = vreg, \
		.vsel_mask = vmask, \
		.enable_reg = ereg, \
		.enable_mask = emask, \
		.linear_ranges = vranges, \
		.n_linear_ranges = ARRAY_SIZE(vranges), \
		.n_voltages = vcnts, \
		.off_on_delay = offon_delay, \
	}, \
	.mode_reg = mreg, \
	.mode_mask = mmask, \
	.state_reg = streg, \
	.state_mask = stmask, \
	.irq_tables = irq_tbls, \
	.irq_table_size = ARRAY_SIZE(irq_tbls), \
}
/*
 * All MT6360 rails.  Arguments per entry: name, supply, enable reg/mask,
 * vsel reg/mask, mode reg/mask, state reg/mask, voltage range table,
 * selector count, off->on delay (us), fault IRQ table.  LDO3/LDO5 need a
 * 100 us off->on delay; all others none.
 */
static const struct mt6360_regulator_desc mt6360_regulator_descs[] = {
	MT6360_REGULATOR_DESC(BUCK1, BUCK1_VIN, 0x117, 0x40, 0x110, 0xff, 0x117, 0x30, 0x117, 0x04,
			      buck_vout_ranges, 256, 0, buck1_irq_tbls),
	MT6360_REGULATOR_DESC(BUCK2, BUCK2_VIN, 0x127, 0x40, 0x120, 0xff, 0x127, 0x30, 0x127, 0x04,
			      buck_vout_ranges, 256, 0, buck2_irq_tbls),
	MT6360_REGULATOR_DESC(LDO6, LDO_VIN3, 0x137, 0x40, 0x13B, 0xff, 0x137, 0x30, 0x137, 0x04,
			      ldo_vout_ranges1, 256, 0, ldo6_irq_tbls),
	MT6360_REGULATOR_DESC(LDO7, LDO_VIN3, 0x131, 0x40, 0x135, 0xff, 0x131, 0x30, 0x131, 0x04,
			      ldo_vout_ranges1, 256, 0, ldo7_irq_tbls),
	MT6360_REGULATOR_DESC(LDO1, LDO_VIN1, 0x217, 0x40, 0x21B, 0xff, 0x217, 0x30, 0x217, 0x04,
			      ldo_vout_ranges2, 256, 0, ldo1_irq_tbls),
	MT6360_REGULATOR_DESC(LDO2, LDO_VIN1, 0x211, 0x40, 0x215, 0xff, 0x211, 0x30, 0x211, 0x04,
			      ldo_vout_ranges2, 256, 0, ldo2_irq_tbls),
	MT6360_REGULATOR_DESC(LDO3, LDO_VIN1, 0x205, 0x40, 0x209, 0xff, 0x205, 0x30, 0x205, 0x04,
			      ldo_vout_ranges2, 256, 100, ldo3_irq_tbls),
	MT6360_REGULATOR_DESC(LDO5, LDO_VIN2, 0x20B, 0x40, 0x20F, 0x7f, 0x20B, 0x30, 0x20B, 0x04,
			      ldo_vout_ranges3, 128, 100, ldo5_irq_tbls),
};
/*
 * Request every fault interrupt listed in @tbls for one regulator device.
 * IRQs are looked up by name on the platform device and handled in a
 * threaded context; @rdev is passed as the handler cookie.
 *
 * Returns 0 on success or the first lookup/request error.
 */
static int mt6360_regulator_irq_register(struct platform_device *pdev,
					 struct regulator_dev *rdev,
					 const struct mt6360_irq_mapping *tbls,
					 int tbl_size)
{
	int idx, virq, err;

	for (idx = 0; idx < tbl_size; idx++) {
		const struct mt6360_irq_mapping *map = &tbls[idx];

		virq = platform_get_irq_byname(pdev, map->name);
		if (virq < 0)
			return virq;

		err = devm_request_threaded_irq(&pdev->dev, virq, NULL,
						map->handler, 0,
						map->name, rdev);
		if (err) {
			dev_err(&pdev->dev, "Fail to request %s irq\n", map->name);
			return err;
		}
	}

	return 0;
}
static int mt6360_regulator_probe(struct platform_device *pdev)
{
struct mt6360_regulator_data *mrd;
struct regulator_config config = {};
int i, ret;
mrd = devm_kzalloc(&pdev->dev, sizeof(*mrd), GFP_KERNEL);
if (!mrd)
return -ENOMEM;
mrd->dev = &pdev->dev;
mrd->regmap = dev_get_regmap(pdev->dev.parent, NULL);
if (!mrd->regmap) {
dev_err(&pdev->dev, "Failed to get parent regmap\n");
return -ENODEV;
}
config.dev = pdev->dev.parent;
config.driver_data = mrd;
config.regmap = mrd->regmap;
for (i = 0; i < ARRAY_SIZE(mt6360_regulator_descs); i++) {
const struct mt6360_regulator_desc *rdesc = mt6360_regulator_descs + i;
struct regulator_dev *rdev;
rdev = devm_regulator_register(&pdev->dev, &rdesc->desc, &config);
if (IS_ERR(rdev)) {
dev_err(&pdev->dev, "Failed to register %d regulator\n", i);
return PTR_ERR(rdev);
}
ret = mt6360_regulator_irq_register(pdev, rdev, rdesc->irq_tables,
rdesc->irq_table_size);
if (ret) {
dev_err(&pdev->dev, "Failed to register %d regulator irqs\n", i);
return ret;
}
}
return 0;
}
/* Platform-bus match table (this sub-device is spawned by the MT6360 MFD). */
static const struct platform_device_id mt6360_regulator_id_table[] = {
	{ "mt6360-regulator", 0 },
	{},
};
MODULE_DEVICE_TABLE(platform, mt6360_regulator_id_table);

static struct platform_driver mt6360_regulator_driver = {
	.driver = {
		.name = "mt6360-regulator",
		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
	},
	.probe = mt6360_regulator_probe,
	.id_table = mt6360_regulator_id_table,
};
module_platform_driver(mt6360_regulator_driver);

MODULE_AUTHOR("Gene Chen <[email protected]>");
MODULE_DESCRIPTION("MT6360 Regulator Driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/regulator/mt6360-regulator.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* max1586.c -- Voltage and current regulation for the Maxim 1586
*
* Copyright (C) 2008 Robert Jarzmik
*/
#include <linux/module.h>
#include <linux/err.h>
#include <linux/i2c.h>
#include <linux/platform_device.h>
#include <linux/regulator/driver.h>
#include <linux/slab.h>
#include <linux/regulator/max1586.h>
#include <linux/of_device.h>
#include <linux/regulator/of_regulator.h>
#define MAX1586_V3_MAX_VSEL 31
#define MAX1586_V6_MAX_VSEL 3
#define MAX1586_V3_MIN_UV 700000
#define MAX1586_V3_MAX_UV 1475000
#define MAX1586_V6_MIN_UV 0
#define MAX1586_V6_MAX_UV 3000000
#define I2C_V3_SELECT (0 << 5)
#define I2C_V6_SELECT (1 << 5)
/*
 * Driver state.  The chip offers no register readback, so the last
 * programmed V3/V6 selectors are cached here and reported by the
 * get_voltage_sel ops.
 */
struct max1586_data {
	struct i2c_client *client;	/* I2C handle used for all writes */
	/* min/max V3 voltage, in uV, after applying the external gain */
	unsigned int min_uV;
	unsigned int max_uV;
	unsigned int v3_curr_sel;	/* cached V3 selector */
	unsigned int v6_curr_sel;	/* cached V6 selector */
};

/*
 * V6 voltage
 * On I2C bus, sending a "x" byte to the max1586 means :
 *   set V6 to either 0V, 1.8V, 2.5V, 3V depending on (x & 0x3)
 * As regulator framework doesn't accept voltages to be 0V, we use 1uV.
 */
static const unsigned int v6_voltages_uv[] = { 1, 1800000, 2500000, 3000000 };

/*
 * V3 voltage
 * On I2C bus, sending a "x" byte to the max1586 means :
 *   set V3 to 0.700V + (x & 0x1f) * 0.025V
 * This voltage can be increased by external resistors
 * R24 and R25=100kOhm as described in the data sheet.
 * The gain is approximately: 1 + R24/R25 + R24/185.5kOhm
 */
static int max1586_v3_get_voltage_sel(struct regulator_dev *rdev)
{
	struct max1586_data *max1586 = rdev_get_drvdata(rdev);

	/* No hardware readback; report the cached selector. */
	return max1586->v3_curr_sel;
}
static int max1586_v3_set_voltage_sel(struct regulator_dev *rdev,
unsigned selector)
{
struct max1586_data *max1586 = rdev_get_drvdata(rdev);
struct i2c_client *client = max1586->client;
int ret;
u8 v3_prog;
dev_dbg(&client->dev, "changing voltage v3 to %dmv\n",
regulator_list_voltage_linear(rdev, selector) / 1000);
v3_prog = I2C_V3_SELECT | (u8) selector;
ret = i2c_smbus_write_byte(client, v3_prog);
if (ret)
return ret;
max1586->v3_curr_sel = selector;
return 0;
}
/* Report the cached V6 selector (the chip offers no register readback). */
static int max1586_v6_get_voltage_sel(struct regulator_dev *rdev)
{
	struct max1586_data *max1586 = rdev_get_drvdata(rdev);

	return max1586->v6_curr_sel;
}
static int max1586_v6_set_voltage_sel(struct regulator_dev *rdev,
unsigned int selector)
{
struct max1586_data *max1586 = rdev_get_drvdata(rdev);
struct i2c_client *client = max1586->client;
u8 v6_prog;
int ret;
dev_dbg(&client->dev, "changing voltage v6 to %dmv\n",
rdev->desc->volt_table[selector] / 1000);
v6_prog = I2C_V6_SELECT | (u8) selector;
ret = i2c_smbus_write_byte(client, v6_prog);
if (ret)
return ret;
max1586->v6_curr_sel = selector;
return 0;
}
/*
 * The Maxim 1586 controls V3 and V6 voltages, but offers no way of reading back
 * the set up value.
 */
static const struct regulator_ops max1586_v3_ops = {
	.get_voltage_sel = max1586_v3_get_voltage_sel,
	.set_voltage_sel = max1586_v3_set_voltage_sel,
	.list_voltage = regulator_list_voltage_linear,
	.map_voltage = regulator_map_voltage_linear,
};

static const struct regulator_ops max1586_v6_ops = {
	.get_voltage_sel = max1586_v6_get_voltage_sel,
	.set_voltage_sel = max1586_v6_set_voltage_sel,
	.list_voltage = regulator_list_voltage_table,
};

/*
 * Deliberately not const: V3's min_uV/uV_step are patched at probe time
 * from the board-specific "v3-gain" value.
 */
static struct regulator_desc max1586_reg[] = {
	{
		.name = "Output_V3",
		.id = MAX1586_V3,
		.ops = &max1586_v3_ops,
		.type = REGULATOR_VOLTAGE,
		.n_voltages = MAX1586_V3_MAX_VSEL + 1,
		.owner = THIS_MODULE,
	},
	{
		.name = "Output_V6",
		.id = MAX1586_V6,
		.ops = &max1586_v6_ops,
		.type = REGULATOR_VOLTAGE,
		.n_voltages = MAX1586_V6_MAX_VSEL + 1,
		.volt_table = v6_voltages_uv,
		.owner = THIS_MODULE,
	},
};
/*
 * Fill @pdata from the device tree: the mandatory "v3-gain" property and
 * any Output_V3/Output_V6 init data found under the "regulators" subnode.
 *
 * Returns 0 on success (including the legitimate "no subregulators" case),
 * or a negative error code.
 */
static int of_get_max1586_platform_data(struct device *dev,
					struct max1586_platform_data *pdata)
{
	struct of_regulator_match rmatch[ARRAY_SIZE(max1586_reg)] = { };
	struct device_node *regs;
	int idx, matched;

	if (of_property_read_u32(dev->of_node, "v3-gain",
				 &pdata->v3_gain) < 0) {
		dev_err(dev, "%pOF has no 'v3-gain' property\n", dev->of_node);
		return -EINVAL;
	}

	regs = of_get_child_by_name(dev->of_node, "regulators");
	if (!regs) {
		dev_err(dev, "missing 'regulators' subnode in DT\n");
		return -EINVAL;
	}

	for (idx = 0; idx < ARRAY_SIZE(rmatch); idx++)
		rmatch[idx].name = max1586_reg[idx].name;

	matched = of_regulator_match(dev, regs, rmatch, ARRAY_SIZE(rmatch));
	of_node_put(regs);
	/*
	 * If matched is 0, ie. neither Output_V3 nor Output_V6 have been found,
	 * return 0, which signals the normal situation where no subregulator is
	 * available. This is normal because the max1586 doesn't provide any
	 * readback support, so the subregulators can't report any status
	 * anyway. If matched < 0, return the error.
	 */
	if (matched <= 0)
		return matched;

	pdata->subdevs = devm_kcalloc(dev, matched,
				      sizeof(struct max1586_subdev_data),
				      GFP_KERNEL);
	if (!pdata->subdevs)
		return -ENOMEM;

	pdata->num_subdevs = matched;
	for (idx = 0; idx < matched; idx++) {
		pdata->subdevs[idx].id = idx;
		pdata->subdevs[idx].name = rmatch[idx].of_node->name;
		pdata->subdevs[idx].platform_data = rmatch[idx].init_data;
	}

	return 0;
}
static const struct of_device_id __maybe_unused max1586_of_match[] = {
	{ .compatible = "maxim,max1586", },
	{},
};
MODULE_DEVICE_TABLE(of, max1586_of_match);

/*
 * Probe: build platform data (from DT if no board file supplied one),
 * derive V3's real voltage span from the external resistor gain, then
 * register whichever subregulators were configured.
 *
 * NOTE(review): this patches the shared, file-scope max1586_reg[] descriptor
 * for V3 (min_uV/uV_step) per probed device — fine for a single chip,
 * racy if two max1586s with different gains ever share a system.
 */
static int max1586_pmic_probe(struct i2c_client *client)
{
	struct max1586_platform_data *pdata, pdata_of;
	struct regulator_config config = { };
	struct max1586_data *max1586;
	int i, id, ret;
	const struct of_device_id *match;

	pdata = dev_get_platdata(&client->dev);
	/* Fall back to device tree when no board platform data exists. */
	if (client->dev.of_node && !pdata) {
		match = of_match_device(of_match_ptr(max1586_of_match),
					&client->dev);
		if (!match) {
			dev_err(&client->dev, "Error: No device match found\n");
			return -ENODEV;
		}
		ret = of_get_max1586_platform_data(&client->dev, &pdata_of);
		if (ret < 0)
			return ret;
		pdata = &pdata_of;
	}

	max1586 = devm_kzalloc(&client->dev, sizeof(struct max1586_data),
			       GFP_KERNEL);
	if (!max1586)
		return -ENOMEM;

	max1586->client = client;

	/* A zero gain would make the V3 range degenerate. */
	if (!pdata->v3_gain)
		return -EINVAL;

	/* Scale the nominal 0.7..1.475 V V3 range by the external gain. */
	max1586->min_uV = MAX1586_V3_MIN_UV / 1000 * pdata->v3_gain / 1000;
	max1586->max_uV = MAX1586_V3_MAX_UV / 1000 * pdata->v3_gain / 1000;

	/* Set curr_sel to default voltage on power-up */
	max1586->v3_curr_sel = 24; /* 1.3V */
	max1586->v6_curr_sel = 0;

	for (i = 0; i < pdata->num_subdevs && i <= MAX1586_V6; i++) {
		struct regulator_dev *rdev;

		id = pdata->subdevs[i].id;
		if (!pdata->subdevs[i].platform_data)
			continue;
		if (id < MAX1586_V3 || id > MAX1586_V6) {
			dev_err(&client->dev, "invalid regulator id %d\n", id);
			return -EINVAL;
		}

		/* V3's linear mapping depends on the computed gain. */
		if (id == MAX1586_V3) {
			max1586_reg[id].min_uV = max1586->min_uV;
			max1586_reg[id].uV_step =
				(max1586->max_uV - max1586->min_uV) /
				MAX1586_V3_MAX_VSEL;
		}

		config.dev = &client->dev;
		config.init_data = pdata->subdevs[i].platform_data;
		config.driver_data = max1586;

		rdev = devm_regulator_register(&client->dev,
					       &max1586_reg[id], &config);
		if (IS_ERR(rdev)) {
			dev_err(&client->dev, "failed to register %s\n",
				max1586_reg[id].name);
			return PTR_ERR(rdev);
		}
	}

	i2c_set_clientdata(client, max1586);
	dev_info(&client->dev, "Maxim 1586 regulator driver loaded\n");
	return 0;
}

static const struct i2c_device_id max1586_id[] = {
	{ "max1586", 0 },
	{ }
};
MODULE_DEVICE_TABLE(i2c, max1586_id);

static struct i2c_driver max1586_pmic_driver = {
	.probe = max1586_pmic_probe,
	.driver = {
		.name = "max1586",
		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
		.of_match_table = of_match_ptr(max1586_of_match),
	},
	.id_table = max1586_id,
};

/* Registered at subsys_initcall time so consumers probing later find us. */
static int __init max1586_pmic_init(void)
{
	return i2c_add_driver(&max1586_pmic_driver);
}
subsys_initcall(max1586_pmic_init);

static void __exit max1586_pmic_exit(void)
{
	i2c_del_driver(&max1586_pmic_driver);
}
module_exit(max1586_pmic_exit);

/* Module information */
MODULE_DESCRIPTION("MAXIM 1586 voltage regulator driver");
MODULE_AUTHOR("Robert Jarzmik");
MODULE_LICENSE("GPL");
| linux-master | drivers/regulator/max1586.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Voltage regulation driver for active-semi ACT8945A PMIC
*
* Copyright (C) 2015 Atmel Corporation
*
* Author: Wenyou Yang <[email protected]>
*/
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/regulator/driver.h>
#include <linux/regulator/machine.h>
#include <dt-bindings/regulator/active-semi,8945a-regulator.h>
/*
* ACT8945A Global Register Map.
*/
#define ACT8945A_SYS_MODE 0x00
#define ACT8945A_SYS_CTRL 0x01
#define ACT8945A_SYS_UNLK_REGS 0x0b
#define ACT8945A_DCDC1_VSET1 0x20
#define ACT8945A_DCDC1_VSET2 0x21
#define ACT8945A_DCDC1_CTRL 0x22
#define ACT8945A_DCDC1_SUS 0x24
#define ACT8945A_DCDC2_VSET1 0x30
#define ACT8945A_DCDC2_VSET2 0x31
#define ACT8945A_DCDC2_CTRL 0x32
#define ACT8945A_DCDC2_SUS 0x34
#define ACT8945A_DCDC3_VSET1 0x40
#define ACT8945A_DCDC3_VSET2 0x41
#define ACT8945A_DCDC3_CTRL 0x42
#define ACT8945A_DCDC3_SUS 0x44
#define ACT8945A_LDO1_VSET 0x50
#define ACT8945A_LDO1_CTRL 0x51
#define ACT8945A_LDO1_SUS 0x52
#define ACT8945A_LDO2_VSET 0x54
#define ACT8945A_LDO2_CTRL 0x55
#define ACT8945A_LDO2_SUS 0x56
#define ACT8945A_LDO3_VSET 0x60
#define ACT8945A_LDO3_CTRL 0x61
#define ACT8945A_LDO3_SUS 0x62
#define ACT8945A_LDO4_VSET 0x64
#define ACT8945A_LDO4_CTRL 0x65
#define ACT8945A_LDO4_SUS 0x66
/*
* Field Definitions.
*/
#define ACT8945A_ENA 0x80 /* ON - [7] */
#define ACT8945A_VSEL_MASK 0x3F /* VSET - [5:0] */
/*
* ACT8945A Voltage Number
*/
#define ACT8945A_VOLTAGE_NUM 64
/* Regulator indices; also used to tell DC/DCs (<= DCDC3) from LDOs. */
enum {
	ACT8945A_ID_DCDC1,
	ACT8945A_ID_DCDC2,
	ACT8945A_ID_DCDC3,
	ACT8945A_ID_LDO1,
	ACT8945A_ID_LDO2,
	ACT8945A_ID_LDO3,
	ACT8945A_ID_LDO4,
	ACT8945A_ID_MAX,
};

struct act8945a_pmic {
	struct regmap *regmap;		/* shared register map from MFD parent */
	u32 op_mode[ACT8945A_ID_MAX];	/* last mode set per regulator */
};

/* 0.6..3.3 V: 25 mV steps, then 50 mV, then 100 mV at the top. */
static const struct linear_range act8945a_voltage_ranges[] = {
	REGULATOR_LINEAR_RANGE(600000, 0, 23, 25000),
	REGULATOR_LINEAR_RANGE(1200000, 24, 47, 50000),
	REGULATOR_LINEAR_RANGE(2400000, 48, 63, 100000),
};
/*
 * Configure whether this output stays on when the PMIC enters hibernate:
 * write the regulator's suspend-control register with the magic base value
 * (0xa8 for DC/DCs, 0xe8 for LDOs), plus BIT(4) when it should remain
 * enabled.
 */
static int act8945a_set_suspend_state(struct regulator_dev *rdev, bool enable)
{
	int id = rdev_get_id(rdev);
	int reg, val;

	switch (id) {
	case ACT8945A_ID_DCDC1:
		reg = ACT8945A_DCDC1_SUS;
		break;
	case ACT8945A_ID_DCDC2:
		reg = ACT8945A_DCDC2_SUS;
		break;
	case ACT8945A_ID_DCDC3:
		reg = ACT8945A_DCDC3_SUS;
		break;
	case ACT8945A_ID_LDO1:
		reg = ACT8945A_LDO1_SUS;
		break;
	case ACT8945A_ID_LDO2:
		reg = ACT8945A_LDO2_SUS;
		break;
	case ACT8945A_ID_LDO3:
		reg = ACT8945A_LDO3_SUS;
		break;
	case ACT8945A_ID_LDO4:
		reg = ACT8945A_LDO4_SUS;
		break;
	default:
		return -EINVAL;
	}

	val = (id <= ACT8945A_ID_DCDC3) ? 0xa8 : 0xe8;
	if (enable)
		val |= BIT(4);

	/*
	 * Ask the PMIC to enable/disable this output when entering hibernate
	 * mode.
	 */
	return regmap_write(rdev->regmap, reg, val);
}
/* Keep this output powered across PMIC hibernate. */
static int act8945a_set_suspend_enable(struct regulator_dev *rdev)
{
	return act8945a_set_suspend_state(rdev, true);
}

/* Cut this output when the PMIC enters hibernate. */
static int act8945a_set_suspend_disable(struct regulator_dev *rdev)
{
	return act8945a_set_suspend_state(rdev, false);
}
/*
 * Map the device-tree ACT8945A_REGULATOR_MODE_* values onto framework
 * modes: FIXED and NORMAL are both normal operation, LOWPOWER maps to
 * standby; anything else is invalid.
 */
static unsigned int act8945a_of_map_mode(unsigned int mode)
{
	if (mode == ACT8945A_REGULATOR_MODE_FIXED ||
	    mode == ACT8945A_REGULATOR_MODE_NORMAL)
		return REGULATOR_MODE_NORMAL;

	if (mode == ACT8945A_REGULATOR_MODE_LOWPOWER)
		return REGULATOR_MODE_STANDBY;

	return REGULATOR_MODE_INVALID;
}
/*
 * Set a regulator's operating mode via BIT(5) of its control register.
 * The bit's polarity differs per family: on the DC/DCs it is set for
 * normal (fixed-frequency) operation, on the LDOs it is set for
 * low-power/standby.  The accepted mode is cached in op_mode[] so
 * act8945a_get_mode() can report it back.
 */
static int act8945a_set_mode(struct regulator_dev *rdev, unsigned int mode)
{
	struct act8945a_pmic *pmic = rdev_get_drvdata(rdev);
	int id = rdev_get_id(rdev);
	int ctrl, bit, err;

	switch (id) {
	case ACT8945A_ID_DCDC1:
		ctrl = ACT8945A_DCDC1_CTRL;
		break;
	case ACT8945A_ID_DCDC2:
		ctrl = ACT8945A_DCDC2_CTRL;
		break;
	case ACT8945A_ID_DCDC3:
		ctrl = ACT8945A_DCDC3_CTRL;
		break;
	case ACT8945A_ID_LDO1:
		ctrl = ACT8945A_LDO1_CTRL;
		break;
	case ACT8945A_ID_LDO2:
		ctrl = ACT8945A_LDO2_CTRL;
		break;
	case ACT8945A_ID_LDO3:
		ctrl = ACT8945A_LDO3_CTRL;
		break;
	case ACT8945A_ID_LDO4:
		ctrl = ACT8945A_LDO4_CTRL;
		break;
	default:
		return -EINVAL;
	}

	switch (mode) {
	case REGULATOR_MODE_STANDBY:
		bit = (id > ACT8945A_ID_DCDC3) ? BIT(5) : 0;
		break;
	case REGULATOR_MODE_NORMAL:
		bit = (id <= ACT8945A_ID_DCDC3) ? BIT(5) : 0;
		break;
	default:
		return -EINVAL;
	}

	err = regmap_update_bits(rdev->regmap, ctrl, BIT(5), bit);
	if (err)
		return err;

	pmic->op_mode[id] = mode;

	return 0;
}
static unsigned int act8945a_get_mode(struct regulator_dev *rdev)
{
struct act8945a_pmic *act8945a = rdev_get_drvdata(rdev);
int id = rdev_get_id(rdev);
if (id < ACT8945A_ID_DCDC1 || id >= ACT8945A_ID_MAX)
return -EINVAL;
return act8945a->op_mode[id];
}
/* Generic regmap voltage/enable handling plus chip-specific mode/suspend. */
static const struct regulator_ops act8945a_ops = {
	.list_voltage = regulator_list_voltage_linear_range,
	.map_voltage = regulator_map_voltage_linear_range,
	.get_voltage_sel = regulator_get_voltage_sel_regmap,
	.set_voltage_sel = regulator_set_voltage_sel_regmap,
	.enable = regulator_enable_regmap,
	.disable = regulator_disable_regmap,
	.set_mode = act8945a_set_mode,
	.get_mode = act8945a_get_mode,
	.is_enabled = regulator_is_enabled_regmap,
	.set_suspend_enable = act8945a_set_suspend_enable,
	.set_suspend_disable = act8945a_set_suspend_disable,
};

/*
 * Build one regulator_desc.  _vsel_reg selects which voltage-set register
 * suffix to use (VSET1/VSET2 for the DC/DCs, VSET for the LDOs); register
 * names are assembled by token pasting from the _family/_id arguments.
 */
#define ACT89xx_REG(_name, _family, _id, _vsel_reg, _supply)		\
	[_family##_ID_##_id] = {					\
		.name = _name,						\
		.supply_name = _supply,					\
		.of_match = of_match_ptr("REG_"#_id),			\
		.of_map_mode = act8945a_of_map_mode,			\
		.regulators_node = of_match_ptr("regulators"),		\
		.id = _family##_ID_##_id,				\
		.type = REGULATOR_VOLTAGE,				\
		.ops = &act8945a_ops,					\
		.n_voltages = ACT8945A_VOLTAGE_NUM,			\
		.linear_ranges = act8945a_voltage_ranges,		\
		.n_linear_ranges = ARRAY_SIZE(act8945a_voltage_ranges),	\
		.vsel_reg = _family##_##_id##_##_vsel_reg,		\
		.vsel_mask = ACT8945A_VSEL_MASK,			\
		.enable_reg = _family##_##_id##_CTRL,			\
		.enable_mask = ACT8945A_ENA,				\
		.owner = THIS_MODULE,					\
	}

/* Default set: DC/DCs use their VSET1 registers. */
static const struct regulator_desc act8945a_regulators[] = {
	ACT89xx_REG("DCDC_REG1", ACT8945A, DCDC1, VSET1, "vp1"),
	ACT89xx_REG("DCDC_REG2", ACT8945A, DCDC2, VSET1, "vp2"),
	ACT89xx_REG("DCDC_REG3", ACT8945A, DCDC3, VSET1, "vp3"),
	ACT89xx_REG("LDO_REG1", ACT8945A, LDO1, VSET, "inl45"),
	ACT89xx_REG("LDO_REG2", ACT8945A, LDO2, VSET, "inl45"),
	ACT89xx_REG("LDO_REG3", ACT8945A, LDO3, VSET, "inl67"),
	ACT89xx_REG("LDO_REG4", ACT8945A, LDO4, VSET, "inl67"),
};

/* Alternate set for boards with "active-semi,vsel-high": DC/DCs use VSET2. */
static const struct regulator_desc act8945a_alt_regulators[] = {
	ACT89xx_REG("DCDC_REG1", ACT8945A, DCDC1, VSET2, "vp1"),
	ACT89xx_REG("DCDC_REG2", ACT8945A, DCDC2, VSET2, "vp2"),
	ACT89xx_REG("DCDC_REG3", ACT8945A, DCDC3, VSET2, "vp3"),
	ACT89xx_REG("LDO_REG1", ACT8945A, LDO1, VSET, "inl45"),
	ACT89xx_REG("LDO_REG2", ACT8945A, LDO2, VSET, "inl45"),
	ACT89xx_REG("LDO_REG3", ACT8945A, LDO3, VSET, "inl67"),
	ACT89xx_REG("LDO_REG4", ACT8945A, LDO4, VSET, "inl67"),
};
/*
 * Probe: pick the VSET1 or VSET2 descriptor set depending on the board's
 * vsel pin wiring, register all seven regulators against the parent MFD's
 * regmap, and finally unlock the chip's expert register page.
 */
static int act8945a_pmic_probe(struct platform_device *pdev)
{
	struct regulator_config config = { };
	const struct regulator_desc *rdescs;
	struct act8945a_pmic *pmic;
	int idx, num;

	pmic = devm_kzalloc(&pdev->dev, sizeof(*pmic), GFP_KERNEL);
	if (!pmic)
		return -ENOMEM;

	pmic->regmap = dev_get_regmap(pdev->dev.parent, NULL);
	if (!pmic->regmap) {
		dev_err(&pdev->dev,
			"could not retrieve regmap from parent device\n");
		return -EINVAL;
	}

	/* "active-semi,vsel-high" selects the VSET2-based descriptors. */
	if (of_property_read_bool(pdev->dev.parent->of_node,
				  "active-semi,vsel-high")) {
		rdescs = act8945a_alt_regulators;
		num = ARRAY_SIZE(act8945a_alt_regulators);
	} else {
		rdescs = act8945a_regulators;
		num = ARRAY_SIZE(act8945a_regulators);
	}

	config.dev = &pdev->dev;
	config.dev->of_node = pdev->dev.parent->of_node;
	config.driver_data = pmic;

	for (idx = 0; idx < num; idx++) {
		struct regulator_dev *rdev;

		rdev = devm_regulator_register(&pdev->dev, &rdescs[idx],
					       &config);
		if (IS_ERR(rdev)) {
			dev_err(&pdev->dev,
				"failed to register %s regulator\n",
				rdescs[idx].name);
			return PTR_ERR(rdev);
		}
	}

	platform_set_drvdata(pdev, pmic);

	/* Unlock expert registers. */
	return regmap_write(pmic->regmap, ACT8945A_SYS_UNLK_REGS, 0xef);
}
static int __maybe_unused act8945a_suspend(struct device *pdev)
{
	struct act8945a_pmic *act8945a = dev_get_drvdata(pdev);
	/*
	 * Ask the PMIC to enter the suspend mode on the next PWRHLD
	 * transition.
	 */
	return regmap_write(act8945a->regmap, ACT8945A_SYS_CTRL, 0x42);
}

/* Only a suspend hook: the PMIC wakes itself on the PWRHLD transition. */
static SIMPLE_DEV_PM_OPS(act8945a_pm, act8945a_suspend, NULL);

static void act8945a_pmic_shutdown(struct platform_device *pdev)
{
	struct act8945a_pmic *act8945a = platform_get_drvdata(pdev);
	/*
	 * Ask the PMIC to shutdown everything on the next PWRHLD transition.
	 */
	regmap_write(act8945a->regmap, ACT8945A_SYS_CTRL, 0x0);
}

static struct platform_driver act8945a_pmic_driver = {
	.driver = {
		.name = "act8945a-regulator",
		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
		.pm = &act8945a_pm,
	},
	.probe = act8945a_pmic_probe,
	.shutdown = act8945a_pmic_shutdown,
};
module_platform_driver(act8945a_pmic_driver);

MODULE_DESCRIPTION("Active-semi ACT8945A voltage regulator driver");
MODULE_AUTHOR("Wenyou Yang <[email protected]>");
MODULE_LICENSE("GPL");
| linux-master | drivers/regulator/act8945a-regulator.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* LP8755 High Performance Power Management Unit : System Interface Driver
* (based on rev. 0.26)
* Copyright 2012 Texas Instruments
*
* Author: Daniel(Geon Si) Jeong <[email protected]>
*/
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/i2c.h>
#include <linux/err.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/regmap.h>
#include <linux/uaccess.h>
#include <linux/regulator/driver.h>
#include <linux/regulator/machine.h>
#include <linux/platform_data/lp8755.h>
#define LP8755_REG_BUCK0 0x00
#define LP8755_REG_BUCK1 0x03
#define LP8755_REG_BUCK2 0x04
#define LP8755_REG_BUCK3 0x01
#define LP8755_REG_BUCK4 0x05
#define LP8755_REG_BUCK5 0x02
#define LP8755_REG_MAX 0xFF
#define LP8755_BUCK_EN_M BIT(7)
#define LP8755_BUCK_LINEAR_OUT_MAX 0x76
#define LP8755_BUCK_VOUT_M 0x7F
/* One multi-phase configuration: which bucks exist and how many. */
struct lp8755_mphase {
	int nreg;			/* number of active bucks */
	int buck_num[LP8755_BUCK_MAX];	/* their ids, in order */
};

/* Per-chip driver state. */
struct lp8755_chip {
	struct device *dev;
	struct regmap *regmap;
	struct lp8755_platform_data *pdata;
	int irq;
	unsigned int irqmask;		/* enabled fault-event bits */
	int mphase;			/* index into mphase_buck[] */
	struct regulator_dev *rdev[LP8755_BUCK_MAX];
};
/*
 * Read the per-buck enable-delay register (0x12 + buck id) and scale the
 * raw byte by 100 (presumably units of 100 us — TODO confirm against the
 * LP8755 datasheet).  Returns the delay or a negative regmap error.
 */
static int lp8755_buck_enable_time(struct regulator_dev *rdev)
{
	enum lp8755_bucks id = rdev_get_id(rdev);
	unsigned int raw;
	int err;

	err = regmap_read(rdev->regmap, 0x12 + id, &raw);
	if (err < 0) {
		dev_err(&rdev->dev, "i2c access error %s\n", __func__);
		return err;
	}

	return (raw & 0xff) * 100;
}
/*
 * Program a buck's PWM/PFM behaviour.  FAST forces PWM via the per-buck
 * bit in register 0x06; NORMAL and IDLE first adjust bit 0x20 of the
 * per-buck register (0x08 + id) and then fall through to clear the forced-
 * PWM bit.  An unknown mode is logged and treated like FAST (forced PWM).
 * Note the final regmap_update_bits() runs for every path — the register
 * write sequence is order-sensitive, so the structure is left as is.
 */
static int lp8755_buck_set_mode(struct regulator_dev *rdev, unsigned int mode)
{
	int ret;
	unsigned int regbval = 0x0;	/* value for the forced-PWM bit */
	enum lp8755_bucks id = rdev_get_id(rdev);
	struct lp8755_chip *pchip = rdev_get_drvdata(rdev);

	switch (mode) {
	case REGULATOR_MODE_FAST:
		/* forced pwm mode */
		regbval = (0x01 << id);
		break;
	case REGULATOR_MODE_NORMAL:
		/* enable automatic pwm/pfm mode */
		ret = regmap_update_bits(rdev->regmap, 0x08 + id, 0x20, 0x00);
		if (ret < 0)
			goto err_i2c;
		break;
	case REGULATOR_MODE_IDLE:
		/* enable automatic pwm/pfm/lppfm mode */
		ret = regmap_update_bits(rdev->regmap, 0x08 + id, 0x20, 0x20);
		if (ret < 0)
			goto err_i2c;
		ret = regmap_update_bits(rdev->regmap, 0x10, 0x01, 0x01);
		if (ret < 0)
			goto err_i2c;
		break;
	default:
		dev_err(pchip->dev, "Not supported buck mode %s\n", __func__);
		/* forced pwm mode */
		regbval = (0x01 << id);
	}

	/* Common tail: set/clear this buck's forced-PWM bit. */
	ret = regmap_update_bits(rdev->regmap, 0x06, 0x01 << id, regbval);
	if (ret < 0)
		goto err_i2c;
	return ret;

err_i2c:
	dev_err(&rdev->dev, "i2c access error %s\n", __func__);
	return ret;
}
/*
 * Derive the current mode from hardware: the per-buck bit in register
 * 0x06 means forced PWM (FAST); otherwise bit 0x20 of (0x08 + id)
 * distinguishes IDLE (auto pwm/pfm/lppfm) from NORMAL (auto pwm/pfm).
 * Returns 0 on an I2C failure, matching the original behaviour.
 */
static unsigned int lp8755_buck_get_mode(struct regulator_dev *rdev)
{
	enum lp8755_bucks id = rdev_get_id(rdev);
	unsigned int regval;
	int err;

	err = regmap_read(rdev->regmap, 0x06, &regval);
	if (err < 0) {
		dev_err(&rdev->dev, "i2c access error %s\n", __func__);
		return 0;
	}

	/* mode fast means forced pwm mode */
	if (regval & (0x01 << id))
		return REGULATOR_MODE_FAST;

	err = regmap_read(rdev->regmap, 0x08 + id, &regval);
	if (err < 0) {
		dev_err(&rdev->dev, "i2c access error %s\n", __func__);
		return 0;
	}

	/* 0x20 set: automatic pwm/pfm/lppfm; clear: automatic pwm/pfm */
	return (regval & 0x20) ? REGULATOR_MODE_IDLE : REGULATOR_MODE_NORMAL;
}
/* Selectable ramp rates, indexed by the 3-bit ramp field (uV/us steps). */
static const unsigned int lp8755_buck_ramp_table[] = {
	30000, 15000, 7500, 3800, 1900, 940, 470, 230
};

static const struct regulator_ops lp8755_buck_ops = {
	.map_voltage = regulator_map_voltage_linear,
	.list_voltage = regulator_list_voltage_linear,
	.set_voltage_sel = regulator_set_voltage_sel_regmap,
	.get_voltage_sel = regulator_get_voltage_sel_regmap,
	.enable = regulator_enable_regmap,
	.disable = regulator_disable_regmap,
	.is_enabled = regulator_is_enabled_regmap,
	.enable_time = lp8755_buck_enable_time,
	.set_mode = lp8755_buck_set_mode,
	.get_mode = lp8755_buck_get_mode,
	.set_ramp_delay = regulator_set_ramp_delay_regmap,
};

/* Supply name for buck _id, e.g. "lp8755_buck0". */
#define lp8755_rail(_id) "lp8755_buck"#_id

/* Default init_data: voltage changes allowed within 0.5..1.675 V. */
#define lp8755_buck_init(_id)\
{\
	.constraints = {\
		.name = lp8755_rail(_id),\
		.valid_ops_mask = REGULATOR_CHANGE_VOLTAGE,\
		.min_uV = 500000,\
		.max_uV = 1675000,\
	},\
}

static struct regulator_init_data lp8755_reg_default[LP8755_BUCK_MAX] = {
	[LP8755_BUCK0] = lp8755_buck_init(0),
	[LP8755_BUCK1] = lp8755_buck_init(1),
	[LP8755_BUCK2] = lp8755_buck_init(2),
	[LP8755_BUCK3] = lp8755_buck_init(3),
	[LP8755_BUCK4] = lp8755_buck_init(4),
	[LP8755_BUCK5] = lp8755_buck_init(5),
};

/*
 * Which bucks are usable for each hardware multi-phase configuration,
 * indexed by the value read back from register 0x3D at init time.
 */
static const struct lp8755_mphase mphase_buck[MPHASE_CONF_MAX] = {
	{ 3, { LP8755_BUCK0, LP8755_BUCK3, LP8755_BUCK5 } },
	{ 6, { LP8755_BUCK0, LP8755_BUCK1, LP8755_BUCK2, LP8755_BUCK3,
	       LP8755_BUCK4, LP8755_BUCK5 } },
	{ 5, { LP8755_BUCK0, LP8755_BUCK2, LP8755_BUCK3, LP8755_BUCK4,
	       LP8755_BUCK5} },
	{ 4, { LP8755_BUCK0, LP8755_BUCK3, LP8755_BUCK4, LP8755_BUCK5} },
	{ 3, { LP8755_BUCK0, LP8755_BUCK4, LP8755_BUCK5} },
	{ 2, { LP8755_BUCK0, LP8755_BUCK5} },
	{ 1, { LP8755_BUCK0} },
	{ 2, { LP8755_BUCK0, LP8755_BUCK3} },
	{ 4, { LP8755_BUCK0, LP8755_BUCK2, LP8755_BUCK3, LP8755_BUCK5} },
};
/*
 * Read the chip's multi-phase configuration (register 0x3D, low nibble)
 * and install default init data for every buck that configuration makes
 * available.  Returns 0 on success or the regmap error.
 */
static int lp8755_init_data(struct lp8755_chip *pchip)
{
	struct lp8755_platform_data *pdata = pchip->pdata;
	unsigned int cfg;
	int err, idx, buck;

	/* read back muti-phase configuration */
	err = regmap_read(pchip->regmap, 0x3D, &cfg);
	if (err < 0) {
		dev_err(pchip->dev, "i2c access error %s\n", __func__);
		return err;
	}
	pchip->mphase = cfg & 0x0F;

	/* set default data based on multi-phase config */
	for (idx = 0; idx < mphase_buck[pchip->mphase].nreg; idx++) {
		buck = mphase_buck[pchip->mphase].buck_num[idx];
		pdata->buck_data[buck] = &lp8755_reg_default[buck];
	}

	return err;
}
/*
 * Describe one buck: 0.5 V base, 10 mV steps, enable and vsel share the
 * per-buck register; the ramp field lives at (buck reg + 0x7).
 */
#define lp8755_buck_desc(_id)\
{\
	.name = lp8755_rail(_id),\
	.id = LP8755_BUCK##_id,\
	.ops = &lp8755_buck_ops,\
	.n_voltages = LP8755_BUCK_LINEAR_OUT_MAX+1,\
	.uV_step = 10000,\
	.min_uV = 500000,\
	.type = REGULATOR_VOLTAGE,\
	.owner = THIS_MODULE,\
	.enable_reg = LP8755_REG_BUCK##_id,\
	.enable_mask = LP8755_BUCK_EN_M,\
	.vsel_reg = LP8755_REG_BUCK##_id,\
	.vsel_mask = LP8755_BUCK_VOUT_M,\
	.ramp_reg = (LP8755_BUCK##_id) + 0x7,\
	.ramp_mask = 0x7,\
	.ramp_delay_table = lp8755_buck_ramp_table,\
	.n_ramp_values = ARRAY_SIZE(lp8755_buck_ramp_table),\
}

static const struct regulator_desc lp8755_regulators[] = {
	lp8755_buck_desc(0),
	lp8755_buck_desc(1),
	lp8755_buck_desc(2),
	lp8755_buck_desc(3),
	lp8755_buck_desc(4),
	lp8755_buck_desc(5),
};
static int lp8755_regulator_init(struct lp8755_chip *pchip)
{
int ret, icnt, buck_num;
struct lp8755_platform_data *pdata = pchip->pdata;
struct regulator_config rconfig = { };
rconfig.regmap = pchip->regmap;
rconfig.dev = pchip->dev;
rconfig.driver_data = pchip;
for (icnt = 0; icnt < mphase_buck[pchip->mphase].nreg; icnt++) {
buck_num = mphase_buck[pchip->mphase].buck_num[icnt];
rconfig.init_data = pdata->buck_data[buck_num];
rconfig.of_node = pchip->dev->of_node;
pchip->rdev[buck_num] =
devm_regulator_register(pchip->dev,
&lp8755_regulators[buck_num], &rconfig);
if (IS_ERR(pchip->rdev[buck_num])) {
ret = PTR_ERR(pchip->rdev[buck_num]);
pchip->rdev[buck_num] = NULL;
dev_err(pchip->dev, "regulator init failed: buck %d\n",
buck_num);
return ret;
}
}
return 0;
}
/*
 * Threaded IRQ handler: reads and clears both interrupt flag registers
 * (clearing releases the interrupt pin), then forwards the decoded
 * events to the affected regulator consumers via notifier chains.
 */
static irqreturn_t lp8755_irq_handler(int irq, void *data)
{
	int ret, icnt;
	unsigned int flag0, flag1;
	struct lp8755_chip *pchip = data;

	/* read flag0 register */
	ret = regmap_read(pchip->regmap, 0x0D, &flag0);
	if (ret < 0)
		goto err_i2c;
	/* clear flag register to pull up int. pin */
	ret = regmap_write(pchip->regmap, 0x0D, 0x00);
	if (ret < 0)
		goto err_i2c;

	/* send power fault detection event to the specific regulator;
	 * flag0 bit (icnt + 2) corresponds to buck icnt, gated by the
	 * irqmask snapshot taken at interrupt setup time
	 */
	for (icnt = 0; icnt < LP8755_BUCK_MAX; icnt++)
		if ((flag0 & (0x4 << icnt))
			&& (pchip->irqmask & (0x04 << icnt))
			&& (pchip->rdev[icnt] != NULL)) {
			regulator_notifier_call_chain(pchip->rdev[icnt],
						      LP8755_EVENT_PWR_FAULT,
						      NULL);
		}

	/* read flag1 register */
	ret = regmap_read(pchip->regmap, 0x0E, &flag1);
	if (ret < 0)
		goto err_i2c;
	/* clear flag register to pull up int. pin */
	ret = regmap_write(pchip->regmap, 0x0E, 0x00);
	if (ret < 0)
		goto err_i2c;

	/* send OCP event to all regulator devices (flag1 bit 0) */
	if ((flag1 & 0x01) && (pchip->irqmask & 0x01))
		for (icnt = 0; icnt < LP8755_BUCK_MAX; icnt++)
			if (pchip->rdev[icnt] != NULL) {
				regulator_notifier_call_chain(pchip->rdev[icnt],
							      LP8755_EVENT_OCP,
							      NULL);
			}

	/* send OVP event to all regulator devices (flag1 bit 1) */
	if ((flag1 & 0x02) && (pchip->irqmask & 0x02))
		for (icnt = 0; icnt < LP8755_BUCK_MAX; icnt++)
			if (pchip->rdev[icnt] != NULL) {
				regulator_notifier_call_chain(pchip->rdev[icnt],
							      LP8755_EVENT_OVP,
							      NULL);
			}
	return IRQ_HANDLED;

err_i2c:
	dev_err(pchip->dev, "i2c access error %s\n", __func__);
	return IRQ_NONE;
}
/*
 * lp8755_int_config() - snapshot the interrupt mask register and hook
 * up the threaded IRQ handler.
 *
 * If no IRQ line was provided this is a no-op (the driver simply runs
 * without fault notification).
 *
 * Return: 0 on success or when no IRQ is used, negative errno otherwise.
 */
static int lp8755_int_config(struct lp8755_chip *pchip)
{
	int ret;
	unsigned int regval;

	if (pchip->irq == 0) {
		dev_warn(pchip->dev, "not use interrupt : %s\n", __func__);
		return 0;
	}

	/* cache the mask register so the handler can filter events */
	ret = regmap_read(pchip->regmap, 0x0F, &regval);
	if (ret < 0) {
		dev_err(pchip->dev, "i2c access error %s\n", __func__);
		return ret;
	}

	pchip->irqmask = regval;
	return devm_request_threaded_irq(pchip->dev, pchip->irq, NULL,
					 lp8755_irq_handler,
					 IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
					 "lp8755-irq", pchip);
}
/* 8-bit register / 8-bit value I2C register map. */
static const struct regmap_config lp8755_regmap = {
	.reg_bits = 8,
	.val_bits = 8,
	.max_register = LP8755_REG_MAX,
};
/*
 * Probe: allocate driver state, set up regmap, obtain platform data
 * (from board files or chip defaults via lp8755_init_data()), register
 * the regulators and configure the fault interrupt. On late failure all
 * buck outputs are written to 0 to disable them.
 */
static int lp8755_probe(struct i2c_client *client)
{
	int ret, icnt;
	struct lp8755_chip *pchip;
	struct lp8755_platform_data *pdata = dev_get_platdata(&client->dev);

	if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
		dev_err(&client->dev, "i2c functionality check fail.\n");
		return -EOPNOTSUPP;
	}

	pchip = devm_kzalloc(&client->dev,
			     sizeof(struct lp8755_chip), GFP_KERNEL);
	if (!pchip)
		return -ENOMEM;

	pchip->dev = &client->dev;
	pchip->regmap = devm_regmap_init_i2c(client, &lp8755_regmap);
	if (IS_ERR(pchip->regmap)) {
		ret = PTR_ERR(pchip->regmap);
		dev_err(&client->dev, "fail to allocate regmap %d\n", ret);
		return ret;
	}
	i2c_set_clientdata(client, pchip);

	if (pdata != NULL) {
		/* board-supplied platform data wins */
		pchip->pdata = pdata;
		pchip->mphase = pdata->mphase;
	} else {
		/* no platform data: derive defaults from chip registers */
		pchip->pdata = devm_kzalloc(pchip->dev,
					    sizeof(struct lp8755_platform_data),
					    GFP_KERNEL);
		if (!pchip->pdata)
			return -ENOMEM;
		ret = lp8755_init_data(pchip);
		if (ret < 0) {
			dev_err(&client->dev, "fail to initialize chip\n");
			return ret;
		}
	}

	ret = lp8755_regulator_init(pchip);
	if (ret < 0) {
		dev_err(&client->dev, "fail to initialize regulators\n");
		goto err;
	}

	pchip->irq = client->irq;
	ret = lp8755_int_config(pchip);
	if (ret < 0) {
		dev_err(&client->dev, "fail to irq config\n");
		goto err;
	}

	return ret;

err:
	/* output disable */
	for (icnt = 0; icnt < LP8755_BUCK_MAX; icnt++)
		regmap_write(pchip->regmap, icnt, 0x00);
	return ret;
}
/*
 * Remove: disable all buck outputs by writing 0 to the first
 * LP8755_BUCK_MAX registers, mirroring the probe error path.
 */
static void lp8755_remove(struct i2c_client *client)
{
	struct lp8755_chip *pchip = i2c_get_clientdata(client);
	int reg;

	for (reg = 0; reg < LP8755_BUCK_MAX; reg++)
		regmap_write(pchip->regmap, reg, 0x00);
}
/* I2C device id table for non-DT instantiation. */
static const struct i2c_device_id lp8755_id[] = {
	{LP8755_NAME, 0},
	{}
};

MODULE_DEVICE_TABLE(i2c, lp8755_id);

static struct i2c_driver lp8755_i2c_driver = {
	.driver = {
		   .name = LP8755_NAME,
		   .probe_type = PROBE_PREFER_ASYNCHRONOUS,
		   },
	.probe = lp8755_probe,
	.remove = lp8755_remove,
	.id_table = lp8755_id,
};
/* Registered at subsys_initcall time so regulators are available to
 * consumers that probe early.
 */
static int __init lp8755_init(void)
{
	return i2c_add_driver(&lp8755_i2c_driver);
}

subsys_initcall(lp8755_init);

static void __exit lp8755_exit(void)
{
	i2c_del_driver(&lp8755_i2c_driver);
}

module_exit(lp8755_exit);
MODULE_DESCRIPTION("Texas Instruments lp8755 driver");
MODULE_AUTHOR("Daniel Jeong <[email protected]>");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/regulator/lp8755.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* tps51632-regulator.c -- TI TPS51632
*
* Regulator driver for TPS51632 3-2-1 Phase D-Cap Step Down Driverless
* Controller with serial VID control and DVFS.
*
* Copyright (c) 2012, NVIDIA Corporation.
*
* Author: Laxman Dewangan <[email protected]>
*/
#include <linux/err.h>
#include <linux/i2c.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/regulator/driver.h>
#include <linux/regulator/machine.h>
#include <linux/regulator/of_regulator.h>
#include <linux/regulator/tps51632-regulator.h>
#include <linux/slab.h>
/* Register definitions */
#define TPS51632_VOLTAGE_SELECT_REG 0x0
#define TPS51632_VOLTAGE_BASE_REG 0x1
#define TPS51632_OFFSET_REG 0x2
#define TPS51632_IMON_REG 0x3
#define TPS51632_VMAX_REG 0x4
#define TPS51632_DVFS_CONTROL_REG 0x5
#define TPS51632_POWER_STATE_REG 0x6
#define TPS51632_SLEW_REGS 0x7
#define TPS51632_FAULT_REG 0x14
#define TPS51632_MAX_REG 0x15
#define TPS51632_VOUT_MASK 0x7F
#define TPS51632_VOUT_OFFSET_MASK 0x1F
#define TPS51632_VMAX_MASK 0x7F
#define TPS51632_VMAX_LOCK 0x80
/* TPS51632_DVFS_CONTROL_REG */
#define TPS51632_DVFS_PWMEN 0x1
#define TPS51632_DVFS_STEP_20 0x2
#define TPS51632_DVFS_VMAX_PG 0x4
#define TPS51632_DVFS_PWMRST 0x8
#define TPS51632_DVFS_OCA_EN 0x10
#define TPS51632_DVFS_FCCM 0x20
/* TPS51632_POWER_STATE_REG */
#define TPS51632_POWER_STATE_MASK 0x03
#define TPS51632_POWER_STATE_MULTI_PHASE_CCM 0x0
#define TPS51632_POWER_STATE_SINGLE_PHASE_CCM 0x1
#define TPS51632_POWER_STATE_SINGLE_PHASE_DCM 0x2
#define TPS51632_MIN_VOLTAGE 500000
#define TPS51632_MAX_VOLTAGE 1520000
#define TPS51632_VOLTAGE_STEP_10mV 10000
#define TPS51632_VOLTAGE_STEP_20mV 20000
#define TPS51632_MAX_VSEL 0x7F
#define TPS51632_MIN_VSEL 0x19
#define TPS51632_DEFAULT_RAMP_DELAY 6000
#define TPS51632_VOLT_VSEL(uV) \
(DIV_ROUND_UP(uV - TPS51632_MIN_VOLTAGE, \
TPS51632_VOLTAGE_STEP_10mV) + \
TPS51632_MIN_VSEL)
/* TPS51632 chip information */
/* TPS51632 chip information: per-device state for the single DCDC output. */
struct tps51632_chip {
	struct device *dev;		/* parent I2C device */
	struct regulator_desc desc;	/* filled in at probe time */
	struct regulator_dev *rdev;	/* registered regulator handle */
	struct regmap *regmap;
};
/*
 * Program the slew-rate register. The hardware encodes the ramp as a
 * one-hot bit where each step corresponds to a 6000 uV/us increment;
 * a requested delay of 0 selects the slowest (bit 0) setting.
 */
static int tps51632_dcdc_set_ramp_delay(struct regulator_dev *rdev,
					int ramp_delay)
{
	struct tps51632_chip *tps = rdev_get_drvdata(rdev);
	int bit = 0;
	int ret;

	if (ramp_delay)
		bit = DIV_ROUND_UP(ramp_delay, 6000) - 1;

	ret = regmap_write(tps->regmap, TPS51632_SLEW_REGS, BIT(bit));
	if (ret < 0)
		dev_err(tps->dev, "SLEW reg write failed, err %d\n", ret);
	return ret;
}
/* Voltage control is fully regmap-backed; only ramp delay is custom. */
static const struct regulator_ops tps51632_dcdc_ops = {
	.get_voltage_sel	= regulator_get_voltage_sel_regmap,
	.set_voltage_sel	= regulator_set_voltage_sel_regmap,
	.list_voltage		= regulator_list_voltage_linear,
	.set_voltage_time_sel	= regulator_set_voltage_time_sel,
	.set_ramp_delay		= tps51632_dcdc_set_ramp_delay,
};
/*
 * One-time hardware init: when PWM DVFS is requested, program the base
 * voltage and (if still unlocked) the VMAX limit, then write the DVFS
 * control register. Without PWM DVFS only the control register is
 * cleared.
 */
static int tps51632_init_dcdc(struct tps51632_chip *tps,
			      struct tps51632_regulator_platform_data *pdata)
{
	int ret;
	uint8_t control = 0;
	int vsel;

	if (!pdata->enable_pwm_dvfs)
		goto skip_pwm_config;

	control |= TPS51632_DVFS_PWMEN;
	vsel = TPS51632_VOLT_VSEL(pdata->base_voltage_uV);
	ret = regmap_write(tps->regmap, TPS51632_VOLTAGE_BASE_REG, vsel);
	if (ret < 0) {
		dev_err(tps->dev, "BASE reg write failed, err %d\n", ret);
		return ret;
	}

	if (pdata->dvfs_step_20mV)
		control |= TPS51632_DVFS_STEP_20;

	if (pdata->max_voltage_uV) {
		unsigned int vmax;
		/**
		 * TPS51632 hw behavior: VMAX register can be write only
		 * once as it get locked after first write. The lock get
		 * reset only when device is power-reset.
		 * Write register only when lock bit is not enabled.
		 */
		ret = regmap_read(tps->regmap, TPS51632_VMAX_REG, &vmax);
		if (ret < 0) {
			dev_err(tps->dev, "VMAX read failed, err %d\n", ret);
			return ret;
		}
		if (!(vmax & TPS51632_VMAX_LOCK)) {
			vsel = TPS51632_VOLT_VSEL(pdata->max_voltage_uV);
			ret = regmap_write(tps->regmap, TPS51632_VMAX_REG,
					   vsel);
			if (ret < 0) {
				dev_err(tps->dev,
					"VMAX write failed, err %d\n", ret);
				return ret;
			}
		}
	}

skip_pwm_config:
	ret = regmap_write(tps->regmap, TPS51632_DVFS_CONTROL_REG, control);
	if (ret < 0)
		dev_err(tps->dev, "DVFS reg write failed, err %d\n", ret);
	return ret;
}
/* Hardware-updated registers that must always bypass the regmap cache. */
static bool is_volatile_reg(struct device *dev, unsigned int reg)
{
	return reg == TPS51632_OFFSET_REG ||
	       reg == TPS51632_FAULT_REG ||
	       reg == TPS51632_IMON_REG;
}
/* Registers 0x08-0x0F are a hole in the register map; all others read. */
static bool is_read_reg(struct device *dev, unsigned int reg)
{
	return reg < 0x08 || reg > 0x0F;
}
/* Only the setpoint/control registers accept writes. */
static bool is_write_reg(struct device *dev, unsigned int reg)
{
	return reg == TPS51632_VOLTAGE_SELECT_REG ||
	       reg == TPS51632_VOLTAGE_BASE_REG ||
	       reg == TPS51632_VMAX_REG ||
	       reg == TPS51632_DVFS_CONTROL_REG ||
	       reg == TPS51632_POWER_STATE_REG ||
	       reg == TPS51632_SLEW_REGS;
}
/* Cached 8/8 regmap with per-register access rules defined above. */
static const struct regmap_config tps51632_regmap_config = {
	.reg_bits		= 8,
	.val_bits		= 8,
	.writeable_reg		= is_write_reg,
	.readable_reg		= is_read_reg,
	.volatile_reg		= is_volatile_reg,
	.max_register		= TPS51632_MAX_REG - 1,
	.cache_type		= REGCACHE_RBTREE,
};
#if defined(CONFIG_OF)
static const struct of_device_id tps51632_of_match[] = {
	{ .compatible = "ti,tps51632",},
	{},
};
MODULE_DEVICE_TABLE(of, tps51632_of_match);

/*
 * Build platform data from the device-tree node. The min/max constraints
 * from the standard regulator bindings double as the DVFS base/max
 * voltages, falling back to chip limits when unset.
 * Returns NULL on allocation or binding-parse failure.
 */
static struct tps51632_regulator_platform_data *
	of_get_tps51632_platform_data(struct device *dev,
				      const struct regulator_desc *desc)
{
	struct tps51632_regulator_platform_data *pdata;
	struct device_node *np = dev->of_node;

	pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
	if (!pdata)
		return NULL;

	pdata->reg_init_data = of_get_regulator_init_data(dev, dev->of_node,
							  desc);
	if (!pdata->reg_init_data) {
		dev_err(dev, "Not able to get OF regulator init data\n");
		return NULL;
	}

	pdata->enable_pwm_dvfs =
			of_property_read_bool(np, "ti,enable-pwm-dvfs");
	pdata->dvfs_step_20mV = of_property_read_bool(np, "ti,dvfs-step-20mV");

	pdata->base_voltage_uV = pdata->reg_init_data->constraints.min_uV ? :
					TPS51632_MIN_VOLTAGE;
	pdata->max_voltage_uV = pdata->reg_init_data->constraints.max_uV ? :
					TPS51632_MAX_VOLTAGE;
	return pdata;
}
#else
/* Non-DT build: no way to synthesize platform data. */
static struct tps51632_regulator_platform_data *
	of_get_tps51632_platform_data(struct device *dev,
				      const struct regulator_desc *desc)
{
	return NULL;
}
#endif
/*
 * Probe: validate platform data (board file or DT), initialize the chip
 * (DVFS base/max programming), and register the single DCDC regulator.
 * The voltage-select register depends on whether PWM DVFS is enabled.
 */
static int tps51632_probe(struct i2c_client *client)
{
	struct tps51632_regulator_platform_data *pdata;
	struct regulator_dev *rdev;
	struct tps51632_chip *tps;
	int ret;
	struct regulator_config config = { };

	if (client->dev.of_node) {
		const struct of_device_id *match;
		match = of_match_device(of_match_ptr(tps51632_of_match),
				&client->dev);
		if (!match) {
			dev_err(&client->dev, "Error: No device match found\n");
			return -ENODEV;
		}
	}

	tps = devm_kzalloc(&client->dev, sizeof(*tps), GFP_KERNEL);
	if (!tps)
		return -ENOMEM;

	tps->dev = &client->dev;
	/* fixed linear map: 500 mV base, 10 mV steps, vsel 0x19..0x7F */
	tps->desc.name = client->name;
	tps->desc.id = 0;
	tps->desc.ramp_delay = TPS51632_DEFAULT_RAMP_DELAY;
	tps->desc.min_uV = TPS51632_MIN_VOLTAGE;
	tps->desc.uV_step = TPS51632_VOLTAGE_STEP_10mV;
	tps->desc.linear_min_sel = TPS51632_MIN_VSEL;
	tps->desc.n_voltages = TPS51632_MAX_VSEL + 1;
	tps->desc.ops = &tps51632_dcdc_ops;
	tps->desc.type = REGULATOR_VOLTAGE;
	tps->desc.owner = THIS_MODULE;

	pdata = dev_get_platdata(&client->dev);
	if (!pdata && client->dev.of_node)
		pdata = of_get_tps51632_platform_data(&client->dev, &tps->desc);
	if (!pdata) {
		dev_err(&client->dev, "No Platform data\n");
		return -EINVAL;
	}

	/* range-check the DVFS voltages before touching hardware */
	if (pdata->enable_pwm_dvfs) {
		if ((pdata->base_voltage_uV < TPS51632_MIN_VOLTAGE) ||
		    (pdata->base_voltage_uV > TPS51632_MAX_VOLTAGE)) {
			dev_err(&client->dev, "Invalid base_voltage_uV setting\n");
			return -EINVAL;
		}

		if ((pdata->max_voltage_uV) &&
		    ((pdata->max_voltage_uV < TPS51632_MIN_VOLTAGE) ||
		     (pdata->max_voltage_uV > TPS51632_MAX_VOLTAGE))) {
			dev_err(&client->dev, "Invalid max_voltage_uV setting\n");
			return -EINVAL;
		}
	}

	/* in DVFS mode the base register is the live voltage setpoint */
	if (pdata->enable_pwm_dvfs)
		tps->desc.vsel_reg = TPS51632_VOLTAGE_BASE_REG;
	else
		tps->desc.vsel_reg = TPS51632_VOLTAGE_SELECT_REG;
	tps->desc.vsel_mask = TPS51632_VOUT_MASK;

	tps->regmap = devm_regmap_init_i2c(client, &tps51632_regmap_config);
	if (IS_ERR(tps->regmap)) {
		ret = PTR_ERR(tps->regmap);
		dev_err(&client->dev, "regmap init failed, err %d\n", ret);
		return ret;
	}
	i2c_set_clientdata(client, tps);

	ret = tps51632_init_dcdc(tps, pdata);
	if (ret < 0) {
		dev_err(tps->dev, "Init failed, err = %d\n", ret);
		return ret;
	}

	/* Register the regulators */
	config.dev = &client->dev;
	config.init_data = pdata->reg_init_data;
	config.driver_data = tps;
	config.regmap = tps->regmap;
	config.of_node = client->dev.of_node;

	rdev = devm_regulator_register(&client->dev, &tps->desc, &config);
	if (IS_ERR(rdev)) {
		dev_err(tps->dev, "regulator register failed\n");
		return PTR_ERR(rdev);
	}

	tps->rdev = rdev;
	return 0;
}
/* I2C device id table for non-DT instantiation. */
static const struct i2c_device_id tps51632_id[] = {
	{.name = "tps51632",},
	{},
};

MODULE_DEVICE_TABLE(i2c, tps51632_id);

static struct i2c_driver tps51632_i2c_driver = {
	.driver = {
		.name = "tps51632",
		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
		.of_match_table = of_match_ptr(tps51632_of_match),
	},
	.probe = tps51632_probe,
	.id_table = tps51632_id,
};
/* Registered at subsys_initcall time so early consumers can find it. */
static int __init tps51632_init(void)
{
	return i2c_add_driver(&tps51632_i2c_driver);
}
subsys_initcall(tps51632_init);

static void __exit tps51632_cleanup(void)
{
	i2c_del_driver(&tps51632_i2c_driver);
}
module_exit(tps51632_cleanup);
MODULE_AUTHOR("Laxman Dewangan <[email protected]>");
MODULE_DESCRIPTION("TPS51632 voltage regulator driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/regulator/tps51632-regulator.c |
// SPDX-License-Identifier: GPL-2.0
//
// SY8827N regulator driver
//
// Copyright (C) 2020 Synaptics Incorporated
//
// Author: Jisheng Zhang <[email protected]>
#include <linux/gpio/consumer.h>
#include <linux/module.h>
#include <linux/i2c.h>
#include <linux/of.h>
#include <linux/regmap.h>
#include <linux/regulator/driver.h>
#include <linux/regulator/of_regulator.h>
#define SY8827N_VSEL0 0
#define SY8827N_BUCK_EN (1 << 7)
#define SY8827N_MODE (1 << 6)
#define SY8827N_VSEL1 1
#define SY8827N_CTRL 2
#define SY8827N_ID1 3
#define SY8827N_ID2 4
#define SY8827N_PGOOD 5
#define SY8827N_MAX (SY8827N_PGOOD + 1)
#define SY8827N_NVOLTAGES 64
#define SY8827N_VSELMIN 600000
#define SY8827N_VSELSTEP 12500
/* Per-device state for the single SY8827N buck converter. */
struct sy8827n_device_info {
	struct device *dev;
	struct regulator_desc desc;
	struct regulator_init_data *regulator;
	struct gpio_desc *en_gpio;	/* optional enable GPIO */
	unsigned int vsel_reg;		/* VSEL0 or VSEL1, per DT property */
};
/*
 * sy8827n_set_mode() - select forced-PWM (fast) or auto-PFM (normal)
 * operation via the MODE bit of the active VSEL register.
 *
 * Fix: the regmap_update_bits() result was previously discarded, so an
 * I2C failure was silently reported as success; propagate it instead.
 *
 * Return: 0 on success, -EINVAL for unsupported modes, or the
 * regmap error code.
 */
static int sy8827n_set_mode(struct regulator_dev *rdev, unsigned int mode)
{
	struct sy8827n_device_info *di = rdev_get_drvdata(rdev);
	int ret;

	switch (mode) {
	case REGULATOR_MODE_FAST:
		ret = regmap_update_bits(rdev->regmap, di->vsel_reg,
					 SY8827N_MODE, SY8827N_MODE);
		break;
	case REGULATOR_MODE_NORMAL:
		ret = regmap_update_bits(rdev->regmap, di->vsel_reg,
					 SY8827N_MODE, 0);
		break;
	default:
		return -EINVAL;
	}
	return ret;
}
/*
 * Report the current operating mode from the MODE bit of the active
 * VSEL register.
 * NOTE(review): on I2C failure the negative errno is returned through
 * the unsigned return type, matching the common regulator get_mode
 * idiom — confirm callers treat large values as errors.
 */
static unsigned int sy8827n_get_mode(struct regulator_dev *rdev)
{
	struct sy8827n_device_info *di = rdev_get_drvdata(rdev);
	u32 val;
	int ret = 0;

	ret = regmap_read(rdev->regmap, di->vsel_reg, &val);
	if (ret < 0)
		return ret;
	if (val & SY8827N_MODE)
		return REGULATOR_MODE_FAST;
	else
		return REGULATOR_MODE_NORMAL;
}
/* All voltage/enable control is regmap-backed; mode ops are custom. */
static const struct regulator_ops sy8827n_regulator_ops = {
	.set_voltage_sel = regulator_set_voltage_sel_regmap,
	.get_voltage_sel = regulator_get_voltage_sel_regmap,
	.set_voltage_time_sel = regulator_set_voltage_time_sel,
	.map_voltage = regulator_map_voltage_linear,
	.list_voltage = regulator_list_voltage_linear,
	.enable = regulator_enable_regmap,
	.disable = regulator_disable_regmap,
	.is_enabled = regulator_is_enabled_regmap,
	.set_mode = sy8827n_set_mode,
	.get_mode = sy8827n_get_mode,
};
static int sy8827n_regulator_register(struct sy8827n_device_info *di,
struct regulator_config *config)
{
struct regulator_desc *rdesc = &di->desc;
struct regulator_dev *rdev;
rdesc->name = "sy8827n-reg";
rdesc->supply_name = "vin";
rdesc->ops = &sy8827n_regulator_ops;
rdesc->type = REGULATOR_VOLTAGE;
rdesc->n_voltages = SY8827N_NVOLTAGES;
rdesc->enable_reg = di->vsel_reg;
rdesc->enable_mask = SY8827N_BUCK_EN;
rdesc->min_uV = SY8827N_VSELMIN;
rdesc->uV_step = SY8827N_VSELSTEP;
rdesc->vsel_reg = di->vsel_reg;
rdesc->vsel_mask = rdesc->n_voltages - 1;
rdesc->owner = THIS_MODULE;
rdev = devm_regulator_register(di->dev, &di->desc, config);
return PTR_ERR_OR_ZERO(rdev);
}
/* PGOOD reflects live hardware status and must not be served from cache. */
static bool sy8827n_volatile_reg(struct device *dev, unsigned int reg)
{
	return reg == SY8827N_PGOOD;
}
/* Flat-cached 8/8 regmap; only PGOOD is volatile. */
static const struct regmap_config sy8827n_regmap_config = {
	.reg_bits = 8,
	.val_bits = 8,
	.volatile_reg = sy8827n_volatile_reg,
	.num_reg_defaults_raw = SY8827N_MAX,
	.cache_type = REGCACHE_FLAT,
};
/*
 * Probe: parse DT init data, claim the optional enable GPIO (driven
 * high immediately), pick the active VSEL register from the
 * vsel-state-high property, then set up regmap and register the buck.
 */
static int sy8827n_i2c_probe(struct i2c_client *client)
{
	struct device *dev = &client->dev;
	struct device_node *np = dev->of_node;
	struct sy8827n_device_info *di;
	struct regulator_config config = { };
	struct regmap *regmap;
	int ret;

	di = devm_kzalloc(dev, sizeof(struct sy8827n_device_info), GFP_KERNEL);
	if (!di)
		return -ENOMEM;

	di->regulator = of_get_regulator_init_data(dev, np, &di->desc);
	if (!di->regulator) {
		dev_err(dev, "Platform data not found!\n");
		return -EINVAL;
	}

	/* GPIOD_OUT_HIGH: asserting the enable pin at probe time */
	di->en_gpio = devm_gpiod_get_optional(dev, "enable", GPIOD_OUT_HIGH);
	if (IS_ERR(di->en_gpio))
		return PTR_ERR(di->en_gpio);

	if (of_property_read_bool(np, "silergy,vsel-state-high"))
		di->vsel_reg = SY8827N_VSEL1;
	else
		di->vsel_reg = SY8827N_VSEL0;

	di->dev = dev;

	regmap = devm_regmap_init_i2c(client, &sy8827n_regmap_config);
	if (IS_ERR(regmap)) {
		dev_err(dev, "Failed to allocate regmap!\n");
		return PTR_ERR(regmap);
	}
	i2c_set_clientdata(client, di);

	config.dev = di->dev;
	config.init_data = di->regulator;
	config.regmap = regmap;
	config.driver_data = di;
	config.of_node = np;

	ret = sy8827n_regulator_register(di, &config);
	if (ret < 0)
		dev_err(dev, "Failed to register regulator!\n");
	return ret;
}
/* Device-tree and plain I2C match tables. */
static const struct of_device_id sy8827n_dt_ids[] = {
	{
		.compatible = "silergy,sy8827n",
	},
	{ }
};
MODULE_DEVICE_TABLE(of, sy8827n_dt_ids);

static const struct i2c_device_id sy8827n_id[] = {
	{ "sy8827n", },
	{ },
};
MODULE_DEVICE_TABLE(i2c, sy8827n_id);
/* Standard I2C driver registration via module_i2c_driver(). */
static struct i2c_driver sy8827n_regulator_driver = {
	.driver = {
		.name = "sy8827n-regulator",
		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
		.of_match_table = sy8827n_dt_ids,
	},
	.probe = sy8827n_i2c_probe,
	.id_table = sy8827n_id,
};
module_i2c_driver(sy8827n_regulator_driver);
MODULE_AUTHOR("Jisheng Zhang <[email protected]>");
MODULE_DESCRIPTION("SY8827N regulator driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/regulator/sy8827n.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (c) 2022 Analog Devices, Inc.
* ADI Regulator driver for the MAX77540 and MAX77541
*/
#include <linux/mfd/max77541.h>
#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/regulator/driver.h>
/* Shared by MAX77540 and MAX77541: pickable linear-range regmap ops. */
static const struct regulator_ops max77541_buck_ops = {
	.enable			= regulator_enable_regmap,
	.disable		= regulator_disable_regmap,
	.is_enabled		= regulator_is_enabled_regmap,
	.list_voltage		= regulator_list_voltage_pickable_linear_range,
	.get_voltage_sel	= regulator_get_voltage_sel_pickable_regmap,
	.set_voltage_sel	= regulator_set_voltage_sel_pickable_regmap,
};
/*
 * Per-chip voltage maps. Each chip exposes three selectable ranges
 * (chosen via the CFG1 RNG bits); entries come in pairs: the usable
 * linear span followed by a zero-step plateau that clamps the top of
 * the selector space to the range maximum.
 */
static const struct linear_range max77540_buck_ranges[] = {
	/* Ranges when VOLT_SEL bits are 0x00 */
	REGULATOR_LINEAR_RANGE(500000, 0x00, 0x8B, 5000),
	REGULATOR_LINEAR_RANGE(1200000, 0x8C, 0xFF, 0),
	/* Ranges when VOLT_SEL bits are 0x40 */
	REGULATOR_LINEAR_RANGE(1200000, 0x00, 0x8B, 10000),
	REGULATOR_LINEAR_RANGE(2400000, 0x8C, 0xFF, 0),
	/* Ranges when VOLT_SEL bits are 0x80 */
	REGULATOR_LINEAR_RANGE(2000000, 0x00, 0x9F, 20000),
	REGULATOR_LINEAR_RANGE(5200000, 0xA0, 0xFF, 0),
};

static const struct linear_range max77541_buck_ranges[] = {
	/* Ranges when VOLT_SEL bits are 0x00 */
	REGULATOR_LINEAR_RANGE(300000, 0x00, 0xB3, 5000),
	REGULATOR_LINEAR_RANGE(1200000, 0xB4, 0xFF, 0),
	/* Ranges when VOLT_SEL bits are 0x40 */
	REGULATOR_LINEAR_RANGE(1200000, 0x00, 0x8B, 10000),
	REGULATOR_LINEAR_RANGE(2400000, 0x8C, 0xFF, 0),
	/* Ranges when VOLT_SEL bits are 0x80 */
	REGULATOR_LINEAR_RANGE(2000000, 0x00, 0x9F, 20000),
	REGULATOR_LINEAR_RANGE(5200000, 0xA0, 0xFF, 0),
};

/* RNG field values matching the three range pairs above, in order. */
static const unsigned int max77541_buck_volt_range_sel[] = {
	0x0, 0x0, 0x1, 0x1, 0x2, 0x2,
};

enum max77541_regulators {
	MAX77541_BUCK1 = 1,
	MAX77541_BUCK2,
};
/*
 * Descriptor builders for buck <_id>. The two macros are identical
 * except for which linear_ranges table they reference (the chips share
 * register layout and range selectors, but differ in the first range's
 * span).
 */
#define MAX77540_BUCK(_id, _ops)				\
	{	.id = MAX77541_BUCK ## _id,			\
		.name = "buck"#_id,				\
		.of_match = "buck"#_id,				\
		.regulators_node = "regulators",		\
		.enable_reg = MAX77541_REG_EN_CTRL,		\
		.enable_mask = MAX77541_BIT_M ## _id ## _EN,	\
		.ops = &(_ops),					\
		.type = REGULATOR_VOLTAGE,			\
		.linear_ranges = max77540_buck_ranges,		\
		.n_linear_ranges = ARRAY_SIZE(max77540_buck_ranges), \
		.vsel_reg = MAX77541_REG_M ## _id ## _VOUT,	\
		.vsel_mask = MAX77541_BITS_MX_VOUT,		\
		.vsel_range_reg = MAX77541_REG_M ## _id ## _CFG1, \
		.vsel_range_mask = MAX77541_BITS_MX_CFG1_RNG,	\
		.linear_range_selectors_bitfield = max77541_buck_volt_range_sel, \
		.owner = THIS_MODULE,				\
	}

#define MAX77541_BUCK(_id, _ops)				\
	{	.id = MAX77541_BUCK ## _id,			\
		.name = "buck"#_id,				\
		.of_match = "buck"#_id,				\
		.regulators_node = "regulators",		\
		.enable_reg = MAX77541_REG_EN_CTRL,		\
		.enable_mask = MAX77541_BIT_M ## _id ## _EN,	\
		.ops = &(_ops),					\
		.type = REGULATOR_VOLTAGE,			\
		.linear_ranges = max77541_buck_ranges,		\
		.n_linear_ranges = ARRAY_SIZE(max77541_buck_ranges), \
		.vsel_reg = MAX77541_REG_M ## _id ## _VOUT,	\
		.vsel_mask = MAX77541_BITS_MX_VOUT,		\
		.vsel_range_reg = MAX77541_REG_M ## _id ## _CFG1, \
		.vsel_range_mask = MAX77541_BITS_MX_CFG1_RNG,	\
		.linear_range_selectors_bitfield = max77541_buck_volt_range_sel, \
		.owner = THIS_MODULE,				\
	}
/* Two bucks per chip; the probe picks the table matching the chip id. */
static const struct regulator_desc max77540_regulators_desc[] = {
	MAX77540_BUCK(1, max77541_buck_ops),
	MAX77540_BUCK(2, max77541_buck_ops),
};

static const struct regulator_desc max77541_regulators_desc[] = {
	MAX77541_BUCK(1, max77541_buck_ops),
	MAX77541_BUCK(2, max77541_buck_ops),
};
/*
 * MFD-cell probe: select the descriptor table for the parent chip's id
 * and register both bucks. config.dev is the MFD parent so the
 * regulator core picks up the parent's regmap and OF nodes.
 */
static int max77541_regulator_probe(struct platform_device *pdev)
{
	struct regulator_config config = {};
	const struct regulator_desc *desc;
	struct device *dev = &pdev->dev;
	struct regulator_dev *rdev;
	struct max77541 *max77541 = dev_get_drvdata(dev->parent);
	unsigned int i;

	config.dev = dev->parent;

	switch (max77541->id) {
	case MAX77540:
		desc = max77540_regulators_desc;
		break;
	case MAX77541:
		desc = max77541_regulators_desc;
		break;
	default:
		return -EINVAL;
	}

	for (i = 0; i < MAX77541_MAX_REGULATORS; i++) {
		rdev = devm_regulator_register(dev, &desc[i], &config);
		if (IS_ERR(rdev))
			return dev_err_probe(dev, PTR_ERR(rdev),
					     "Failed to register regulator\n");
	}

	return 0;
}
/* Platform ids for the two MFD cell names this driver serves. */
static const struct platform_device_id max77541_regulator_platform_id[] = {
	{ "max77540-regulator" },
	{ "max77541-regulator" },
	{ }
};
MODULE_DEVICE_TABLE(platform, max77541_regulator_platform_id);

static struct platform_driver max77541_regulator_driver = {
	.driver = {
		.name = "max77541-regulator",
	},
	.probe = max77541_regulator_probe,
	.id_table = max77541_regulator_platform_id,
};
module_platform_driver(max77541_regulator_driver);
MODULE_AUTHOR("Okan Sahin <[email protected]>");
MODULE_DESCRIPTION("MAX77540/MAX77541 regulator driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/regulator/max77541-regulator.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* act8865-regulator.c - Voltage regulation for active-semi ACT88xx PMUs
*
* http://www.active-semi.com/products/power-management-units/act88xx/
*
* Copyright (C) 2013 Atmel Corporation
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/i2c.h>
#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/regulator/driver.h>
#include <linux/regulator/act8865.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/power_supply.h>
#include <linux/regulator/of_regulator.h>
#include <linux/regmap.h>
#include <dt-bindings/regulator/active-semi,8865-regulator.h>
/*
* ACT8600 Global Register Map.
*/
#define ACT8600_SYS_MODE 0x00
#define ACT8600_SYS_CTRL 0x01
#define ACT8600_DCDC1_VSET 0x10
#define ACT8600_DCDC1_CTRL 0x12
#define ACT8600_DCDC2_VSET 0x20
#define ACT8600_DCDC2_CTRL 0x22
#define ACT8600_DCDC3_VSET 0x30
#define ACT8600_DCDC3_CTRL 0x32
#define ACT8600_SUDCDC4_VSET 0x40
#define ACT8600_SUDCDC4_CTRL 0x41
#define ACT8600_LDO5_VSET 0x50
#define ACT8600_LDO5_CTRL 0x51
#define ACT8600_LDO6_VSET 0x60
#define ACT8600_LDO6_CTRL 0x61
#define ACT8600_LDO7_VSET 0x70
#define ACT8600_LDO7_CTRL 0x71
#define ACT8600_LDO8_VSET 0x80
#define ACT8600_LDO8_CTRL 0x81
#define ACT8600_LDO910_CTRL 0x91
#define ACT8600_APCH0 0xA1
#define ACT8600_APCH1 0xA8
#define ACT8600_APCH2 0xA9
#define ACT8600_APCH_STAT 0xAA
#define ACT8600_OTG0 0xB0
#define ACT8600_OTG1 0xB2
/*
* ACT8846 Global Register Map.
*/
#define ACT8846_SYS0 0x00
#define ACT8846_SYS1 0x01
#define ACT8846_REG1_VSET 0x10
#define ACT8846_REG1_CTRL 0x12
#define ACT8846_REG2_VSET0 0x20
#define ACT8846_REG2_VSET1 0x21
#define ACT8846_REG2_CTRL 0x22
#define ACT8846_REG3_VSET0 0x30
#define ACT8846_REG3_VSET1 0x31
#define ACT8846_REG3_CTRL 0x32
#define ACT8846_REG4_VSET0 0x40
#define ACT8846_REG4_VSET1 0x41
#define ACT8846_REG4_CTRL 0x42
#define ACT8846_REG5_VSET 0x50
#define ACT8846_REG5_CTRL 0x51
#define ACT8846_REG6_VSET 0x58
#define ACT8846_REG6_CTRL 0x59
#define ACT8846_REG7_VSET 0x60
#define ACT8846_REG7_CTRL 0x61
#define ACT8846_REG8_VSET 0x68
#define ACT8846_REG8_CTRL 0x69
#define ACT8846_REG9_VSET 0x70
#define ACT8846_REG9_CTRL 0x71
#define ACT8846_REG10_VSET 0x80
#define ACT8846_REG10_CTRL 0x81
#define ACT8846_REG11_VSET 0x90
#define ACT8846_REG11_CTRL 0x91
#define ACT8846_REG12_VSET 0xa0
#define ACT8846_REG12_CTRL 0xa1
#define ACT8846_REG13_CTRL 0xb1
#define ACT8846_GLB_OFF_CTRL 0xc3
#define ACT8846_OFF_SYSMASK 0x18
/*
* ACT8865 Global Register Map.
*/
#define ACT8865_SYS_MODE 0x00
#define ACT8865_SYS_CTRL 0x01
#define ACT8865_SYS_UNLK_REGS 0x0b
#define ACT8865_DCDC1_VSET1 0x20
#define ACT8865_DCDC1_VSET2 0x21
#define ACT8865_DCDC1_CTRL 0x22
#define ACT8865_DCDC1_SUS 0x24
#define ACT8865_DCDC2_VSET1 0x30
#define ACT8865_DCDC2_VSET2 0x31
#define ACT8865_DCDC2_CTRL 0x32
#define ACT8865_DCDC2_SUS 0x34
#define ACT8865_DCDC3_VSET1 0x40
#define ACT8865_DCDC3_VSET2 0x41
#define ACT8865_DCDC3_CTRL 0x42
#define ACT8865_DCDC3_SUS 0x44
#define ACT8865_LDO1_VSET 0x50
#define ACT8865_LDO1_CTRL 0x51
#define ACT8865_LDO1_SUS 0x52
#define ACT8865_LDO2_VSET 0x54
#define ACT8865_LDO2_CTRL 0x55
#define ACT8865_LDO2_SUS 0x56
#define ACT8865_LDO3_VSET 0x60
#define ACT8865_LDO3_CTRL 0x61
#define ACT8865_LDO3_SUS 0x62
#define ACT8865_LDO4_VSET 0x64
#define ACT8865_LDO4_CTRL 0x65
#define ACT8865_LDO4_SUS 0x66
#define ACT8865_MSTROFF 0x20
/*
* Field Definitions.
*/
#define ACT8865_ENA 0x80 /* ON - [7] */
#define ACT8865_DIS 0x40 /* DIS - [6] */
#define ACT8865_VSEL_MASK 0x3F /* VSET - [5:0] */
#define ACT8600_LDO10_ENA 0x40 /* ON - [6] */
#define ACT8600_SUDCDC_VSEL_MASK 0xFF /* SUDCDC VSET - [7:0] */
#define ACT8600_APCH_CHG_ACIN BIT(7)
#define ACT8600_APCH_CHG_USB BIT(6)
#define ACT8600_APCH_CSTATE0 BIT(5)
#define ACT8600_APCH_CSTATE1 BIT(4)
/*
* ACT8865 voltage number
*/
#define ACT8865_VOLTAGE_NUM 64
#define ACT8600_SUDCDC_VOLTAGE_NUM 256
/* Per-device state: regmap plus the chip-specific global-off register/bit. */
struct act8865 {
	struct regmap *regmap;
	int off_reg;
	int off_mask;
};
static const struct regmap_range act8600_reg_ranges[] = {
regmap_reg_range(0x00, 0x01),
regmap_reg_range(0x10, 0x10),
regmap_reg_range(0x12, 0x12),
regmap_reg_range(0x20, 0x20),
regmap_reg_range(0x22, 0x22),
regmap_reg_range(0x30, 0x30),
regmap_reg_range(0x32, 0x32),
regmap_reg_range(0x40, 0x41),
regmap_reg_range(0x50, 0x51),
regmap_reg_range(0x60, 0x61),
regmap_reg_range(0x70, 0x71),
regmap_reg_range(0x80, 0x81),
regmap_reg_range(0x91, 0x91),
regmap_reg_range(0xA1, 0xA1),
regmap_reg_range(0xA8, 0xAA),
regmap_reg_range(0xB0, 0xB0),
regmap_reg_range(0xB2, 0xB2),
regmap_reg_range(0xC1, 0xC1),
};
static const struct regmap_range act8600_reg_ro_ranges[] = {
regmap_reg_range(0xAA, 0xAA),
regmap_reg_range(0xC1, 0xC1),
};
static const struct regmap_range act8600_reg_volatile_ranges[] = {
regmap_reg_range(0x00, 0x01),
regmap_reg_range(0x12, 0x12),
regmap_reg_range(0x22, 0x22),
regmap_reg_range(0x32, 0x32),
regmap_reg_range(0x41, 0x41),
regmap_reg_range(0x51, 0x51),
regmap_reg_range(0x61, 0x61),
regmap_reg_range(0x71, 0x71),
regmap_reg_range(0x81, 0x81),
regmap_reg_range(0xA8, 0xA8),
regmap_reg_range(0xAA, 0xAA),
regmap_reg_range(0xB0, 0xB0),
regmap_reg_range(0xC1, 0xC1),
};
static const struct regmap_access_table act8600_write_ranges_table = {
.yes_ranges = act8600_reg_ranges,
.n_yes_ranges = ARRAY_SIZE(act8600_reg_ranges),
.no_ranges = act8600_reg_ro_ranges,
.n_no_ranges = ARRAY_SIZE(act8600_reg_ro_ranges),
};
static const struct regmap_access_table act8600_read_ranges_table = {
.yes_ranges = act8600_reg_ranges,
.n_yes_ranges = ARRAY_SIZE(act8600_reg_ranges),
};
static const struct regmap_access_table act8600_volatile_ranges_table = {
.yes_ranges = act8600_reg_volatile_ranges,
.n_yes_ranges = ARRAY_SIZE(act8600_reg_volatile_ranges),
};
static const struct regmap_config act8600_regmap_config = {
.reg_bits = 8,
.val_bits = 8,
.max_register = 0xFF,
.wr_table = &act8600_write_ranges_table,
.rd_table = &act8600_read_ranges_table,
.volatile_table = &act8600_volatile_ranges_table,
};
static const struct regmap_config act8865_regmap_config = {
.reg_bits = 8,
.val_bits = 8,
};
static const struct linear_range act8865_voltage_ranges[] = {
REGULATOR_LINEAR_RANGE(600000, 0, 23, 25000),
REGULATOR_LINEAR_RANGE(1200000, 24, 47, 50000),
REGULATOR_LINEAR_RANGE(2400000, 48, 63, 100000),
};
static const struct linear_range act8600_sudcdc_voltage_ranges[] = {
REGULATOR_LINEAR_RANGE(3000000, 0, 63, 0),
REGULATOR_LINEAR_RANGE(3000000, 64, 159, 100000),
REGULATOR_LINEAR_RANGE(12600000, 160, 191, 200000),
REGULATOR_LINEAR_RANGE(19000000, 192, 247, 400000),
REGULATOR_LINEAR_RANGE(41400000, 248, 255, 0),
};
/*
 * Program the per-regulator suspend (hibernate) behavior register.
 * Bit 4 of the written value selects enabled-in-suspend; the base
 * values (0xa8 for DCDCs, 0xe8 for LDOs) are the remaining register
 * contents — presumably the datasheet-default control bits for each
 * regulator class; TODO confirm against the ACT8865 datasheet.
 */
static int act8865_set_suspend_state(struct regulator_dev *rdev, bool enable)
{
	struct regmap *regmap = rdev->regmap;
	int id = rdev->desc->id, reg, val;

	switch (id) {
	case ACT8865_ID_DCDC1:
		reg = ACT8865_DCDC1_SUS;
		val = 0xa8;
		break;
	case ACT8865_ID_DCDC2:
		reg = ACT8865_DCDC2_SUS;
		val = 0xa8;
		break;
	case ACT8865_ID_DCDC3:
		reg = ACT8865_DCDC3_SUS;
		val = 0xa8;
		break;
	case ACT8865_ID_LDO1:
		reg = ACT8865_LDO1_SUS;
		val = 0xe8;
		break;
	case ACT8865_ID_LDO2:
		reg = ACT8865_LDO2_SUS;
		val = 0xe8;
		break;
	case ACT8865_ID_LDO3:
		reg = ACT8865_LDO3_SUS;
		val = 0xe8;
		break;
	case ACT8865_ID_LDO4:
		reg = ACT8865_LDO4_SUS;
		val = 0xe8;
		break;
	default:
		return -EINVAL;
	}

	if (enable)
		val |= BIT(4);

	/*
	 * Ask the PMIC to enable/disable this output when entering hibernate
	 * mode.
	 */
	return regmap_write(regmap, reg, val);
}
/* Keep this output enabled while the PMIC is in hibernate mode. */
static int act8865_set_suspend_enable(struct regulator_dev *rdev)
{
	return act8865_set_suspend_state(rdev, true);
}

/* Turn this output off while the PMIC is in hibernate mode. */
static int act8865_set_suspend_disable(struct regulator_dev *rdev)
{
	return act8865_set_suspend_state(rdev, false);
}
/*
 * Translate the DT-encoded operating mode (regulator-allowed-modes) into
 * the framework's REGULATOR_MODE_* constants.
 */
static unsigned int act8865_of_map_mode(unsigned int mode)
{
	if (mode == ACT8865_REGULATOR_MODE_FIXED)
		return REGULATOR_MODE_FAST;
	if (mode == ACT8865_REGULATOR_MODE_NORMAL)
		return REGULATOR_MODE_NORMAL;
	if (mode == ACT8865_REGULATOR_MODE_LOWPOWER)
		return REGULATOR_MODE_STANDBY;

	return REGULATOR_MODE_INVALID;
}
/*
 * Set the operating mode of one output by toggling bit 5 of its control
 * register.  The bit's meaning differs by output type: for the bucks
 * (DCDC1-3) setting it selects fixed-frequency operation (FAST/NORMAL),
 * for the LDOs setting it selects low-power operation (STANDBY).
 *
 * Returns 0 on success or a negative error code.
 */
static int act8865_set_mode(struct regulator_dev *rdev, unsigned int mode)
{
	struct regmap *regmap = rdev->regmap;
	int id = rdev_get_id(rdev);
	int reg, val;

	/* Pick the control register for this output. */
	switch (id) {
	case ACT8865_ID_DCDC1:
		reg = ACT8865_DCDC1_CTRL;
		break;
	case ACT8865_ID_DCDC2:
		reg = ACT8865_DCDC2_CTRL;
		break;
	case ACT8865_ID_DCDC3:
		reg = ACT8865_DCDC3_CTRL;
		break;
	case ACT8865_ID_LDO1:
		reg = ACT8865_LDO1_CTRL;
		break;
	case ACT8865_ID_LDO2:
		reg = ACT8865_LDO2_CTRL;
		break;
	case ACT8865_ID_LDO3:
		reg = ACT8865_LDO3_CTRL;
		break;
	case ACT8865_ID_LDO4:
		reg = ACT8865_LDO4_CTRL;
		break;
	default:
		return -EINVAL;
	}

	if (mode == REGULATOR_MODE_FAST || mode == REGULATOR_MODE_NORMAL)
		val = (id <= ACT8865_ID_DCDC3) ? BIT(5) : 0;
	else if (mode == REGULATOR_MODE_STANDBY)
		val = (id > ACT8865_ID_DCDC3) ? BIT(5) : 0;
	else
		return -EINVAL;

	return regmap_update_bits(regmap, reg, BIT(5), val);
}
/*
 * Report the current operating mode of one output.
 *
 * Bit 5 of the control register encodes the mode; for the bucks (DCDC1-3)
 * the bit set means fixed-frequency operation (reported as FAST), for the
 * LDOs the bit *clear* means normal operation, otherwise STANDBY.
 *
 * NOTE(review): on failure this returns a negative errno through an
 * unsigned int return type — the long-standing .get_mode convention.
 */
static unsigned int act8865_get_mode(struct regulator_dev *rdev)
{
	struct regmap *regmap = rdev->regmap;
	int id = rdev_get_id(rdev);
	int reg, ret, val = 0;

	/* Pick the control register for this output. */
	switch (id) {
	case ACT8865_ID_DCDC1:
		reg = ACT8865_DCDC1_CTRL;
		break;
	case ACT8865_ID_DCDC2:
		reg = ACT8865_DCDC2_CTRL;
		break;
	case ACT8865_ID_DCDC3:
		reg = ACT8865_DCDC3_CTRL;
		break;
	case ACT8865_ID_LDO1:
		reg = ACT8865_LDO1_CTRL;
		break;
	case ACT8865_ID_LDO2:
		reg = ACT8865_LDO2_CTRL;
		break;
	case ACT8865_ID_LDO3:
		reg = ACT8865_LDO3_CTRL;
		break;
	case ACT8865_ID_LDO4:
		reg = ACT8865_LDO4_CTRL;
		break;
	default:
		return -EINVAL;
	}

	ret = regmap_read(regmap, reg, &val);
	if (ret)
		return ret;

	if (id <= ACT8865_ID_DCDC3 && (val & BIT(5)))
		return REGULATOR_MODE_FAST;
	else if (id > ACT8865_ID_DCDC3 && !(val & BIT(5)))
		return REGULATOR_MODE_NORMAL;
	else
		return REGULATOR_MODE_STANDBY;
}
/* Ops for the bucks and the ACT8600 SUDCDC: adjustable voltage plus mode. */
static const struct regulator_ops act8865_ops = {
	.list_voltage = regulator_list_voltage_linear_range,
	.map_voltage = regulator_map_voltage_linear_range,
	.get_voltage_sel = regulator_get_voltage_sel_regmap,
	.set_voltage_sel = regulator_set_voltage_sel_regmap,
	.enable = regulator_enable_regmap,
	.disable = regulator_disable_regmap,
	.set_mode = act8865_set_mode,
	.get_mode = act8865_get_mode,
	.is_enabled = regulator_is_enabled_regmap,
	.set_suspend_enable = act8865_set_suspend_enable,
	.set_suspend_disable = act8865_set_suspend_disable,
};

/* LDO ops: same as the buck ops plus output pull-down control. */
static const struct regulator_ops act8865_ldo_ops = {
	.list_voltage = regulator_list_voltage_linear_range,
	.map_voltage = regulator_map_voltage_linear_range,
	.get_voltage_sel = regulator_get_voltage_sel_regmap,
	.set_voltage_sel = regulator_set_voltage_sel_regmap,
	.enable = regulator_enable_regmap,
	.disable = regulator_disable_regmap,
	.set_mode = act8865_set_mode,
	.get_mode = act8865_get_mode,
	.is_enabled = regulator_is_enabled_regmap,
	.set_suspend_enable = act8865_set_suspend_enable,
	.set_suspend_disable = act8865_set_suspend_disable,
	.set_pull_down = regulator_set_pull_down_regmap,
};

/* Fixed-voltage ACT8600 LDO9/LDO10: on/off control only. */
static const struct regulator_ops act8865_fixed_ldo_ops = {
	.enable = regulator_enable_regmap,
	.disable = regulator_disable_regmap,
	.is_enabled = regulator_is_enabled_regmap,
};
/*
 * ACT88xx_REG_() - build a regulator_desc for one adjustable output.
 * Register names are pasted together from the family (ACT8600/ACT8846/
 * ACT8865), the output id and the VSEL register suffix, e.g.
 * ACT8865_DCDC1_VSET1 and ACT8865_DCDC1_CTRL.
 */
#define ACT88xx_REG_(_name, _family, _id, _vsel_reg, _supply, _ops) \
	[_family##_ID_##_id] = { \
		.name = _name, \
		.of_match = of_match_ptr(_name), \
		.of_map_mode = act8865_of_map_mode, \
		.regulators_node = of_match_ptr("regulators"), \
		.supply_name = _supply, \
		.id = _family##_ID_##_id, \
		.type = REGULATOR_VOLTAGE, \
		.ops = _ops, \
		.n_voltages = ACT8865_VOLTAGE_NUM, \
		.linear_ranges = act8865_voltage_ranges, \
		.n_linear_ranges = ARRAY_SIZE(act8865_voltage_ranges), \
		.vsel_reg = _family##_##_id##_##_vsel_reg, \
		.vsel_mask = ACT8865_VSEL_MASK, \
		.enable_reg = _family##_##_id##_CTRL, \
		.enable_mask = ACT8865_ENA, \
		.pull_down_reg = _family##_##_id##_CTRL, \
		.pull_down_mask = ACT8865_DIS, \
		.owner = THIS_MODULE, \
	}

/* Buck (and generic) outputs: plain ops without pull-down support. */
#define ACT88xx_REG(_name, _family, _id, _vsel_reg, _supply) \
	ACT88xx_REG_(_name, _family, _id, _vsel_reg, _supply, &act8865_ops)

/* LDO outputs: additionally wire up .set_pull_down. */
#define ACT88xx_LDO(_name, _family, _id, _vsel_reg, _supply) \
	ACT88xx_REG_(_name, _family, _id, _vsel_reg, _supply, &act8865_ldo_ops)
/*
 * ACT8600 output descriptors.  The SUDCDC and the two fixed LDOs do not fit
 * the ACT88xx_REG() template and are therefore spelled out by hand.
 */
static const struct regulator_desc act8600_regulators[] = {
	ACT88xx_REG("DCDC1", ACT8600, DCDC1, VSET, "vp1"),
	ACT88xx_REG("DCDC2", ACT8600, DCDC2, VSET, "vp2"),
	ACT88xx_REG("DCDC3", ACT8600, DCDC3, VSET, "vp3"),
	/* Step-up DC/DC with its own (much wider) voltage ladder. */
	{
		.name = "SUDCDC_REG4",
		.of_match = of_match_ptr("SUDCDC_REG4"),
		.regulators_node = of_match_ptr("regulators"),
		.id = ACT8600_ID_SUDCDC4,
		.ops = &act8865_ops,
		.type = REGULATOR_VOLTAGE,
		.n_voltages = ACT8600_SUDCDC_VOLTAGE_NUM,
		.linear_ranges = act8600_sudcdc_voltage_ranges,
		.n_linear_ranges = ARRAY_SIZE(act8600_sudcdc_voltage_ranges),
		.vsel_reg = ACT8600_SUDCDC4_VSET,
		.vsel_mask = ACT8600_SUDCDC_VSEL_MASK,
		.enable_reg = ACT8600_SUDCDC4_CTRL,
		.enable_mask = ACT8865_ENA,
		.owner = THIS_MODULE,
	},
	ACT88xx_REG("LDO5", ACT8600, LDO5, VSET, "inl"),
	ACT88xx_REG("LDO6", ACT8600, LDO6, VSET, "inl"),
	ACT88xx_REG("LDO7", ACT8600, LDO7, VSET, "inl"),
	ACT88xx_REG("LDO8", ACT8600, LDO8, VSET, "inl"),
	/* LDO9 and LDO10 are fixed-voltage and share one control register. */
	{
		.name = "LDO_REG9",
		.of_match = of_match_ptr("LDO_REG9"),
		.regulators_node = of_match_ptr("regulators"),
		.id = ACT8600_ID_LDO9,
		.ops = &act8865_fixed_ldo_ops,
		.type = REGULATOR_VOLTAGE,
		.n_voltages = 1,
		.fixed_uV = 3300000,
		.enable_reg = ACT8600_LDO910_CTRL,
		.enable_mask = ACT8865_ENA,
		.owner = THIS_MODULE,
	},
	{
		.name = "LDO_REG10",
		.of_match = of_match_ptr("LDO_REG10"),
		.regulators_node = of_match_ptr("regulators"),
		.id = ACT8600_ID_LDO10,
		.ops = &act8865_fixed_ldo_ops,
		.type = REGULATOR_VOLTAGE,
		.n_voltages = 1,
		.fixed_uV = 1200000,
		.enable_reg = ACT8600_LDO910_CTRL,
		.enable_mask = ACT8600_LDO10_ENA,
		.owner = THIS_MODULE,
	},
};
/* ACT8846 output descriptors (REG2-4 use the VSET0 register variant). */
static const struct regulator_desc act8846_regulators[] = {
	ACT88xx_REG("REG1", ACT8846, REG1, VSET, "vp1"),
	ACT88xx_REG("REG2", ACT8846, REG2, VSET0, "vp2"),
	ACT88xx_REG("REG3", ACT8846, REG3, VSET0, "vp3"),
	ACT88xx_REG("REG4", ACT8846, REG4, VSET0, "vp4"),
	ACT88xx_REG("REG5", ACT8846, REG5, VSET, "inl1"),
	ACT88xx_REG("REG6", ACT8846, REG6, VSET, "inl1"),
	ACT88xx_REG("REG7", ACT8846, REG7, VSET, "inl1"),
	ACT88xx_REG("REG8", ACT8846, REG8, VSET, "inl2"),
	ACT88xx_REG("REG9", ACT8846, REG9, VSET, "inl2"),
	ACT88xx_REG("REG10", ACT8846, REG10, VSET, "inl3"),
	ACT88xx_REG("REG11", ACT8846, REG11, VSET, "inl3"),
	ACT88xx_REG("REG12", ACT8846, REG12, VSET, "inl3"),
};

/* ACT8865 descriptors using the VSET1 register set (default). */
static const struct regulator_desc act8865_regulators[] = {
	ACT88xx_REG("DCDC_REG1", ACT8865, DCDC1, VSET1, "vp1"),
	ACT88xx_REG("DCDC_REG2", ACT8865, DCDC2, VSET1, "vp2"),
	ACT88xx_REG("DCDC_REG3", ACT8865, DCDC3, VSET1, "vp3"),
	ACT88xx_LDO("LDO_REG1", ACT8865, LDO1, VSET, "inl45"),
	ACT88xx_LDO("LDO_REG2", ACT8865, LDO2, VSET, "inl45"),
	ACT88xx_LDO("LDO_REG3", ACT8865, LDO3, VSET, "inl67"),
	ACT88xx_LDO("LDO_REG4", ACT8865, LDO4, VSET, "inl67"),
};

/*
 * ACT8865 descriptors using the VSET2 register set, selected when the DT
 * carries "active-semi,vsel-high" (the VSEL pin is tied high).
 */
static const struct regulator_desc act8865_alt_regulators[] = {
	ACT88xx_REG("DCDC_REG1", ACT8865, DCDC1, VSET2, "vp1"),
	ACT88xx_REG("DCDC_REG2", ACT8865, DCDC2, VSET2, "vp2"),
	ACT88xx_REG("DCDC_REG3", ACT8865, DCDC3, VSET2, "vp3"),
	ACT88xx_LDO("LDO_REG1", ACT8865, LDO1, VSET, "inl45"),
	ACT88xx_LDO("LDO_REG2", ACT8865, LDO2, VSET, "inl45"),
	ACT88xx_LDO("LDO_REG3", ACT8865, LDO3, VSET, "inl67"),
	ACT88xx_LDO("LDO_REG4", ACT8865, LDO4, VSET, "inl67"),
};
#ifdef CONFIG_OF
/* DT match table; .data carries the chip variant used to pick descriptors. */
static const struct of_device_id act8865_dt_ids[] = {
	{ .compatible = "active-semi,act8600", .data = (void *)ACT8600 },
	{ .compatible = "active-semi,act8846", .data = (void *)ACT8846 },
	{ .compatible = "active-semi,act8865", .data = (void *)ACT8865 },
	{ }
};
MODULE_DEVICE_TABLE(of, act8865_dt_ids);
#endif
static struct act8865_regulator_data *act8865_get_regulator_data(
int id, struct act8865_platform_data *pdata)
{
int i;
for (i = 0; i < pdata->num_regulators; i++) {
if (pdata->regulators[i].id == id)
return &pdata->regulators[i];
}
return NULL;
}
/* Client saved for the pm_power_off hook, which takes no context argument. */
static struct i2c_client *act8865_i2c_client;

/*
 * System poweroff handler: write the master-off bit and spin until the
 * supply actually collapses — this function must never return.
 */
static void act8865_power_off(void)
{
	struct act8865 *act8865;

	act8865 = i2c_get_clientdata(act8865_i2c_client);
	regmap_write(act8865->regmap, act8865->off_reg, act8865->off_mask);
	while (1);
}
/*
 * Decode the ACT8600 charger state machine bits into a power-supply status.
 *
 * CSTATE0/CSTATE1 combinations: 10 = charging, 01 = charge suspended
 * (not charging), 00 = charger off (discharging), 11 = undefined.
 *
 * Returns a POWER_SUPPLY_STATUS_* value or a negative error code.
 */
static int act8600_charger_get_status(struct regmap *map)
{
	unsigned int val;
	int ret;
	u8 state0, state1;

	ret = regmap_read(map, ACT8600_APCH_STAT, &val);
	if (ret < 0)
		return ret;

	state0 = val & ACT8600_APCH_CSTATE0;
	state1 = val & ACT8600_APCH_CSTATE1;

	if (state0)
		return state1 ? POWER_SUPPLY_STATUS_UNKNOWN
			      : POWER_SUPPLY_STATUS_CHARGING;

	return state1 ? POWER_SUPPLY_STATUS_NOT_CHARGING
		      : POWER_SUPPLY_STATUS_DISCHARGING;
}
/*
 * power_supply .get_property callback — only STATUS is supported.
 * Returns 0 on success, -EINVAL for unsupported properties, or a negative
 * error propagated from the register read.
 */
static int act8600_charger_get_property(struct power_supply *psy,
		enum power_supply_property psp, union power_supply_propval *val)
{
	struct regmap *map = power_supply_get_drvdata(psy);
	int ret;

	switch (psp) {
	case POWER_SUPPLY_PROP_STATUS:
		ret = act8600_charger_get_status(map);
		if (ret < 0)
			return ret;
		val->intval = ret;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
/* Only the charging status is exposed to userspace. */
static enum power_supply_property act8600_charger_properties[] = {
	POWER_SUPPLY_PROP_STATUS,
};

static const struct power_supply_desc act8600_charger_desc = {
	.name = "act8600-charger",
	.type = POWER_SUPPLY_TYPE_BATTERY,
	.properties = act8600_charger_properties,
	.num_properties = ARRAY_SIZE(act8600_charger_properties),
	.get_property = act8600_charger_get_property,
};

/*
 * Register the ACT8600 battery charger as a power supply; the regmap is
 * handed over as driver data for the property callbacks.
 * Returns 0 on success or a negative error code.
 */
static int act8600_charger_probe(struct device *dev, struct regmap *regmap)
{
	struct power_supply *charger;
	struct power_supply_config cfg = {
		.drv_data = regmap,
		.of_node = dev->of_node,
	};

	charger = devm_power_supply_register(dev, &act8600_charger_desc, &cfg);

	return PTR_ERR_OR_ZERO(charger);
}
/*
 * act8865_pmic_probe() - I2C probe for the ACT8600/ACT8846/ACT8865 PMICs.
 *
 * Identifies the chip variant (DT match data or I2C device id), creates the
 * regmap, optionally installs a pm_power_off handler when the device is the
 * system power controller, registers every regulator and — on the ACT8600 —
 * the battery charger, then unlocks the ACT8865 expert registers.
 *
 * Fixes vs. previous revision: repaired the corrupted address-of in the
 * descriptor lookup ("&regulators[i]") and newline-terminated the charger
 * probe error message per kernel logging convention.
 *
 * Returns 0 on success or a negative error code.
 */
static int act8865_pmic_probe(struct i2c_client *client)
{
	const struct i2c_device_id *i2c_id = i2c_client_get_device_id(client);
	const struct regulator_desc *regulators;
	struct act8865_platform_data *pdata = NULL;
	struct device *dev = &client->dev;
	int i, ret, num_regulators;
	struct act8865 *act8865;
	const struct regmap_config *regmap_config;
	unsigned long type;
	int off_reg, off_mask;
	int voltage_select = 0;

	if (dev->of_node) {
		const struct of_device_id *id;

		id = of_match_device(of_match_ptr(act8865_dt_ids), dev);
		if (!id)
			return -ENODEV;

		type = (unsigned long) id->data;

		/* VSEL pin tied high selects the alternate VSET2 registers. */
		voltage_select = !!of_get_property(dev->of_node,
						   "active-semi,vsel-high",
						   NULL);
	} else {
		type = i2c_id->driver_data;
		pdata = dev_get_platdata(dev);
	}

	/* Pick descriptors, regmap config and poweroff register per variant. */
	switch (type) {
	case ACT8600:
		regulators = act8600_regulators;
		num_regulators = ARRAY_SIZE(act8600_regulators);
		regmap_config = &act8600_regmap_config;
		/* ACT8600 has no software master-off capability. */
		off_reg = -1;
		off_mask = -1;
		break;
	case ACT8846:
		regulators = act8846_regulators;
		num_regulators = ARRAY_SIZE(act8846_regulators);
		regmap_config = &act8865_regmap_config;
		off_reg = ACT8846_GLB_OFF_CTRL;
		off_mask = ACT8846_OFF_SYSMASK;
		break;
	case ACT8865:
		if (voltage_select) {
			regulators = act8865_alt_regulators;
			num_regulators = ARRAY_SIZE(act8865_alt_regulators);
		} else {
			regulators = act8865_regulators;
			num_regulators = ARRAY_SIZE(act8865_regulators);
		}
		regmap_config = &act8865_regmap_config;
		off_reg = ACT8865_SYS_CTRL;
		off_mask = ACT8865_MSTROFF;
		break;
	default:
		dev_err(dev, "invalid device id %lu\n", type);
		return -EINVAL;
	}

	act8865 = devm_kzalloc(dev, sizeof(struct act8865), GFP_KERNEL);
	if (!act8865)
		return -ENOMEM;

	act8865->regmap = devm_regmap_init_i2c(client, regmap_config);
	if (IS_ERR(act8865->regmap)) {
		ret = PTR_ERR(act8865->regmap);
		dev_err(dev, "Failed to allocate register map: %d\n", ret);
		return ret;
	}

	if (of_device_is_system_power_controller(dev->of_node)) {
		/* Claim pm_power_off only if free and the chip supports it. */
		if (!pm_power_off && (off_reg > 0)) {
			act8865_i2c_client = client;
			act8865->off_reg = off_reg;
			act8865->off_mask = off_mask;
			pm_power_off = act8865_power_off;
		} else {
			dev_err(dev, "Failed to set poweroff capability, already defined\n");
		}
	}

	/* Finally register devices */
	for (i = 0; i < num_regulators; i++) {
		const struct regulator_desc *desc = &regulators[i];
		struct regulator_config config = { };
		struct regulator_dev *rdev;

		config.dev = dev;
		config.driver_data = act8865;
		config.regmap = act8865->regmap;

		if (pdata) {
			struct act8865_regulator_data *rdata;

			rdata = act8865_get_regulator_data(desc->id, pdata);
			if (rdata) {
				config.init_data = rdata->init_data;
				config.of_node = rdata->of_node;
			}
		}

		rdev = devm_regulator_register(dev, desc, &config);
		if (IS_ERR(rdev)) {
			dev_err(dev, "failed to register %s\n", desc->name);
			return PTR_ERR(rdev);
		}
	}

	if (type == ACT8600) {
		ret = act8600_charger_probe(dev, act8865->regmap);
		if (ret < 0) {
			if (ret != -EPROBE_DEFER)
				dev_err(dev, "Failed to probe charger\n");
			return ret;
		}
	}

	i2c_set_clientdata(client, act8865);

	/* Unlock expert registers for ACT8865. */
	return type != ACT8865 ? 0 : regmap_write(act8865->regmap,
						  ACT8865_SYS_UNLK_REGS, 0xef);
}
/* Legacy (non-DT) device ids; driver_data carries the chip variant. */
static const struct i2c_device_id act8865_ids[] = {
	{ .name = "act8600", .driver_data = ACT8600 },
	{ .name = "act8846", .driver_data = ACT8846 },
	{ .name = "act8865", .driver_data = ACT8865 },
	{ },
};
MODULE_DEVICE_TABLE(i2c, act8865_ids);

static struct i2c_driver act8865_pmic_driver = {
	.driver = {
		.name = "act8865",
		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
	},
	.probe = act8865_pmic_probe,
	.id_table = act8865_ids,
};
module_i2c_driver(act8865_pmic_driver);

MODULE_DESCRIPTION("active-semi act88xx voltage regulator driver");
MODULE_AUTHOR("Wenyou Yang <[email protected]>");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/regulator/act8865-regulator.c |
// SPDX-License-Identifier: GPL-2.0+
//
// SLG51000 High PSRR, Multi-Output Regulators
// Copyright (C) 2019 Dialog Semiconductor
//
// Author: Eric Jeong <[email protected]>
#include <linux/err.h>
#include <linux/gpio/consumer.h>
#include <linux/i2c.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/regmap.h>
#include <linux/regulator/driver.h>
#include <linux/regulator/machine.h>
#include <linux/regulator/of_regulator.h>
#include "slg51000-regulator.h"
/* Index of the SYSCTL entry in es_reg[] and the evt[] snapshot. */
#define SLG51000_SCTL_EVT               7
/* Seven LDO event/status/mask register triples plus the SYSCTL one. */
#define SLG51000_MAX_EVT_REGISTER       8
/* High-power LDO minimum output in the low/high voltage range (uV). */
#define SLG51000_LDOHP_LV_MIN           1200000
#define SLG51000_LDOHP_HV_MIN           2400000
/* Regulator indices; also used as bit positions in MATRIX_CONF_A enables. */
enum slg51000_regulators {
	SLG51000_REGULATOR_LDO1 = 0,
	SLG51000_REGULATOR_LDO2,
	SLG51000_REGULATOR_LDO3,
	SLG51000_REGULATOR_LDO4,
	SLG51000_REGULATOR_LDO5,
	SLG51000_REGULATOR_LDO6,
	SLG51000_REGULATOR_LDO7,
	SLG51000_MAX_REGULATORS,
};

/* Per-chip driver state. */
struct slg51000 {
	struct device *dev;
	struct regmap *regmap;
	/* descs point into regls_desc[], patched at init per OTP contents */
	struct regulator_desc *rdesc[SLG51000_MAX_REGULATORS];
	struct regulator_dev *rdev[SLG51000_MAX_REGULATORS];
	struct gpio_desc *cs_gpiod;	/* optional chip-select GPIO */
	int chip_irq;
};

/* Pair of event and status register addresses for one interrupt source. */
struct slg51000_evt_sta {
	unsigned int ereg;
	unsigned int sreg;
};
/*
 * Event/status register pairs, indexed by regulator id; the final entry
 * (SLG51000_SCTL_EVT) is the system-control pair.
 */
static const struct slg51000_evt_sta es_reg[SLG51000_MAX_EVT_REGISTER] = {
	{SLG51000_LDO1_EVENT, SLG51000_LDO1_STATUS},
	{SLG51000_LDO2_EVENT, SLG51000_LDO2_STATUS},
	{SLG51000_LDO3_EVENT, SLG51000_LDO3_STATUS},
	{SLG51000_LDO4_EVENT, SLG51000_LDO4_STATUS},
	{SLG51000_LDO5_EVENT, SLG51000_LDO5_STATUS},
	{SLG51000_LDO6_EVENT, SLG51000_LDO6_STATUS},
	{SLG51000_LDO7_EVENT, SLG51000_LDO7_STATUS},
	{SLG51000_SYSCTL_EVENT, SLG51000_SYSCTL_STATUS},
};
/* Registers the driver may write: enables, VSEL/MINV/MAXV and IRQ masks. */
static const struct regmap_range slg51000_writeable_ranges[] = {
	regmap_reg_range(SLG51000_SYSCTL_MATRIX_CONF_A,
			 SLG51000_SYSCTL_MATRIX_CONF_A),
	regmap_reg_range(SLG51000_LDO1_VSEL, SLG51000_LDO1_VSEL),
	regmap_reg_range(SLG51000_LDO1_MINV, SLG51000_LDO1_MAXV),
	regmap_reg_range(SLG51000_LDO1_IRQ_MASK, SLG51000_LDO1_IRQ_MASK),
	regmap_reg_range(SLG51000_LDO2_VSEL, SLG51000_LDO2_VSEL),
	regmap_reg_range(SLG51000_LDO2_MINV, SLG51000_LDO2_MAXV),
	regmap_reg_range(SLG51000_LDO2_IRQ_MASK, SLG51000_LDO2_IRQ_MASK),
	regmap_reg_range(SLG51000_LDO3_VSEL, SLG51000_LDO3_VSEL),
	regmap_reg_range(SLG51000_LDO3_MINV, SLG51000_LDO3_MAXV),
	regmap_reg_range(SLG51000_LDO3_IRQ_MASK, SLG51000_LDO3_IRQ_MASK),
	regmap_reg_range(SLG51000_LDO4_VSEL, SLG51000_LDO4_VSEL),
	regmap_reg_range(SLG51000_LDO4_MINV, SLG51000_LDO4_MAXV),
	regmap_reg_range(SLG51000_LDO4_IRQ_MASK, SLG51000_LDO4_IRQ_MASK),
	regmap_reg_range(SLG51000_LDO5_VSEL, SLG51000_LDO5_VSEL),
	regmap_reg_range(SLG51000_LDO5_MINV, SLG51000_LDO5_MAXV),
	regmap_reg_range(SLG51000_LDO5_IRQ_MASK, SLG51000_LDO5_IRQ_MASK),
	regmap_reg_range(SLG51000_LDO6_VSEL, SLG51000_LDO6_VSEL),
	regmap_reg_range(SLG51000_LDO6_MINV, SLG51000_LDO6_MAXV),
	regmap_reg_range(SLG51000_LDO6_IRQ_MASK, SLG51000_LDO6_IRQ_MASK),
	regmap_reg_range(SLG51000_LDO7_VSEL, SLG51000_LDO7_VSEL),
	regmap_reg_range(SLG51000_LDO7_MINV, SLG51000_LDO7_MAXV),
	regmap_reg_range(SLG51000_LDO7_IRQ_MASK, SLG51000_LDO7_IRQ_MASK),
	regmap_reg_range(SLG51000_OTP_IRQ_MASK, SLG51000_OTP_IRQ_MASK),
};
/* Registers the driver may read: ids, config, status, OTP and lock state. */
static const struct regmap_range slg51000_readable_ranges[] = {
	regmap_reg_range(SLG51000_SYSCTL_PATN_ID_B0,
			 SLG51000_SYSCTL_PATN_ID_B2),
	regmap_reg_range(SLG51000_SYSCTL_SYS_CONF_A,
			 SLG51000_SYSCTL_SYS_CONF_A),
	regmap_reg_range(SLG51000_SYSCTL_SYS_CONF_D,
			 SLG51000_SYSCTL_MATRIX_CONF_B),
	regmap_reg_range(SLG51000_SYSCTL_REFGEN_CONF_C,
			 SLG51000_SYSCTL_UVLO_CONF_A),
	regmap_reg_range(SLG51000_SYSCTL_FAULT_LOG1, SLG51000_SYSCTL_IRQ_MASK),
	regmap_reg_range(SLG51000_IO_GPIO1_CONF, SLG51000_IO_GPIO_STATUS),
	regmap_reg_range(SLG51000_LUTARRAY_LUT_VAL_0,
			 SLG51000_LUTARRAY_LUT_VAL_11),
	regmap_reg_range(SLG51000_MUXARRAY_INPUT_SEL_0,
			 SLG51000_MUXARRAY_INPUT_SEL_63),
	regmap_reg_range(SLG51000_PWRSEQ_RESOURCE_EN_0,
			 SLG51000_PWRSEQ_INPUT_SENSE_CONF_B),
	regmap_reg_range(SLG51000_LDO1_VSEL, SLG51000_LDO1_VSEL),
	regmap_reg_range(SLG51000_LDO1_MINV, SLG51000_LDO1_MAXV),
	regmap_reg_range(SLG51000_LDO1_MISC1, SLG51000_LDO1_VSEL_ACTUAL),
	regmap_reg_range(SLG51000_LDO1_EVENT, SLG51000_LDO1_IRQ_MASK),
	regmap_reg_range(SLG51000_LDO2_VSEL, SLG51000_LDO2_VSEL),
	regmap_reg_range(SLG51000_LDO2_MINV, SLG51000_LDO2_MAXV),
	regmap_reg_range(SLG51000_LDO2_MISC1, SLG51000_LDO2_VSEL_ACTUAL),
	regmap_reg_range(SLG51000_LDO2_EVENT, SLG51000_LDO2_IRQ_MASK),
	regmap_reg_range(SLG51000_LDO3_VSEL, SLG51000_LDO3_VSEL),
	regmap_reg_range(SLG51000_LDO3_MINV, SLG51000_LDO3_MAXV),
	regmap_reg_range(SLG51000_LDO3_CONF1, SLG51000_LDO3_VSEL_ACTUAL),
	regmap_reg_range(SLG51000_LDO3_EVENT, SLG51000_LDO3_IRQ_MASK),
	regmap_reg_range(SLG51000_LDO4_VSEL, SLG51000_LDO4_VSEL),
	regmap_reg_range(SLG51000_LDO4_MINV, SLG51000_LDO4_MAXV),
	regmap_reg_range(SLG51000_LDO4_CONF1, SLG51000_LDO4_VSEL_ACTUAL),
	regmap_reg_range(SLG51000_LDO4_EVENT, SLG51000_LDO4_IRQ_MASK),
	regmap_reg_range(SLG51000_LDO5_VSEL, SLG51000_LDO5_VSEL),
	regmap_reg_range(SLG51000_LDO5_MINV, SLG51000_LDO5_MAXV),
	regmap_reg_range(SLG51000_LDO5_TRIM2, SLG51000_LDO5_TRIM2),
	regmap_reg_range(SLG51000_LDO5_CONF1, SLG51000_LDO5_VSEL_ACTUAL),
	regmap_reg_range(SLG51000_LDO5_EVENT, SLG51000_LDO5_IRQ_MASK),
	regmap_reg_range(SLG51000_LDO6_VSEL, SLG51000_LDO6_VSEL),
	regmap_reg_range(SLG51000_LDO6_MINV, SLG51000_LDO6_MAXV),
	regmap_reg_range(SLG51000_LDO6_TRIM2, SLG51000_LDO6_TRIM2),
	regmap_reg_range(SLG51000_LDO6_CONF1, SLG51000_LDO6_VSEL_ACTUAL),
	regmap_reg_range(SLG51000_LDO6_EVENT, SLG51000_LDO6_IRQ_MASK),
	regmap_reg_range(SLG51000_LDO7_VSEL, SLG51000_LDO7_VSEL),
	regmap_reg_range(SLG51000_LDO7_MINV, SLG51000_LDO7_MAXV),
	regmap_reg_range(SLG51000_LDO7_CONF1, SLG51000_LDO7_VSEL_ACTUAL),
	regmap_reg_range(SLG51000_LDO7_EVENT, SLG51000_LDO7_IRQ_MASK),
	regmap_reg_range(SLG51000_OTP_EVENT, SLG51000_OTP_EVENT),
	regmap_reg_range(SLG51000_OTP_IRQ_MASK, SLG51000_OTP_IRQ_MASK),
	regmap_reg_range(SLG51000_OTP_LOCK_OTP_PROG, SLG51000_OTP_LOCK_CTRL),
	regmap_reg_range(SLG51000_LOCK_GLOBAL_LOCK_CTRL1,
			 SLG51000_LOCK_GLOBAL_LOCK_CTRL1),
};
/* Hardware-updated registers that must bypass the regmap cache. */
static const struct regmap_range slg51000_volatile_ranges[] = {
	regmap_reg_range(SLG51000_SYSCTL_FAULT_LOG1, SLG51000_SYSCTL_STATUS),
	regmap_reg_range(SLG51000_IO_GPIO_STATUS, SLG51000_IO_GPIO_STATUS),
	regmap_reg_range(SLG51000_LDO1_EVENT, SLG51000_LDO1_STATUS),
	regmap_reg_range(SLG51000_LDO2_EVENT, SLG51000_LDO2_STATUS),
	regmap_reg_range(SLG51000_LDO3_EVENT, SLG51000_LDO3_STATUS),
	regmap_reg_range(SLG51000_LDO4_EVENT, SLG51000_LDO4_STATUS),
	regmap_reg_range(SLG51000_LDO5_EVENT, SLG51000_LDO5_STATUS),
	regmap_reg_range(SLG51000_LDO6_EVENT, SLG51000_LDO6_STATUS),
	regmap_reg_range(SLG51000_LDO7_EVENT, SLG51000_LDO7_STATUS),
	regmap_reg_range(SLG51000_OTP_EVENT, SLG51000_OTP_EVENT),
};

static const struct regmap_access_table slg51000_writeable_table = {
	.yes_ranges	= slg51000_writeable_ranges,
	.n_yes_ranges	= ARRAY_SIZE(slg51000_writeable_ranges),
};

static const struct regmap_access_table slg51000_readable_table = {
	.yes_ranges	= slg51000_readable_ranges,
	.n_yes_ranges	= ARRAY_SIZE(slg51000_readable_ranges),
};

static const struct regmap_access_table slg51000_volatile_table = {
	.yes_ranges	= slg51000_volatile_ranges,
	.n_yes_ranges	= ARRAY_SIZE(slg51000_volatile_ranges),
};

/* 16-bit register addresses, 8-bit values, access gated by the tables. */
static const struct regmap_config slg51000_regmap_config = {
	.reg_bits = 16,
	.val_bits = 8,
	.max_register = 0x8000,
	.wr_table = &slg51000_writeable_table,
	.rd_table = &slg51000_readable_table,
	.volatile_table = &slg51000_volatile_table,
};
/* Ops for adjustable LDOs: linear voltage mapping via regmap helpers. */
static const struct regulator_ops slg51000_regl_ops = {
	.enable = regulator_enable_regmap,
	.disable = regulator_disable_regmap,
	.is_enabled = regulator_is_enabled_regmap,
	.list_voltage = regulator_list_voltage_linear,
	.map_voltage = regulator_map_voltage_linear,
	.get_voltage_sel = regulator_get_voltage_sel_regmap,
	.set_voltage_sel = regulator_set_voltage_sel_regmap,
};

/* Ops used when an LDO is OTP-configured as a bypass switch: on/off only. */
static const struct regulator_ops slg51000_switch_ops = {
	.enable = regulator_enable_regmap,
	.disable = regulator_disable_regmap,
	.is_enabled = regulator_is_enabled_regmap,
};
/*
 * Per-regulator DT parse hook: pick up an optional, possibly shared
 * "enable" GPIO for this LDO.  Lookup failures are deliberately ignored —
 * the GPIO is not mandatory — so this always returns 0.
 */
static int slg51000_of_parse_cb(struct device_node *np,
				const struct regulator_desc *desc,
				struct regulator_config *config)
{
	struct gpio_desc *ena_gpiod;

	ena_gpiod = fwnode_gpiod_get_index(of_fwnode_handle(np), "enable", 0,
					   GPIOD_OUT_LOW |
					   GPIOD_FLAGS_BIT_NONEXCLUSIVE,
					   "gpio-en-ldo");
	if (IS_ERR(ena_gpiod))
		return 0;

	config->ena_gpiod = ena_gpiod;

	return 0;
}
/*
 * SLG51000_REGL_DESC() - build the default regulator_desc for one LDO.
 * The voltage range fields (min_uV, n_voltages, linear_min_sel) are only
 * starting values; slg51000_regulator_init() patches them from the chip's
 * OTP-programmed MINV/MAXV registers before registration.
 */
#define SLG51000_REGL_DESC(_id, _name, _s_name, _min, _step) \
	[SLG51000_REGULATOR_##_id] = { \
		.name = #_name, \
		.supply_name = _s_name, \
		.id = SLG51000_REGULATOR_##_id, \
		.of_match = of_match_ptr(#_name), \
		.of_parse_cb = slg51000_of_parse_cb, \
		.ops = &slg51000_regl_ops, \
		.regulators_node = of_match_ptr("regulators"), \
		.n_voltages = 256, \
		.min_uV = _min, \
		.uV_step = _step, \
		.linear_min_sel = 0, \
		.vsel_mask = SLG51000_VSEL_MASK, \
		.vsel_reg = SLG51000_##_id##_VSEL, \
		.enable_reg = SLG51000_SYSCTL_MATRIX_CONF_A, \
		.enable_mask = BIT(SLG51000_REGULATOR_##_id), \
		.type = REGULATOR_VOLTAGE, \
		.owner = THIS_MODULE, \
	}

/* Non-const on purpose: entries are patched at probe time from OTP data. */
static struct regulator_desc regls_desc[SLG51000_MAX_REGULATORS] = {
	SLG51000_REGL_DESC(LDO1, ldo1, NULL,   2400000,  5000),
	SLG51000_REGL_DESC(LDO2, ldo2, NULL,   2400000,  5000),
	SLG51000_REGL_DESC(LDO3, ldo3, "vin3", 1200000, 10000),
	SLG51000_REGL_DESC(LDO4, ldo4, "vin4", 1200000, 10000),
	SLG51000_REGL_DESC(LDO5, ldo5, "vin5",  400000,  5000),
	SLG51000_REGL_DESC(LDO6, ldo6, "vin6",  400000,  5000),
	SLG51000_REGL_DESC(LDO7, ldo7, "vin7", 1200000, 10000),
};
static int slg51000_regulator_init(struct slg51000 *chip)
{
struct regulator_config config = { };
struct regulator_desc *rdesc;
unsigned int reg, val;
u8 vsel_range[2];
int id, ret = 0;
const unsigned int min_regs[SLG51000_MAX_REGULATORS] = {
SLG51000_LDO1_MINV, SLG51000_LDO2_MINV, SLG51000_LDO3_MINV,
SLG51000_LDO4_MINV, SLG51000_LDO5_MINV, SLG51000_LDO6_MINV,
SLG51000_LDO7_MINV,
};
for (id = 0; id < SLG51000_MAX_REGULATORS; id++) {
chip->rdesc[id] = ®ls_desc[id];
rdesc = chip->rdesc[id];
config.regmap = chip->regmap;
config.dev = chip->dev;
config.driver_data = chip;
ret = regmap_bulk_read(chip->regmap, min_regs[id],
vsel_range, 2);
if (ret < 0) {
dev_err(chip->dev,
"Failed to read the MIN register\n");
return ret;
}
switch (id) {
case SLG51000_REGULATOR_LDO1:
case SLG51000_REGULATOR_LDO2:
if (id == SLG51000_REGULATOR_LDO1)
reg = SLG51000_LDO1_MISC1;
else
reg = SLG51000_LDO2_MISC1;
ret = regmap_read(chip->regmap, reg, &val);
if (ret < 0) {
dev_err(chip->dev,
"Failed to read voltage range of ldo%d\n",
id + 1);
return ret;
}
rdesc->linear_min_sel = vsel_range[0];
rdesc->n_voltages = vsel_range[1] + 1;
if (val & SLG51000_SEL_VRANGE_MASK)
rdesc->min_uV = SLG51000_LDOHP_HV_MIN
+ (vsel_range[0]
* rdesc->uV_step);
else
rdesc->min_uV = SLG51000_LDOHP_LV_MIN
+ (vsel_range[0]
* rdesc->uV_step);
break;
case SLG51000_REGULATOR_LDO5:
case SLG51000_REGULATOR_LDO6:
if (id == SLG51000_REGULATOR_LDO5)
reg = SLG51000_LDO5_TRIM2;
else
reg = SLG51000_LDO6_TRIM2;
ret = regmap_read(chip->regmap, reg, &val);
if (ret < 0) {
dev_err(chip->dev,
"Failed to read LDO mode register\n");
return ret;
}
if (val & SLG51000_SEL_BYP_MODE_MASK) {
rdesc->ops = &slg51000_switch_ops;
rdesc->n_voltages = 0;
rdesc->min_uV = 0;
rdesc->uV_step = 0;
rdesc->linear_min_sel = 0;
break;
}
fallthrough; /* to the check below */
default:
rdesc->linear_min_sel = vsel_range[0];
rdesc->n_voltages = vsel_range[1] + 1;
rdesc->min_uV = rdesc->min_uV
+ (vsel_range[0] * rdesc->uV_step);
break;
}
chip->rdev[id] = devm_regulator_register(chip->dev, rdesc,
&config);
if (IS_ERR(chip->rdev[id])) {
ret = PTR_ERR(chip->rdev[id]);
dev_err(chip->dev,
"Failed to register regulator(%s):%d\n",
chip->rdesc[id]->name, ret);
return ret;
}
}
return 0;
}
/*
 * Threaded IRQ handler: snapshot every event/status/mask register triple,
 * then fan out regulator notifier events.
 *
 * An event is only acted on when the corresponding IRQ mask bit is clear
 * (mask bit set = interrupt source disabled).  Reading the event registers
 * is what acknowledges the interrupt (they are volatile in the regmap).
 */
static irqreturn_t slg51000_irq_handler(int irq, void *data)
{
	struct slg51000 *chip = data;
	struct regmap *regmap = chip->regmap;
	enum { R0 = 0, R1, R2, REG_MAX };	/* event, status, mask */
	u8 evt[SLG51000_MAX_EVT_REGISTER][REG_MAX];
	int ret, i, handled = IRQ_NONE;
	unsigned int evt_otp, mask_otp;

	/* Read event[R0], status[R1] and mask[R2] register */
	for (i = 0; i < SLG51000_MAX_EVT_REGISTER; i++) {
		ret = regmap_bulk_read(regmap, es_reg[i].ereg, evt[i], REG_MAX);
		if (ret < 0) {
			dev_err(chip->dev,
				"Failed to read event registers(%d)\n", ret);
			return IRQ_NONE;
		}
	}

	ret = regmap_read(regmap, SLG51000_OTP_EVENT, &evt_otp);
	if (ret < 0) {
		dev_err(chip->dev,
			"Failed to read otp event registers(%d)\n", ret);
		return IRQ_NONE;
	}

	ret = regmap_read(regmap, SLG51000_OTP_IRQ_MASK, &mask_otp);
	if (ret < 0) {
		dev_err(chip->dev,
			"Failed to read otp mask register(%d)\n", ret);
		return IRQ_NONE;
	}

	/* OTP CRC / read-complete event. */
	if ((evt_otp & SLG51000_EVT_CRC_MASK) &&
	    !(mask_otp & SLG51000_IRQ_CRC_MASK)) {
		dev_info(chip->dev,
			 "OTP has been read or OTP crc is not zero\n");
		handled = IRQ_HANDLED;
	}

	/* Per-LDO over-current events. */
	for (i = 0; i < SLG51000_MAX_REGULATORS; i++) {
		if (!(evt[i][R2] & SLG51000_IRQ_ILIM_FLAG_MASK) &&
		    (evt[i][R0] & SLG51000_EVT_ILIM_FLAG_MASK)) {
			regulator_notifier_call_chain(chip->rdev[i],
					    REGULATOR_EVENT_OVER_CURRENT, NULL);

			if (evt[i][R1] & SLG51000_STA_ILIM_FLAG_MASK)
				dev_warn(chip->dev,
					 "Over-current limit(ldo%d)\n", i + 1);
			handled = IRQ_HANDLED;
		}
	}

	/* High-temperature warning: notify every LDO that is currently up. */
	if (!(evt[SLG51000_SCTL_EVT][R2] & SLG51000_IRQ_HIGH_TEMP_WARN_MASK) &&
	    (evt[SLG51000_SCTL_EVT][R0] & SLG51000_EVT_HIGH_TEMP_WARN_MASK)) {
		for (i = 0; i < SLG51000_MAX_REGULATORS; i++) {
			if (!(evt[i][R1] & SLG51000_STA_ILIM_FLAG_MASK) &&
			    (evt[i][R1] & SLG51000_STA_VOUT_OK_FLAG_MASK)) {
				regulator_notifier_call_chain(chip->rdev[i],
					       REGULATOR_EVENT_OVER_TEMP, NULL);
			}
		}
		handled = IRQ_HANDLED;
		if (evt[SLG51000_SCTL_EVT][R1] &
		    SLG51000_STA_HIGH_TEMP_WARN_MASK)
			dev_warn(chip->dev, "High temperature warning!\n");
	}

	return handled;
}
/*
 * Read (and thereby clear — the register is volatile and read-to-clear per
 * this driver's usage) the fault log, reporting any set flags at debug
 * level.  Errors are logged but deliberately not propagated: a failed
 * read here must not abort the probe.
 */
static void slg51000_clear_fault_log(struct slg51000 *chip)
{
	unsigned int val = 0;
	int ret = 0;

	ret = regmap_read(chip->regmap, SLG51000_SYSCTL_FAULT_LOG1, &val);
	if (ret < 0) {
		dev_err(chip->dev, "Failed to read Fault log register\n");
		return;
	}

	if (val & SLG51000_FLT_OVER_TEMP_MASK)
		dev_dbg(chip->dev, "Fault log: FLT_OVER_TEMP\n");
	if (val & SLG51000_FLT_POWER_SEQ_CRASH_REQ_MASK)
		dev_dbg(chip->dev, "Fault log: FLT_POWER_SEQ_CRASH_REQ\n");
	if (val & SLG51000_FLT_RST_MASK)
		dev_dbg(chip->dev, "Fault log: FLT_RST\n");
	if (val & SLG51000_FLT_POR_MASK)
		dev_dbg(chip->dev, "Fault log: FLT_POR\n");
}
/*
 * I2C probe: optionally assert the (possibly shared) chip-select GPIO,
 * create the regmap, register all regulators, drain the fault log and
 * hook up the threaded interrupt if one was wired.
 * Returns 0 on success or a negative error code.
 */
static int slg51000_i2c_probe(struct i2c_client *client)
{
	struct device *dev = &client->dev;
	struct slg51000 *chip;
	struct gpio_desc *cs_gpiod;
	int error, ret;

	chip = devm_kzalloc(dev, sizeof(struct slg51000), GFP_KERNEL);
	if (!chip)
		return -ENOMEM;

	cs_gpiod = devm_gpiod_get_optional(dev, "dlg,cs",
					   GPIOD_OUT_HIGH |
					   GPIOD_FLAGS_BIT_NONEXCLUSIVE);
	if (IS_ERR(cs_gpiod))
		return PTR_ERR(cs_gpiod);

	if (cs_gpiod) {
		dev_info(dev, "Found chip selector property\n");
		chip->cs_gpiod = cs_gpiod;
	}

	/* Give the chip time to come up after CS assertion. */
	usleep_range(10000, 11000);

	i2c_set_clientdata(client, chip);
	chip->chip_irq = client->irq;
	chip->dev = dev;
	chip->regmap = devm_regmap_init_i2c(client, &slg51000_regmap_config);
	if (IS_ERR(chip->regmap)) {
		error = PTR_ERR(chip->regmap);
		dev_err(dev, "Failed to allocate register map: %d\n",
			error);
		return error;
	}

	ret = slg51000_regulator_init(chip);
	if (ret < 0) {
		dev_err(chip->dev, "Failed to init regulator(%d)\n", ret);
		return ret;
	}

	slg51000_clear_fault_log(chip);

	/* The interrupt line is optional. */
	if (chip->chip_irq) {
		ret = devm_request_threaded_irq(dev, chip->chip_irq, NULL,
						slg51000_irq_handler,
						(IRQF_TRIGGER_HIGH |
						IRQF_ONESHOT),
						"slg51000-irq", chip);
		if (ret != 0) {
			dev_err(dev, "Failed to request IRQ: %d\n",
				chip->chip_irq);
			return ret;
		}
	} else {
		dev_info(dev, "No IRQ configured\n");
	}

	return ret;
}
static const struct i2c_device_id slg51000_i2c_id[] = {
	{"slg51000", 0},
	{},
};
MODULE_DEVICE_TABLE(i2c, slg51000_i2c_id);

static struct i2c_driver slg51000_regulator_driver = {
	.driver = {
		.name = "slg51000-regulator",
		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
	},
	.probe = slg51000_i2c_probe,
	.id_table = slg51000_i2c_id,
};
module_i2c_driver(slg51000_regulator_driver);

MODULE_AUTHOR("Eric Jeong <[email protected]>");
MODULE_DESCRIPTION("SLG51000 regulator driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/regulator/slg51000-regulator.c |
// SPDX-License-Identifier: GPL-2.0
//
// Regulator Driver for Freescale MC13783 PMIC
//
// Copyright 2010 Yong Shen <[email protected]>
// Copyright (C) 2008 Sascha Hauer, Pengutronix <[email protected]>
// Copyright 2009 Alberto Panizzo <[email protected]>
#include <linux/mfd/mc13783.h>
#include <linux/regulator/machine.h>
#include <linux/regulator/driver.h>
#include <linux/platform_device.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/err.h>
#include <linux/module.h>
#include "mc13xxx.h"
/*
 * MC13783 register numbers and field layouts.  For each field three macros
 * are defined: the enable bit, the VSEL shift and the VSEL mask (_M).
 */

/* SW1A buck: SWITCHERS0, 6-bit VSEL at bit 0. */
#define MC13783_REG_SWITCHERS0			24
/* Enable does not exist for SW1A */
#define MC13783_REG_SWITCHERS0_SW1AEN			0
#define MC13783_REG_SWITCHERS0_SW1AVSEL			0
#define MC13783_REG_SWITCHERS0_SW1AVSEL_M		(63 << 0)

/* SW1B buck: SWITCHERS1, 6-bit VSEL at bit 0. */
#define MC13783_REG_SWITCHERS1			25
/* Enable does not exist for SW1B */
#define MC13783_REG_SWITCHERS1_SW1BEN			0
#define MC13783_REG_SWITCHERS1_SW1BVSEL			0
#define MC13783_REG_SWITCHERS1_SW1BVSEL_M		(63 << 0)

/* SW2A buck: SWITCHERS2, 6-bit VSEL at bit 0. */
#define MC13783_REG_SWITCHERS2			26
/* Enable does not exist for SW2A */
#define MC13783_REG_SWITCHERS2_SW2AEN			0
#define MC13783_REG_SWITCHERS2_SW2AVSEL			0
#define MC13783_REG_SWITCHERS2_SW2AVSEL_M		(63 << 0)

/* SW2B buck: SWITCHERS3, 6-bit VSEL at bit 0. */
#define MC13783_REG_SWITCHERS3			27
/* Enable does not exist for SW2B */
#define MC13783_REG_SWITCHERS3_SW2BEN			0
#define MC13783_REG_SWITCHERS3_SW2BVSEL			0
#define MC13783_REG_SWITCHERS3_SW2BVSEL_M		(63 << 0)

/* SW3 boost: SWITCHERS5, enable at bit 20, 2-bit VSEL at bit 18. */
#define MC13783_REG_SWITCHERS5			29
#define MC13783_REG_SWITCHERS5_SW3EN			(1 << 20)
#define MC13783_REG_SWITCHERS5_SW3VSEL			18
#define MC13783_REG_SWITCHERS5_SW3VSEL_M		(3 << 18)

/* Voltage selects for the first LDO bank. */
#define MC13783_REG_REGULATORSETTING0		30
#define MC13783_REG_REGULATORSETTING0_VIOLOVSEL		2
#define MC13783_REG_REGULATORSETTING0_VDIGVSEL		4
#define MC13783_REG_REGULATORSETTING0_VGENVSEL		6
#define MC13783_REG_REGULATORSETTING0_VRFDIGVSEL	9
#define MC13783_REG_REGULATORSETTING0_VRFREFVSEL	11
#define MC13783_REG_REGULATORSETTING0_VRFCPVSEL		13
#define MC13783_REG_REGULATORSETTING0_VSIMVSEL		14
#define MC13783_REG_REGULATORSETTING0_VESIMVSEL		15
#define MC13783_REG_REGULATORSETTING0_VCAMVSEL		16

#define MC13783_REG_REGULATORSETTING0_VIOLOVSEL_M	(3 << 2)
#define MC13783_REG_REGULATORSETTING0_VDIGVSEL_M	(3 << 4)
#define MC13783_REG_REGULATORSETTING0_VGENVSEL_M	(7 << 6)
#define MC13783_REG_REGULATORSETTING0_VRFDIGVSEL_M	(3 << 9)
#define MC13783_REG_REGULATORSETTING0_VRFREFVSEL_M	(3 << 11)
#define MC13783_REG_REGULATORSETTING0_VRFCPVSEL_M	(1 << 13)
#define MC13783_REG_REGULATORSETTING0_VSIMVSEL_M	(1 << 14)
#define MC13783_REG_REGULATORSETTING0_VESIMVSEL_M	(1 << 15)
#define MC13783_REG_REGULATORSETTING0_VCAMVSEL_M	(7 << 16)

/* Voltage selects for the second LDO bank. */
#define MC13783_REG_REGULATORSETTING1		31
#define MC13783_REG_REGULATORSETTING1_VVIBVSEL		0
#define MC13783_REG_REGULATORSETTING1_VRF1VSEL		2
#define MC13783_REG_REGULATORSETTING1_VRF2VSEL		4
#define MC13783_REG_REGULATORSETTING1_VMMC1VSEL		6
#define MC13783_REG_REGULATORSETTING1_VMMC2VSEL		9

#define MC13783_REG_REGULATORSETTING1_VVIBVSEL_M	(3 << 0)
#define MC13783_REG_REGULATORSETTING1_VRF1VSEL_M	(3 << 2)
#define MC13783_REG_REGULATORSETTING1_VRF2VSEL_M	(3 << 4)
#define MC13783_REG_REGULATORSETTING1_VMMC1VSEL_M	(7 << 6)
#define MC13783_REG_REGULATORSETTING1_VMMC2VSEL_M	(7 << 9)

/* LDO enable bits, bank 0. */
#define MC13783_REG_REGULATORMODE0		32
#define MC13783_REG_REGULATORMODE0_VAUDIOEN		(1 << 0)
#define MC13783_REG_REGULATORMODE0_VIOHIEN		(1 << 3)
#define MC13783_REG_REGULATORMODE0_VIOLOEN		(1 << 6)
#define MC13783_REG_REGULATORMODE0_VDIGEN		(1 << 9)
#define MC13783_REG_REGULATORMODE0_VGENEN		(1 << 12)
#define MC13783_REG_REGULATORMODE0_VRFDIGEN		(1 << 15)
#define MC13783_REG_REGULATORMODE0_VRFREFEN		(1 << 18)
#define MC13783_REG_REGULATORMODE0_VRFCPEN		(1 << 21)

/* LDO enable bits, bank 1. */
#define MC13783_REG_REGULATORMODE1		33
#define MC13783_REG_REGULATORMODE1_VSIMEN		(1 << 0)
#define MC13783_REG_REGULATORMODE1_VESIMEN		(1 << 3)
#define MC13783_REG_REGULATORMODE1_VCAMEN		(1 << 6)
#define MC13783_REG_REGULATORMODE1_VRFBGEN		(1 << 9)
#define MC13783_REG_REGULATORMODE1_VVIBEN		(1 << 11)
#define MC13783_REG_REGULATORMODE1_VRF1EN		(1 << 12)
#define MC13783_REG_REGULATORMODE1_VRF2EN		(1 << 15)
#define MC13783_REG_REGULATORMODE1_VMMC1EN		(1 << 18)
#define MC13783_REG_REGULATORMODE1_VMMC2EN		(1 << 21)

/* GPO drivers and power-gate switches. */
#define MC13783_REG_POWERMISC			34
#define MC13783_REG_POWERMISC_GPO1EN			(1 << 6)
#define MC13783_REG_POWERMISC_GPO2EN			(1 << 8)
#define MC13783_REG_POWERMISC_GPO3EN			(1 << 10)
#define MC13783_REG_POWERMISC_GPO4EN			(1 << 12)
#define MC13783_REG_POWERMISC_PWGT1SPIEN		(1 << 15)
#define MC13783_REG_POWERMISC_PWGT2SPIEN		(1 << 16)

#define MC13783_REG_POWERMISC_PWGTSPI_M			(3 << 15)
/* Voltage Values */
/*
 * Per-regulator voltage tables, indexed by the VSEL field value; all
 * entries are in microvolts.  Repeated entries mirror the hardware's
 * sparse selector encoding (several selector codes map to one voltage).
 */
static const int mc13783_sw1x_val[] = {
	900000, 925000, 950000, 975000,
	1000000, 1025000, 1050000, 1075000,
	1100000, 1125000, 1150000, 1175000,
	1200000, 1225000, 1250000, 1275000,
	1300000, 1325000, 1350000, 1375000,
	1400000, 1425000, 1450000, 1475000,
	1500000, 1525000, 1550000, 1575000,
	1600000, 1625000, 1650000, 1675000,
	1700000, 1700000, 1700000, 1700000,
	1800000, 1800000, 1800000, 1800000,
	1850000, 1850000, 1850000, 1850000,
	2000000, 2000000, 2000000, 2000000,
	2100000, 2100000, 2100000, 2100000,
	2200000, 2200000, 2200000, 2200000,
	2200000, 2200000, 2200000, 2200000,
	2200000, 2200000, 2200000, 2200000,
};
/* SW2A/SW2B: identical to SW1x except the 1.85 V row becomes 1.9 V. */
static const int mc13783_sw2x_val[] = {
	900000, 925000, 950000, 975000,
	1000000, 1025000, 1050000, 1075000,
	1100000, 1125000, 1150000, 1175000,
	1200000, 1225000, 1250000, 1275000,
	1300000, 1325000, 1350000, 1375000,
	1400000, 1425000, 1450000, 1475000,
	1500000, 1525000, 1550000, 1575000,
	1600000, 1625000, 1650000, 1675000,
	1700000, 1700000, 1700000, 1700000,
	1800000, 1800000, 1800000, 1800000,
	1900000, 1900000, 1900000, 1900000,
	2000000, 2000000, 2000000, 2000000,
	2100000, 2100000, 2100000, 2100000,
	2200000, 2200000, 2200000, 2200000,
	2200000, 2200000, 2200000, 2200000,
	2200000, 2200000, 2200000, 2200000,
};
static const unsigned int mc13783_sw3_val[] = {
	5000000, 5000000, 5000000, 5500000,
};
/* Single-entry tables describe fixed-voltage regulators. */
static const unsigned int mc13783_vaudio_val[] = {
	2775000,
};
static const unsigned int mc13783_viohi_val[] = {
	2775000,
};
static const unsigned int mc13783_violo_val[] = {
	1200000, 1300000, 1500000, 1800000,
};
static const unsigned int mc13783_vdig_val[] = {
	1200000, 1300000, 1500000, 1800000,
};
static const unsigned int mc13783_vgen_val[] = {
	1200000, 1300000, 1500000, 1800000,
	1100000, 2000000, 2775000, 2400000,
};
static const unsigned int mc13783_vrfdig_val[] = {
	1200000, 1500000, 1800000, 1875000,
};
static const unsigned int mc13783_vrfref_val[] = {
	2475000, 2600000, 2700000, 2775000,
};
static const unsigned int mc13783_vrfcp_val[] = {
	2700000, 2775000,
};
static const unsigned int mc13783_vsim_val[] = {
	1800000, 2900000, 3000000,
};
static const unsigned int mc13783_vesim_val[] = {
	1800000, 2900000,
};
static const unsigned int mc13783_vcam_val[] = {
	1500000, 1800000, 2500000, 2550000,
	2600000, 2750000, 2800000, 3000000,
};
static const unsigned int mc13783_vrfbg_val[] = {
	1250000,
};
static const unsigned int mc13783_vvib_val[] = {
	1300000, 1800000, 2000000, 3000000,
};
static const unsigned int mc13783_vmmc_val[] = {
	1600000, 1800000, 2000000, 2600000,
	2700000, 2800000, 2900000, 3000000,
};
static const unsigned int mc13783_vrf_val[] = {
	1500000, 1875000, 2700000, 2775000,
};
static const unsigned int mc13783_gpo_val[] = {
	3100000,
};
static const unsigned int mc13783_pwgtdrv_val[] = {
	5500000,
};
/* Defined at the bottom of the file; forward-declared for the table below. */
static const struct regulator_ops mc13783_gpo_regulator_ops;

/*
 * Wrappers around the shared MC13xxx table-entry macros, binding the
 * MC13783_REG_ register prefix and the appropriate ops structure.
 * NOTE(review): the 'prefix' argument of the first three macros is unused;
 * MC13783_REG_ is hard-coded in each expansion — presumably intentional
 * to keep call sites uniform, but worth confirming.
 */
#define MC13783_DEFINE(prefix, name, node, reg, vsel_reg, voltages)	\
	MC13xxx_DEFINE(MC13783_REG_, name, node, reg, vsel_reg, voltages, \
		       mc13xxx_regulator_ops)
#define MC13783_FIXED_DEFINE(prefix, name, node, reg, voltages)		\
	MC13xxx_FIXED_DEFINE(MC13783_REG_, name, node, reg, voltages,	\
			     mc13xxx_fixed_regulator_ops)
#define MC13783_GPO_DEFINE(prefix, name, node, reg, voltages)		\
	MC13xxx_GPO_DEFINE(MC13783_REG_, name, node, reg, voltages,	\
			   mc13783_gpo_regulator_ops)
#define MC13783_DEFINE_SW(_name, _node, _reg, _vsel_reg, _voltages)	\
	MC13783_DEFINE(REG, _name, _node, _reg, _vsel_reg, _voltages)
#define MC13783_DEFINE_REGU(_name, _node, _reg, _vsel_reg, _voltages)	\
	MC13783_DEFINE(REG, _name, _node, _reg, _vsel_reg, _voltages)
static struct mc13xxx_regulator mc13783_regulators[] = {
MC13783_DEFINE_SW(SW1A, sw1a, SWITCHERS0, SWITCHERS0, mc13783_sw1x_val),
MC13783_DEFINE_SW(SW1B, sw1b, SWITCHERS1, SWITCHERS1, mc13783_sw1x_val),
MC13783_DEFINE_SW(SW2A, sw2a, SWITCHERS2, SWITCHERS2, mc13783_sw2x_val),
MC13783_DEFINE_SW(SW2B, sw2b, SWITCHERS3, SWITCHERS3, mc13783_sw2x_val),
MC13783_DEFINE_SW(SW3, sw3, SWITCHERS5, SWITCHERS5, mc13783_sw3_val),
MC13783_FIXED_DEFINE(REG, VAUDIO, vaudio, REGULATORMODE0, mc13783_vaudio_val),
MC13783_FIXED_DEFINE(REG, VIOHI, viohi, REGULATORMODE0, mc13783_viohi_val),
MC13783_DEFINE_REGU(VIOLO, violo, REGULATORMODE0, REGULATORSETTING0,
mc13783_violo_val),
MC13783_DEFINE_REGU(VDIG, vdig, REGULATORMODE0, REGULATORSETTING0,
mc13783_vdig_val),
MC13783_DEFINE_REGU(VGEN, vgen, REGULATORMODE0, REGULATORSETTING0,
mc13783_vgen_val),
MC13783_DEFINE_REGU(VRFDIG, vrfdig, REGULATORMODE0, REGULATORSETTING0,
mc13783_vrfdig_val),
MC13783_DEFINE_REGU(VRFREF, vrfref, REGULATORMODE0, REGULATORSETTING0,
mc13783_vrfref_val),
MC13783_DEFINE_REGU(VRFCP, vrfcp, REGULATORMODE0, REGULATORSETTING0,
mc13783_vrfcp_val),
MC13783_DEFINE_REGU(VSIM, vsim, REGULATORMODE1, REGULATORSETTING0,
mc13783_vsim_val),
MC13783_DEFINE_REGU(VESIM, vesim, REGULATORMODE1, REGULATORSETTING0,
mc13783_vesim_val),
MC13783_DEFINE_REGU(VCAM, vcam, REGULATORMODE1, REGULATORSETTING0,
mc13783_vcam_val),
MC13783_FIXED_DEFINE(REG, VRFBG, vrfbg, REGULATORMODE1, mc13783_vrfbg_val),
MC13783_DEFINE_REGU(VVIB, vvib, REGULATORMODE1, REGULATORSETTING1,
mc13783_vvib_val),
MC13783_DEFINE_REGU(VRF1, vrf1, REGULATORMODE1, REGULATORSETTING1,
mc13783_vrf_val),
MC13783_DEFINE_REGU(VRF2, vrf2, REGULATORMODE1, REGULATORSETTING1,
mc13783_vrf_val),
MC13783_DEFINE_REGU(VMMC1, vmmc1, REGULATORMODE1, REGULATORSETTING1,
mc13783_vmmc_val),
MC13783_DEFINE_REGU(VMMC2, vmmc2, REGULATORMODE1, REGULATORSETTING1,
mc13783_vmmc_val),
MC13783_GPO_DEFINE(REG, GPO1, gpo1, POWERMISC, mc13783_gpo_val),
MC13783_GPO_DEFINE(REG, GPO2, gpo1, POWERMISC, mc13783_gpo_val),
MC13783_GPO_DEFINE(REG, GPO3, gpo1, POWERMISC, mc13783_gpo_val),
MC13783_GPO_DEFINE(REG, GPO4, gpo1, POWERMISC, mc13783_gpo_val),
MC13783_GPO_DEFINE(REG, PWGT1SPI, pwgt1spi, POWERMISC, mc13783_pwgtdrv_val),
MC13783_GPO_DEFINE(REG, PWGT2SPI, pwgt2spi, POWERMISC, mc13783_pwgtdrv_val),
};
/*
 * Read-modify-write the bits in @mask of the POWERMISC register to @val,
 * under the chip lock.
 *
 * POWERMISC mixes GPO enable bits with the PWGT1/2 power-gate bits.  The
 * driver keeps its intended PWGTxEN value in priv->powermisc_pwgt_state and
 * re-applies it on every write, overriding whatever was just read back —
 * presumably because the read-back value does not reflect the last written
 * power-gate state (TODO confirm against the MC13783 datasheet).
 *
 * Returns 0 on success or a negative error from the register I/O.
 */
static int mc13783_powermisc_rmw(struct mc13xxx_regulator_priv *priv, u32 mask,
				 u32 val)
{
	struct mc13xxx *mc13783 = priv->mc13xxx;
	int ret;
	u32 valread;

	/* @val must only set bits that are covered by @mask. */
	BUG_ON(val & ~mask);

	mc13xxx_lock(priv->mc13xxx);
	ret = mc13xxx_reg_read(mc13783, MC13783_REG_POWERMISC, &valread);
	if (ret)
		goto out;

	/* Update the stored state for Power Gates. */
	priv->powermisc_pwgt_state =
		(priv->powermisc_pwgt_state & ~mask) | val;
	/* Only the two PWGTxSPIEN bits are ever cached. */
	priv->powermisc_pwgt_state &= MC13783_REG_POWERMISC_PWGTSPI_M;

	/* Construct the new register value */
	valread = (valread & ~mask) | val;
	/* Overwrite the PWGTxEN with the stored version */
	valread = (valread & ~MC13783_REG_POWERMISC_PWGTSPI_M) |
		  priv->powermisc_pwgt_state;

	ret = mc13xxx_reg_write(mc13783, MC13783_REG_POWERMISC, valread);
out:
	mc13xxx_unlock(priv->mc13xxx);
	return ret;
}
/*
 * Enable a GPO or power-gate output via POWERMISC.  The PWGT1/2 power
 * gates are active-low: writing 0 to their enable bit turns them on.
 */
static int mc13783_gpo_regulator_enable(struct regulator_dev *rdev)
{
	struct mc13xxx_regulator_priv *priv = rdev_get_drvdata(rdev);
	struct mc13xxx_regulator *regulators = priv->mc13xxx_regulators;
	int id = rdev_get_id(rdev);
	u32 mask = regulators[id].enable_bit;
	u32 val;

	dev_dbg(rdev_get_dev(rdev), "%s id: %d\n", __func__, id);

	/* Power Gate enable value is 0 */
	val = (id == MC13783_REG_PWGT1SPI || id == MC13783_REG_PWGT2SPI) ?
		0 : mask;

	return mc13783_powermisc_rmw(priv, mask, val);
}
/*
 * Disable a GPO or power-gate output via POWERMISC.  The PWGT1/2 power
 * gates are active-low: writing 1 to their enable bit turns them off.
 */
static int mc13783_gpo_regulator_disable(struct regulator_dev *rdev)
{
	struct mc13xxx_regulator_priv *priv = rdev_get_drvdata(rdev);
	struct mc13xxx_regulator *regulators = priv->mc13xxx_regulators;
	int id = rdev_get_id(rdev);
	u32 mask = regulators[id].enable_bit;
	u32 val;

	dev_dbg(rdev_get_dev(rdev), "%s id: %d\n", __func__, id);

	/* Power Gate disable value is 1 */
	val = (id == MC13783_REG_PWGT1SPI || id == MC13783_REG_PWGT2SPI) ?
		mask : 0;

	return mc13783_powermisc_rmw(priv, mask, val);
}
/*
 * Report whether a GPO / power-gate output is enabled.
 *
 * For the power gates the hardware register is not authoritative; the
 * cached powermisc_pwgt_state is substituted in (XOR-inverted, because the
 * gates are active-low) before testing the enable bit.
 *
 * Returns 1 if enabled, 0 if disabled, or a negative error on read failure.
 */
static int mc13783_gpo_regulator_is_enabled(struct regulator_dev *rdev)
{
	struct mc13xxx_regulator_priv *priv = rdev_get_drvdata(rdev);
	struct mc13xxx_regulator *mc13xxx_regulators = priv->mc13xxx_regulators;
	int ret, id = rdev_get_id(rdev);
	unsigned int val;

	mc13xxx_lock(priv->mc13xxx);
	ret = mc13xxx_reg_read(priv->mc13xxx, mc13xxx_regulators[id].reg, &val);
	mc13xxx_unlock(priv->mc13xxx);
	if (ret)
		return ret;

	/* Power Gates state is stored in powermisc_pwgt_state
	 * where the meaning of bits is negated */
	val = (val & ~MC13783_REG_POWERMISC_PWGTSPI_M) |
	      (priv->powermisc_pwgt_state ^ MC13783_REG_POWERMISC_PWGTSPI_M);

	return (val & mc13xxx_regulators[id].enable_bit) != 0;
}
/*
 * Ops for the GPO and power-gate "regulators": fixed voltage, custom
 * enable/disable/is_enabled that go through the POWERMISC RMW helper.
 */
static const struct regulator_ops mc13783_gpo_regulator_ops = {
	.enable = mc13783_gpo_regulator_enable,
	.disable = mc13783_gpo_regulator_disable,
	.is_enabled = mc13783_gpo_regulator_is_enabled,
	.list_voltage = regulator_list_voltage_table,
	.set_voltage = mc13xxx_fixed_regulator_set_voltage,
};
/*
 * Probe: register every regulator described either by the device tree or by
 * platform data.
 *
 * Fixes:
 *  - the failure message printed mc13783_regulators[i].desc.name, but the
 *    descriptor actually registered is indexed by 'id' (from DT or pdata),
 *    so the wrong regulator name was reported whenever the probe order did
 *    not match the mc13783_regulators[] array order;
 *  - guard against mc13xxx_parse_regulators_dt() failing while pdata is
 *    also NULL (possible when the DT count succeeded but the parse did
 *    not), which would otherwise dereference a NULL pdata in the loop.
 */
static int mc13783_regulator_probe(struct platform_device *pdev)
{
	struct mc13xxx_regulator_priv *priv;
	struct mc13xxx *mc13783 = dev_get_drvdata(pdev->dev.parent);
	struct mc13xxx_regulator_platform_data *pdata =
		dev_get_platdata(&pdev->dev);
	struct mc13xxx_regulator_init_data *mc13xxx_data;
	struct regulator_config config = { };
	int i, num_regulators;

	/* Prefer the DT description; fall back to platform data. */
	num_regulators = mc13xxx_get_num_regulators_dt(pdev);
	if (num_regulators <= 0 && pdata)
		num_regulators = pdata->num_regulators;
	if (num_regulators <= 0)
		return -EINVAL;

	priv = devm_kzalloc(&pdev->dev,
			    struct_size(priv, regulators, num_regulators),
			    GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->num_regulators = num_regulators;
	priv->mc13xxx_regulators = mc13783_regulators;
	priv->mc13xxx = mc13783;
	platform_set_drvdata(pdev, priv);

	mc13xxx_data = mc13xxx_parse_regulators_dt(pdev, mc13783_regulators,
					ARRAY_SIZE(mc13783_regulators));
	/* Without DT data the loop below must take init data from pdata. */
	if (!mc13xxx_data && !pdata)
		return -EINVAL;

	for (i = 0; i < priv->num_regulators; i++) {
		struct regulator_init_data *init_data;
		struct regulator_desc *desc;
		struct device_node *node = NULL;
		int id;

		if (mc13xxx_data) {
			id = mc13xxx_data[i].id;
			init_data = mc13xxx_data[i].init_data;
			node = mc13xxx_data[i].node;
		} else {
			id = pdata->regulators[i].id;
			init_data = pdata->regulators[i].init_data;
		}
		desc = &mc13783_regulators[id].desc;

		config.dev = &pdev->dev;
		config.init_data = init_data;
		config.driver_data = priv;
		config.of_node = node;

		priv->regulators[i] = devm_regulator_register(&pdev->dev, desc,
							      &config);
		if (IS_ERR(priv->regulators[i])) {
			/* Report the descriptor we tried to register (id),
			 * not the array slot (i). */
			dev_err(&pdev->dev, "failed to register regulator %s\n",
				desc->name);
			return PTR_ERR(priv->regulators[i]);
		}
	}

	return 0;
}
static struct platform_driver mc13783_regulator_driver = {
	.driver = {
		.name = "mc13783-regulator",
		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
	},
	.probe = mc13783_regulator_probe,
};

static int __init mc13783_regulator_init(void)
{
	return platform_driver_register(&mc13783_regulator_driver);
}
/* Registered at subsys_initcall time — presumably so supplies are available
 * before dependent device drivers probe (TODO confirm original intent). */
subsys_initcall(mc13783_regulator_init);

static void __exit mc13783_regulator_exit(void)
{
	platform_driver_unregister(&mc13783_regulator_driver);
}
module_exit(mc13783_regulator_exit);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Sascha Hauer <[email protected]>");
MODULE_DESCRIPTION("Regulator Driver for Freescale MC13783 PMIC");
MODULE_ALIAS("platform:mc13783-regulator");
| linux-master | drivers/regulator/mc13783-regulator.c |
// SPDX-License-Identifier: GPL-2.0
//
// FAN53555 Fairchild Digitally Programmable TinyBuck Regulator Driver.
//
// Supported Part Numbers:
// FAN53555UC00X/01X/03X/04X/05X
//
// Copyright (c) 2012 Marvell Technology Ltd.
// Yunfan Zhang <[email protected]>
#include <linux/bitops.h>
#include <linux/err.h>
#include <linux/i2c.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/param.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/regulator/driver.h>
#include <linux/regulator/fan53555.h>
#include <linux/regulator/machine.h>
#include <linux/regulator/of_regulator.h>
#include <linux/slab.h>
/* Voltage setting */
/* The family exposes two voltage-select registers (VSEL0/VSEL1); which one
 * is "active" vs "sleep" is chosen at probe time via sleep_vsel_id. */
#define FAN53555_VSEL0		0x00
#define FAN53555_VSEL1		0x01
#define RK8602_VSEL0		0x06
#define RK8602_VSEL1		0x07
#define TCS4525_VSEL0		0x11
#define TCS4525_VSEL1		0x10
#define TCS4525_TIME		0x13
#define TCS4525_COMMAND		0x14
/* Control register */
#define FAN53555_CONTROL	0x02
/* IC Type */
#define FAN53555_ID1		0x03
/* IC mask version */
#define FAN53555_ID2		0x04
/* Monitor register */
#define FAN53555_MONITOR	0x05
/* VSEL bit definitions */
#define VSEL_BUCK_EN	BIT(7)
#define VSEL_MODE		BIT(6)
/* Chip ID and Verison */
#define DIE_ID		0x0F	/* ID1 */
#define DIE_REV		0x0F	/* ID2 */
/* Control bit definitions */
#define CTL_OUTPUT_DISCHG	BIT(7)
#define CTL_SLEW_MASK	GENMASK(6, 4)
#define CTL_RESET	BIT(2)
#define CTL_MODE_VSEL0_MODE	BIT(0)
#define CTL_MODE_VSEL1_MODE	BIT(1)
#define FAN53555_NVOLTAGES	64	/* Numbers of voltages */
#define FAN53526_NVOLTAGES	128
#define RK8602_NVOLTAGES	160
/* TCS4525 mode bits live in the COMMAND register, not in VSEL. */
#define TCS_VSEL0_MODE		BIT(7)
#define TCS_VSEL1_MODE		BIT(6)
#define TCS_SLEW_MASK		GENMASK(4, 3)
/* Which vendor variant of the chip family is being driven; selected from
 * the DT compatible or the i2c_device_id at probe time. */
enum fan53555_vendor {
	FAN53526_VENDOR_FAIRCHILD = 0,
	FAN53555_VENDOR_FAIRCHILD,
	FAN53555_VENDOR_ROCKCHIP,	/* RK8600, RK8601 */
	RK8602_VENDOR_ROCKCHIP,		/* RK8602, RK8603 */
	FAN53555_VENDOR_SILERGY,
	FAN53526_VENDOR_TCS,
};

enum {
	FAN53526_CHIP_ID_01 = 1,
};

enum {
	FAN53526_CHIP_REV_08 = 8,
};

/* IC Type */
enum {
	FAN53555_CHIP_ID_00 = 0,
	FAN53555_CHIP_ID_01,
	FAN53555_CHIP_ID_02,
	FAN53555_CHIP_ID_03,
	FAN53555_CHIP_ID_04,
	FAN53555_CHIP_ID_05,
	FAN53555_CHIP_ID_08 = 8,
};

enum {
	RK8600_CHIP_ID_08 = 8,		/* RK8600, RK8601 */
};

enum {
	RK8602_CHIP_ID_10 = 10,		/* RK8602, RK8603 */
};

enum {
	TCS4525_CHIP_ID_12 = 12,
};

enum {
	TCS4526_CHIP_ID_00 = 0,
};

/* IC mask revision */
enum {
	FAN53555_CHIP_REV_00 = 0x3,
	FAN53555_CHIP_REV_13 = 0xf,
};

enum {
	SILERGY_SYR82X = 8,
	SILERGY_SYR83X = 9,
};
/* Per-device state; the register layout fields (vol_reg, sleep_reg, mode_reg,
 * slew_reg, ...) are filled in by fan53555_device_setup() according to the
 * detected vendor and the configured sleep VSEL. */
struct fan53555_device_info {
	enum fan53555_vendor vendor;
	struct device *dev;
	struct regulator_desc desc;
	struct regulator_init_data *regulator;
	/* IC Type and Rev */
	int chip_id;
	int chip_rev;
	/* Voltage setting register */
	unsigned int vol_reg;
	unsigned int sleep_reg;
	unsigned int en_reg;
	unsigned int sleep_en_reg;
	/* Voltage range and step(linear) */
	unsigned int vsel_min;
	unsigned int vsel_step;
	unsigned int vsel_count;
	/* Mode */
	unsigned int mode_reg;
	unsigned int mode_mask;
	/* Sleep voltage cache */
	unsigned int sleep_vol_cache;
	/* Slew rate */
	unsigned int slew_reg;
	unsigned int slew_mask;
	const unsigned int *ramp_delay_table;
	unsigned int n_ramp_values;
	unsigned int slew_rate;
};
/*
 * Program the suspend (sleep) output voltage into the sleep VSEL register.
 * The requested value is cached so repeated calls with the same voltage
 * skip the bus traffic.
 */
static int fan53555_set_suspend_voltage(struct regulator_dev *rdev, int uV)
{
	struct fan53555_device_info *di = rdev_get_drvdata(rdev);
	int sel, ret;

	/* Already programmed — nothing to do. */
	if (uV == di->sleep_vol_cache)
		return 0;

	/* Map the voltage onto a linear selector for the VSEL field. */
	sel = regulator_map_voltage_linear(rdev, uV, uV);
	if (sel < 0)
		return sel;

	ret = regmap_update_bits(rdev->regmap, di->sleep_reg,
				 di->desc.vsel_mask, sel);
	if (ret < 0)
		return ret;

	/*
	 * Cache the requested value; it may differ from the voltage actually
	 * set, which is rounded to a selector step.
	 */
	di->sleep_vol_cache = uV;

	return 0;
}
/* Keep the buck enabled during system suspend: set the enable bit in the
 * sleep-side enable register. */
static int fan53555_set_suspend_enable(struct regulator_dev *rdev)
{
	struct fan53555_device_info *di = rdev_get_drvdata(rdev);

	return regmap_update_bits(rdev->regmap, di->sleep_en_reg,
				  VSEL_BUCK_EN, VSEL_BUCK_EN);
}
/* Turn the buck off during system suspend: clear the enable bit in the
 * sleep-side enable register. */
static int fan53555_set_suspend_disable(struct regulator_dev *rdev)
{
	struct fan53555_device_info *di = rdev_get_drvdata(rdev);

	return regmap_update_bits(rdev->regmap, di->sleep_en_reg,
				  VSEL_BUCK_EN, 0);
}
/*
 * Select forced-PWM (REGULATOR_MODE_FAST) or automatic (REGULATOR_MODE_NORMAL)
 * operation by toggling the mode bit in the vendor-specific mode register.
 *
 * Fix: the NORMAL branch cleared di->mode_mask in di->vol_reg instead of
 * di->mode_reg.  For parts where the two differ this touched the wrong
 * register: e.g. on the TCS variants the mode bit lives in TCS4525_COMMAND
 * while bit 7 of the VSEL register is VSEL_BUCK_EN, so switching to NORMAL
 * mode could clear the buck enable bit.  Register I/O errors are now also
 * propagated instead of being silently dropped.
 */
static int fan53555_set_mode(struct regulator_dev *rdev, unsigned int mode)
{
	struct fan53555_device_info *di = rdev_get_drvdata(rdev);

	switch (mode) {
	case REGULATOR_MODE_FAST:
		return regmap_update_bits(rdev->regmap, di->mode_reg,
					  di->mode_mask, di->mode_mask);
	case REGULATOR_MODE_NORMAL:
		return regmap_update_bits(rdev->regmap, di->mode_reg,
					  di->mode_mask, 0);
	default:
		return -EINVAL;
	}
}
/*
 * Report the current operating mode: mode bit set means forced PWM
 * (REGULATOR_MODE_FAST), clear means automatic (REGULATOR_MODE_NORMAL).
 * A failed register read is returned as a (negative) error code.
 */
static unsigned int fan53555_get_mode(struct regulator_dev *rdev)
{
	struct fan53555_device_info *di = rdev_get_drvdata(rdev);
	unsigned int val;
	int ret;

	ret = regmap_read(rdev->regmap, di->mode_reg, &val);
	if (ret < 0)
		return ret;

	return (val & di->mode_mask) ? REGULATOR_MODE_FAST :
				       REGULATOR_MODE_NORMAL;
}
/* Ramp-rate tables for set_ramp_delay, indexed by the slew register field
 * value (fastest first).  Values are presumably in uV/us per the regulator
 * framework's ramp_delay convention — TODO confirm against datasheets. */
static const unsigned int slew_rates[] = {
	64000,
	32000,
	16000,
	8000,
	4000,
	2000,
	1000,
	500,
};

/* TCS452x parts use a shorter, coarser table in TCS4525_TIME. */
static const unsigned int tcs_slew_rates[] = {
	18700,
	9300,
	4600,
	2300,
};
/* Mostly regmap helpers driven by the regulator_desc filled in at register
 * time; only mode and suspend handling need custom callbacks. */
static const struct regulator_ops fan53555_regulator_ops = {
	.set_voltage_sel = regulator_set_voltage_sel_regmap,
	.get_voltage_sel = regulator_get_voltage_sel_regmap,
	.set_voltage_time_sel = regulator_set_voltage_time_sel,
	.map_voltage = regulator_map_voltage_linear,
	.list_voltage = regulator_list_voltage_linear,
	.set_suspend_voltage = fan53555_set_suspend_voltage,
	.enable = regulator_enable_regmap,
	.disable = regulator_disable_regmap,
	.is_enabled = regulator_is_enabled_regmap,
	.set_mode = fan53555_set_mode,
	.get_mode = fan53555_get_mode,
	.set_ramp_delay = regulator_set_ramp_delay_regmap,
	.set_suspend_enable = fan53555_set_suspend_enable,
	.set_suspend_disable = fan53555_set_suspend_disable,
};
/*
 * Fill in the voltage range, step and slew-rate description for Fairchild
 * FAN53526 parts.  Only die ID 1 / revision 8 is known; anything else is
 * rejected with -EINVAL.
 */
static int fan53526_voltages_setup_fairchild(struct fan53555_device_info *di)
{
	if (di->chip_id != FAN53526_CHIP_ID_01) {
		dev_err(di->dev,
			"Chip ID %d not supported!\n", di->chip_id);
		return -EINVAL;
	}

	if (di->chip_rev != FAN53526_CHIP_REV_08) {
		dev_err(di->dev,
			"Chip ID %d with rev %d not supported!\n",
			di->chip_id, di->chip_rev);
		return -EINVAL;
	}

	/* Init voltage range and step */
	di->vsel_min = 600000;
	di->vsel_step = 6250;

	di->slew_reg = FAN53555_CONTROL;
	di->slew_mask = CTL_SLEW_MASK;
	di->ramp_delay_table = slew_rates;
	di->n_ramp_values = ARRAY_SIZE(slew_rates);
	di->vsel_count = FAN53526_NVOLTAGES;

	return 0;
}
/*
 * Fill in the voltage range, step and slew-rate description for Fairchild
 * FAN53555 parts.  The range depends on the die option (chip_id) and, for
 * option 00, on the mask revision as well.  Unknown dies are rejected with
 * -EINVAL.
 */
static int fan53555_voltages_setup_fairchild(struct fan53555_device_info *di)
{
	/* Init voltage range and step */
	switch (di->chip_id) {
	case FAN53555_CHIP_ID_00:
		switch (di->chip_rev) {
		case FAN53555_CHIP_REV_00:
			di->vsel_min = 600000;
			di->vsel_step = 10000;
			break;
		case FAN53555_CHIP_REV_13:
			di->vsel_min = 800000;
			di->vsel_step = 10000;
			break;
		default:
			dev_err(di->dev,
				"Chip ID %d with rev %d not supported!\n",
				di->chip_id, di->chip_rev);
			return -EINVAL;
		}
		break;
	case FAN53555_CHIP_ID_01:
	case FAN53555_CHIP_ID_03:
	case FAN53555_CHIP_ID_05:
	case FAN53555_CHIP_ID_08:
		di->vsel_min = 600000;
		di->vsel_step = 10000;
		break;
	case FAN53555_CHIP_ID_04:
		/* Option 04 has a distinct, non-10mV step (see file comment). */
		di->vsel_min = 603000;
		di->vsel_step = 12826;
		break;
	default:
		dev_err(di->dev,
			"Chip ID %d not supported!\n", di->chip_id);
		return -EINVAL;
	}
	di->slew_reg = FAN53555_CONTROL;
	di->slew_mask = CTL_SLEW_MASK;
	di->ramp_delay_table = slew_rates;
	di->n_ramp_values = ARRAY_SIZE(slew_rates);
	di->vsel_count = FAN53555_NVOLTAGES;

	return 0;
}
/*
 * Fill in the voltage range, step and slew-rate description for Rockchip
 * RK8600/RK8601 parts.  Only die ID 8 is supported.
 */
static int fan53555_voltages_setup_rockchip(struct fan53555_device_info *di)
{
	if (di->chip_id != RK8600_CHIP_ID_08) {
		dev_err(di->dev,
			"Chip ID %d not supported!\n", di->chip_id);
		return -EINVAL;
	}

	/* Init voltage range and step */
	di->vsel_min = 712500;
	di->vsel_step = 12500;

	di->slew_reg = FAN53555_CONTROL;
	di->slew_mask = CTL_SLEW_MASK;
	di->ramp_delay_table = slew_rates;
	di->n_ramp_values = ARRAY_SIZE(slew_rates);
	di->vsel_count = FAN53555_NVOLTAGES;

	return 0;
}
/*
 * Fill in the voltage range, step and slew-rate description for Rockchip
 * RK8602/RK8603 parts.  Only die ID 10 is supported.
 */
static int rk8602_voltages_setup_rockchip(struct fan53555_device_info *di)
{
	if (di->chip_id != RK8602_CHIP_ID_10) {
		dev_err(di->dev,
			"Chip ID %d not supported!\n", di->chip_id);
		return -EINVAL;
	}

	/* Init voltage range and step */
	di->vsel_min = 500000;
	di->vsel_step = 6250;

	di->slew_reg = FAN53555_CONTROL;
	di->slew_mask = CTL_SLEW_MASK;
	di->ramp_delay_table = slew_rates;
	di->n_ramp_values = ARRAY_SIZE(slew_rates);
	di->vsel_count = RK8602_NVOLTAGES;

	return 0;
}
/*
 * Fill in the voltage range, step and slew-rate description for Silergy
 * SYR82x/SYR83x parts; both families share the same range.
 */
static int fan53555_voltages_setup_silergy(struct fan53555_device_info *di)
{
	if (di->chip_id != SILERGY_SYR82X && di->chip_id != SILERGY_SYR83X) {
		dev_err(di->dev,
			"Chip ID %d not supported!\n", di->chip_id);
		return -EINVAL;
	}

	/* Init voltage range and step */
	di->vsel_min = 712500;
	di->vsel_step = 12500;

	di->slew_reg = FAN53555_CONTROL;
	di->slew_mask = CTL_SLEW_MASK;
	di->ramp_delay_table = slew_rates;
	di->n_ramp_values = ARRAY_SIZE(slew_rates);
	di->vsel_count = FAN53555_NVOLTAGES;

	return 0;
}
/*
 * Fill in the voltage range, step and slew-rate description for TCS4525
 * and TCS4526 parts; both use the TCS4525_TIME slew register.
 */
static int fan53526_voltages_setup_tcs(struct fan53555_device_info *di)
{
	if (di->chip_id != TCS4525_CHIP_ID_12 &&
	    di->chip_id != TCS4526_CHIP_ID_00) {
		dev_err(di->dev, "Chip ID %d not supported!\n", di->chip_id);
		return -EINVAL;
	}

	di->slew_reg = TCS4525_TIME;
	di->slew_mask = TCS_SLEW_MASK;
	di->ramp_delay_table = tcs_slew_rates;
	di->n_ramp_values = ARRAY_SIZE(tcs_slew_rates);

	/* Init voltage range and step */
	di->vsel_min = 600000;
	di->vsel_step = 6250;
	di->vsel_count = FAN53526_NVOLTAGES;

	return 0;
}
/* For 00,01,03,05 options:
 * VOUT = 0.60V + NSELx * 10mV, from 0.60 to 1.23V.
 * For 04 option:
 * VOUT = 0.603V + NSELx * 12.826mV, from 0.603 to 1.411V.
 * */
/*
 * Resolve the per-vendor register layout into di: which VSEL register is
 * the active vs sleep one (from pdata->sleep_vsel_id), where the enable and
 * mode bits live, and the voltage range.  Three consecutive switches over
 * di->vendor handle (1) voltage/enable registers, (2) mode register/mask,
 * (3) voltage range, each rejecting unknown vendors with -EINVAL.
 */
static int fan53555_device_setup(struct fan53555_device_info *di,
				struct fan53555_platform_data *pdata)
{
	int ret = 0;

	/* Setup voltage control register */
	switch (di->vendor) {
	case FAN53526_VENDOR_FAIRCHILD:
	case FAN53555_VENDOR_FAIRCHILD:
	case FAN53555_VENDOR_ROCKCHIP:
	case FAN53555_VENDOR_SILERGY:
		switch (pdata->sleep_vsel_id) {
		case FAN53555_VSEL_ID_0:
			di->sleep_reg = FAN53555_VSEL0;
			di->vol_reg = FAN53555_VSEL1;
			break;
		case FAN53555_VSEL_ID_1:
			di->sleep_reg = FAN53555_VSEL1;
			di->vol_reg = FAN53555_VSEL0;
			break;
		default:
			dev_err(di->dev, "Invalid VSEL ID!\n");
			return -EINVAL;
		}
		/* Enable bit shares the VSEL registers on these parts. */
		di->sleep_en_reg = di->sleep_reg;
		di->en_reg = di->vol_reg;
		break;
	case RK8602_VENDOR_ROCKCHIP:
		/* NOTE(review): on RK8602 the voltage lives in RK8602_VSELx
		 * (0x06/0x07) while enable stays in FAN53555_VSELx (0x00/0x01)
		 * — presumably the RK8602 splits them; confirm vs datasheet. */
		switch (pdata->sleep_vsel_id) {
		case FAN53555_VSEL_ID_0:
			di->sleep_reg = RK8602_VSEL0;
			di->vol_reg = RK8602_VSEL1;
			di->sleep_en_reg = FAN53555_VSEL0;
			di->en_reg = FAN53555_VSEL1;
			break;
		case FAN53555_VSEL_ID_1:
			di->sleep_reg = RK8602_VSEL1;
			di->vol_reg = RK8602_VSEL0;
			di->sleep_en_reg = FAN53555_VSEL1;
			di->en_reg = FAN53555_VSEL0;
			break;
		default:
			dev_err(di->dev, "Invalid VSEL ID!\n");
			return -EINVAL;
		}
		break;
	case FAN53526_VENDOR_TCS:
		switch (pdata->sleep_vsel_id) {
		case FAN53555_VSEL_ID_0:
			di->sleep_reg = TCS4525_VSEL0;
			di->vol_reg = TCS4525_VSEL1;
			break;
		case FAN53555_VSEL_ID_1:
			di->sleep_reg = TCS4525_VSEL1;
			di->vol_reg = TCS4525_VSEL0;
			break;
		default:
			dev_err(di->dev, "Invalid VSEL ID!\n");
			return -EINVAL;
		}
		di->sleep_en_reg = di->sleep_reg;
		di->en_reg = di->vol_reg;
		break;
	default:
		dev_err(di->dev, "vendor %d not supported!\n", di->vendor);
		return -EINVAL;
	}

	/* Setup mode control register */
	switch (di->vendor) {
	case FAN53526_VENDOR_FAIRCHILD:
		/* Mode bits for both VSELs sit in CONTROL; pick the one that
		 * belongs to the *active* VSEL (opposite of sleep_vsel_id). */
		di->mode_reg = FAN53555_CONTROL;

		switch (pdata->sleep_vsel_id) {
		case FAN53555_VSEL_ID_0:
			di->mode_mask = CTL_MODE_VSEL1_MODE;
			break;
		case FAN53555_VSEL_ID_1:
			di->mode_mask = CTL_MODE_VSEL0_MODE;
			break;
		}
		break;
	case FAN53555_VENDOR_FAIRCHILD:
	case FAN53555_VENDOR_ROCKCHIP:
	case FAN53555_VENDOR_SILERGY:
		di->mode_reg = di->vol_reg;
		di->mode_mask = VSEL_MODE;
		break;
	case RK8602_VENDOR_ROCKCHIP:
		di->mode_mask = VSEL_MODE;

		switch (pdata->sleep_vsel_id) {
		case FAN53555_VSEL_ID_0:
			di->mode_reg = FAN53555_VSEL1;
			break;
		case FAN53555_VSEL_ID_1:
			di->mode_reg = FAN53555_VSEL0;
			break;
		}
		break;
	case FAN53526_VENDOR_TCS:
		di->mode_reg = TCS4525_COMMAND;

		switch (pdata->sleep_vsel_id) {
		case FAN53555_VSEL_ID_0:
			di->mode_mask = TCS_VSEL1_MODE;
			break;
		case FAN53555_VSEL_ID_1:
			di->mode_mask = TCS_VSEL0_MODE;
			break;
		}
		break;
	default:
		dev_err(di->dev, "vendor %d not supported!\n", di->vendor);
		return -EINVAL;
	}

	/* Setup voltage range */
	switch (di->vendor) {
	case FAN53526_VENDOR_FAIRCHILD:
		ret = fan53526_voltages_setup_fairchild(di);
		break;
	case FAN53555_VENDOR_FAIRCHILD:
		ret = fan53555_voltages_setup_fairchild(di);
		break;
	case FAN53555_VENDOR_ROCKCHIP:
		ret = fan53555_voltages_setup_rockchip(di);
		break;
	case RK8602_VENDOR_ROCKCHIP:
		ret = rk8602_voltages_setup_rockchip(di);
		break;
	case FAN53555_VENDOR_SILERGY:
		ret = fan53555_voltages_setup_silergy(di);
		break;
	case FAN53526_VENDOR_TCS:
		ret = fan53526_voltages_setup_tcs(di);
		break;
	default:
		dev_err(di->dev, "vendor %d not supported!\n", di->vendor);
		return -EINVAL;
	}

	return ret;
}
/*
 * Populate the regulator_desc from the values resolved in
 * fan53555_device_setup() and register the regulator with the core.
 * Returns 0 on success or the error from devm_regulator_register().
 */
static int fan53555_regulator_register(struct fan53555_device_info *di,
			struct regulator_config *config)
{
	struct regulator_desc *rdesc = &di->desc;
	struct regulator_dev *rdev;

	/* Identity and ops. */
	rdesc->name = "fan53555-reg";
	rdesc->supply_name = "vin";
	rdesc->ops = &fan53555_regulator_ops;
	rdesc->type = REGULATOR_VOLTAGE;
	rdesc->owner = THIS_MODULE;

	/* Linear voltage map and the registers resolved at setup time. */
	rdesc->n_voltages = di->vsel_count;
	rdesc->min_uV = di->vsel_min;
	rdesc->uV_step = di->vsel_step;
	rdesc->vsel_reg = di->vol_reg;
	/* Smallest all-ones mask wide enough to hold every selector. */
	rdesc->vsel_mask = BIT(fls(di->vsel_count - 1)) - 1;
	rdesc->enable_reg = di->en_reg;
	rdesc->enable_mask = VSEL_BUCK_EN;
	rdesc->ramp_reg = di->slew_reg;
	rdesc->ramp_mask = di->slew_mask;
	rdesc->ramp_delay_table = di->ramp_delay_table;
	rdesc->n_ramp_values = di->n_ramp_values;

	rdev = devm_regulator_register(di->dev, rdesc, config);
	return PTR_ERR_OR_ZERO(rdev);
}
/* Simple 8-bit-register / 8-bit-value I2C regmap. */
static const struct regmap_config fan53555_regmap_config = {
	.reg_bits = 8,
	.val_bits = 8,
};
/*
 * Build platform data from the device-tree node: standard regulator init
 * data plus the optional "fcs,suspend-voltage-selector" property, which
 * picks which VSEL register holds the suspend voltage.
 * Returns NULL on allocation failure.
 */
static struct fan53555_platform_data *fan53555_parse_dt(struct device *dev,
					      struct device_node *np,
					      const struct regulator_desc *desc)
{
	struct fan53555_platform_data *pdata;
	u32 tmp;

	pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
	if (!pdata)
		return NULL;

	pdata->regulator = of_get_regulator_init_data(dev, np, desc);

	/* Property is optional; sleep_vsel_id stays 0 when it is absent. */
	if (!of_property_read_u32(np, "fcs,suspend-voltage-selector", &tmp))
		pdata->sleep_vsel_id = tmp;

	return pdata;
}
/* DT match table; .data carries the fan53555_vendor enum value that
 * fan53555_regulator_probe() reads back via of_device_get_match_data(). */
static const struct of_device_id __maybe_unused fan53555_dt_ids[] = {
	{
		.compatible = "fcs,fan53526",
		.data = (void *)FAN53526_VENDOR_FAIRCHILD,
	}, {
		.compatible = "fcs,fan53555",
		.data = (void *)FAN53555_VENDOR_FAIRCHILD
	}, {
		.compatible = "rockchip,rk8600",
		.data = (void *)FAN53555_VENDOR_ROCKCHIP
	}, {
		.compatible = "rockchip,rk8602",
		.data = (void *)RK8602_VENDOR_ROCKCHIP
	}, {
		.compatible = "silergy,syr827",
		.data = (void *)FAN53555_VENDOR_SILERGY,
	}, {
		.compatible = "silergy,syr828",
		.data = (void *)FAN53555_VENDOR_SILERGY,
	}, {
		.compatible = "tcs,tcs4525",
		.data = (void *)FAN53526_VENDOR_TCS
	}, {
		.compatible = "tcs,tcs4526",
		.data = (void *)FAN53526_VENDOR_TCS
	},
	{ }
};
MODULE_DEVICE_TABLE(of, fan53555_dt_ids);
/*
 * I2C probe: gather platform data (from pdata or DT), identify the die via
 * the ID1/ID2 registers, resolve the per-vendor register layout and finally
 * register the regulator with the core.
 */
static int fan53555_regulator_probe(struct i2c_client *client)
{
	const struct i2c_device_id *id = i2c_client_get_device_id(client);
	struct device_node *np = client->dev.of_node;
	struct fan53555_device_info *di;
	struct fan53555_platform_data *pdata;
	struct regulator_config config = { };
	struct regmap *regmap;
	unsigned int val;
	int ret;

	di = devm_kzalloc(&client->dev, sizeof(struct fan53555_device_info),
					GFP_KERNEL);
	if (!di)
		return -ENOMEM;

	/* Board-file platform data wins; otherwise build it from the DT. */
	pdata = dev_get_platdata(&client->dev);
	if (!pdata)
		pdata = fan53555_parse_dt(&client->dev, np, &di->desc);

	if (!pdata || !pdata->regulator)
		return dev_err_probe(&client->dev, -ENODEV,
				     "Platform data not found!\n");

	di->regulator = pdata->regulator;
	if (client->dev.of_node) {
		di->vendor =
			(unsigned long)of_device_get_match_data(&client->dev);
	} else {
		/* if no ramp constraint set, get the pdata ramp_delay */
		if (!di->regulator->constraints.ramp_delay) {
			if (pdata->slew_rate >= ARRAY_SIZE(slew_rates))
				return dev_err_probe(&client->dev, -EINVAL,
						     "Invalid slew_rate\n");

			di->regulator->constraints.ramp_delay
					= slew_rates[pdata->slew_rate];
		}

		di->vendor = id->driver_data;
	}

	regmap = devm_regmap_init_i2c(client, &fan53555_regmap_config);
	if (IS_ERR(regmap))
		return dev_err_probe(&client->dev, PTR_ERR(regmap),
				     "Failed to allocate regmap!\n");

	di->dev = &client->dev;
	i2c_set_clientdata(client, di);
	/* Get chip ID */
	ret = regmap_read(regmap, FAN53555_ID1, &val);
	if (ret < 0)
		return dev_err_probe(&client->dev, ret, "Failed to get chip ID!\n");

	di->chip_id = val & DIE_ID;
	/* Get chip revision */
	ret = regmap_read(regmap, FAN53555_ID2, &val);
	if (ret < 0)
		return dev_err_probe(&client->dev, ret, "Failed to get chip Rev!\n");

	di->chip_rev = val & DIE_REV;
	dev_info(&client->dev, "FAN53555 Option[%d] Rev[%d] Detected!\n",
				di->chip_id, di->chip_rev);
	/* Device init */
	ret = fan53555_device_setup(di, pdata);
	if (ret < 0)
		return dev_err_probe(&client->dev, ret, "Failed to setup device!\n");

	/* Register regulator */
	config.dev = di->dev;
	config.init_data = di->regulator;
	config.regmap = regmap;
	config.driver_data = di;
	config.of_node = np;

	ret = fan53555_regulator_register(di, &config);
	if (ret < 0)
		dev_err_probe(&client->dev, ret, "Failed to register regulator!\n");

	return ret;
}
/* Legacy (non-DT) I2C id table; .driver_data mirrors the vendor enum just
 * like the of_device_id table above. */
static const struct i2c_device_id fan53555_id[] = {
	{
		.name = "fan53526",
		.driver_data = FAN53526_VENDOR_FAIRCHILD
	}, {
		.name = "fan53555",
		.driver_data = FAN53555_VENDOR_FAIRCHILD
	}, {
		.name = "rk8600",
		.driver_data = FAN53555_VENDOR_ROCKCHIP
	}, {
		.name = "rk8602",
		.driver_data = RK8602_VENDOR_ROCKCHIP
	}, {
		.name = "syr827",
		.driver_data = FAN53555_VENDOR_SILERGY
	}, {
		.name = "syr828",
		.driver_data = FAN53555_VENDOR_SILERGY
	}, {
		.name = "tcs4525",
		.driver_data = FAN53526_VENDOR_TCS
	}, {
		.name = "tcs4526",
		.driver_data = FAN53526_VENDOR_TCS
	},
	{ },
};
MODULE_DEVICE_TABLE(i2c, fan53555_id);

static struct i2c_driver fan53555_regulator_driver = {
	.driver = {
		.name = "fan53555-regulator",
		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
		.of_match_table = of_match_ptr(fan53555_dt_ids),
	},
	.probe = fan53555_regulator_probe,
	.id_table = fan53555_id,
};

module_i2c_driver(fan53555_regulator_driver);

MODULE_AUTHOR("Yunfan Zhang <[email protected]>");
MODULE_DESCRIPTION("FAN53555 regulator driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/regulator/fan53555.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* VFIO core
*
* Copyright (C) 2012 Red Hat, Inc. All rights reserved.
* Author: Alex Williamson <[email protected]>
*
* Derived from original vfio:
* Copyright 2010 Cisco Systems, Inc. All rights reserved.
* Author: Tom Lyon, [email protected]
*/
#include <linux/vfio.h>
#include <linux/iommufd.h>
#include <linux/anon_inodes.h>
#include "vfio.h"
/* Module-global VFIO state: the sysfs class plus bookkeeping for all
 * vfio_group objects. */
static struct vfio {
	struct class			*class;
	struct list_head		group_list;	/* all known groups */
	struct mutex			group_lock; /* locks group_list */
	struct ida			group_ida;	/* presumably allocates group minors — confirm at use site */
	dev_t				group_devt;	/* char-dev region for groups */
} vfio;
/*
 * Look up a device in @group by name under the group's device_lock.
 *
 * Drivers with a ->match() op decide matching themselves (a negative value
 * aborts the walk and is returned as an error); otherwise @buf is compared
 * against dev_name().  A matching device is only returned once a
 * registration reference has been taken on it; when that fails the walk
 * continues.  Returns ERR_PTR(-ENODEV) when nothing matches.
 */
static struct vfio_device *vfio_device_get_from_name(struct vfio_group *group,
						     char *buf)
{
	struct vfio_device *it, *device = ERR_PTR(-ENODEV);

	mutex_lock(&group->device_lock);
	list_for_each_entry(it, &group->device_list, group_next) {
		if (it->ops->match) {
			int ret = it->ops->match(it, buf);

			if (ret < 0) {
				device = ERR_PTR(ret);
				break;
			}
			if (!ret)
				continue;
		} else if (strcmp(dev_name(it->dev), buf)) {
			continue;
		}

		if (vfio_device_try_get_registration(it)) {
			device = it;
			break;
		}
	}
	mutex_unlock(&group->device_lock);

	return device;
}
/*
* VFIO Group fd, /dev/vfio/$GROUP
*/
/*
 * True if the group is attached to an IOMMU backend, either a legacy
 * vfio container or an iommufd context.  Caller holds group_lock.
 */
static bool vfio_group_has_iommu(struct vfio_group *group)
{
	lockdep_assert_held(&group->group_lock);
	/*
	 * There can only be users if there is a container, and if there is a
	 * container there must be users.
	 */
	WARN_ON(!group->container != !group->container_users);

	return group->container || group->iommufd;
}
/*
* VFIO_GROUP_UNSET_CONTAINER should fail if there are other users or
* if there was no container to unset. Since the ioctl is called on
* the group, we know that still exists, therefore the only valid
* transition here is 1->0.
*/
static int vfio_group_ioctl_unset_container(struct vfio_group *group)
{
	int ret = 0;

	mutex_lock(&group->group_lock);
	if (!vfio_group_has_iommu(group)) {
		/* No container/iommufd to unset */
		ret = -EINVAL;
		goto out_unlock;
	}

	if (group->container) {
		/* Only the sole remaining user may detach the container */
		if (group->container_users != 1) {
			ret = -EBUSY;
			goto out_unlock;
		}
		vfio_group_detach_container(group);
	}
	if (group->iommufd) {
		/* Drop the group's reference on the iommufd context */
		iommufd_ctx_put(group->iommufd);
		group->iommufd = NULL;
	}

out_unlock:
	mutex_unlock(&group->group_lock);
	return ret;
}
/*
 * VFIO_GROUP_SET_CONTAINER: bind the group to the backend identified by
 * the fd in @arg.  The file is probed first as a legacy vfio container,
 * then as an iommufd; -EBADFD if it is neither.  Fails with -EINVAL if
 * the group already has a backend, -ENODEV if the group is being
 * removed (iommu_group already cleared).
 */
static int vfio_group_ioctl_set_container(struct vfio_group *group,
					  int __user *arg)
{
	struct vfio_container *container;
	struct iommufd_ctx *iommufd;
	struct fd f;
	int ret;
	int fd;

	if (get_user(fd, arg))
		return -EFAULT;

	f = fdget(fd);
	if (!f.file)
		return -EBADF;

	mutex_lock(&group->group_lock);
	if (vfio_group_has_iommu(group)) {
		ret = -EINVAL;
		goto out_unlock;
	}
	if (!group->iommu_group) {
		ret = -ENODEV;
		goto out_unlock;
	}

	container = vfio_container_from_file(f.file);
	if (container) {
		ret = vfio_container_attach_group(container, group);
		goto out_unlock;
	}

	iommufd = iommufd_ctx_from_file(f.file);
	if (!IS_ERR(iommufd)) {
		/* noiommu groups get the no-iommu compat mode instead of an IOAS */
		if (IS_ENABLED(CONFIG_VFIO_NOIOMMU) &&
		    group->type == VFIO_NO_IOMMU)
			ret = iommufd_vfio_compat_set_no_iommu(iommufd);
		else
			ret = iommufd_vfio_compat_ioas_create(iommufd);

		if (ret) {
			iommufd_ctx_put(iommufd);
			goto out_unlock;
		}

		group->iommufd = iommufd;
		goto out_unlock;
	}

	/* The FD passed is not recognized. */
	ret = -EBADFD;

out_unlock:
	mutex_unlock(&group->group_lock);
	fdput(f);
	return ret;
}
/*
 * Snapshot the group's current kvm pointer into the device under
 * kvm_ref_lock; vfio_device_get_kvm_safe() holds it until the matching
 * vfio_device_put_kvm().
 */
static void vfio_device_group_get_kvm_safe(struct vfio_device *device)
{
	spin_lock(&device->group->kvm_ref_lock);
	vfio_device_get_kvm_safe(device, device->group->kvm);
	spin_unlock(&device->group->kvm_ref_lock);
}
/*
 * Open a device file through the group path.  Lock order is
 * group_lock then dev_set->lock; the error unwind mirrors the forward
 * path exactly.  The first open of a device grabs the group's kvm
 * reference; the last close drops it (see vfio_df_group_close()).
 */
static int vfio_df_group_open(struct vfio_device_file *df)
{
	struct vfio_device *device = df->device;
	int ret;

	mutex_lock(&device->group->group_lock);
	if (!vfio_group_has_iommu(device->group)) {
		ret = -EINVAL;
		goto out_unlock;
	}

	mutex_lock(&device->dev_set->lock);

	/*
	 * Before the first device open, get the KVM pointer currently
	 * associated with the group (if there is one) and obtain a reference
	 * now that will be held until the open_count reaches 0 again.  Save
	 * the pointer in the device for use by drivers.
	 */
	if (device->open_count == 0)
		vfio_device_group_get_kvm_safe(device);

	df->iommufd = device->group->iommufd;
	if (df->iommufd && vfio_device_is_noiommu(device) && device->open_count == 0) {
		/*
		 * Require no compat ioas to be assigned to proceed. The basic
		 * statement is that the user cannot have done something that
		 * implies they expected translation to exist
		 */
		if (!capable(CAP_SYS_RAWIO) ||
		    vfio_iommufd_device_has_compat_ioas(device, df->iommufd))
			ret = -EPERM;
		else
			ret = 0;
		goto out_put_kvm;
	}

	ret = vfio_df_open(df);
	if (ret)
		goto out_put_kvm;

	/* First opener attaches the device to the iommufd compat IOAS */
	if (df->iommufd && device->open_count == 1) {
		ret = vfio_iommufd_compat_attach_ioas(device, df->iommufd);
		if (ret)
			goto out_close_device;
	}

	/*
	 * Paired with smp_load_acquire() in vfio_device_fops::ioctl/
	 * read/write/mmap and vfio_file_has_device_access()
	 */
	smp_store_release(&df->access_granted, true);

	mutex_unlock(&device->dev_set->lock);
	mutex_unlock(&device->group->group_lock);
	return 0;

out_close_device:
	vfio_df_close(df);
out_put_kvm:
	df->iommufd = NULL;
	if (device->open_count == 0)
		vfio_device_put_kvm(device);
	mutex_unlock(&device->dev_set->lock);
out_unlock:
	mutex_unlock(&device->group->group_lock);
	return ret;
}
/*
 * Close a device file opened via the group path.  Takes the same locks
 * in the same order as vfio_df_group_open(); the last close drops the
 * kvm reference taken at first open.
 */
void vfio_df_group_close(struct vfio_device_file *df)
{
	struct vfio_device *device = df->device;

	mutex_lock(&device->group->group_lock);
	mutex_lock(&device->dev_set->lock);

	vfio_df_close(df);
	df->iommufd = NULL;

	if (device->open_count == 0)
		vfio_device_put_kvm(device);

	mutex_unlock(&device->dev_set->lock);
	mutex_unlock(&device->group->group_lock);
}
/*
 * Allocate a vfio_device_file, open the device and wrap it in an
 * anonymous-inode file.  On success, ownership of the caller's device
 * reference moves to the returned file and is dropped in
 * vfio_device_fops_release().
 */
static struct file *vfio_device_open_file(struct vfio_device *device)
{
	struct vfio_device_file *df;
	struct file *filep;
	int ret;

	df = vfio_allocate_device_file(device);
	if (IS_ERR(df)) {
		ret = PTR_ERR(df);
		goto err_out;
	}

	df->group = device->group;

	ret = vfio_df_group_open(df);
	if (ret)
		goto err_free;

	/*
	 * We can't use anon_inode_getfd() because we need to modify
	 * the f_mode flags directly to allow more than just ioctls
	 */
	filep = anon_inode_getfile("[vfio-device]", &vfio_device_fops,
				   df, O_RDWR);
	if (IS_ERR(filep)) {
		ret = PTR_ERR(filep);
		goto err_close_device;
	}

	/*
	 * TODO: add an anon_inode interface to do this.
	 * Appears to be missing by lack of need rather than
	 * explicitly prevented.  Now there's need.
	 */
	filep->f_mode |= (FMODE_PREAD | FMODE_PWRITE);

	if (device->group->type == VFIO_NO_IOMMU)
		dev_warn(device->dev, "vfio-noiommu device opened by user "
			 "(%s:%d)\n", current->comm, task_pid_nr(current));
	/*
	 * On success the ref of device is moved to the file and
	 * put in vfio_device_fops_release()
	 */
	return filep;

err_close_device:
	vfio_df_group_close(df);
err_free:
	kfree(df);
err_out:
	return ERR_PTR(ret);
}
/*
 * VFIO_GROUP_GET_DEVICE_FD: look up the device named by the user
 * string, open it, and return a new fd for it.  The registration
 * reference from the lookup is dropped here on failure; on success it
 * moves to the file (see vfio_device_open_file()).
 */
static int vfio_group_ioctl_get_device_fd(struct vfio_group *group,
					  char __user *arg)
{
	struct vfio_device *device;
	struct file *filep;
	char *buf;
	int fdno;
	int ret;

	buf = strndup_user(arg, PAGE_SIZE);
	if (IS_ERR(buf))
		return PTR_ERR(buf);

	device = vfio_device_get_from_name(group, buf);
	kfree(buf);
	if (IS_ERR(device))
		return PTR_ERR(device);

	fdno = get_unused_fd_flags(O_CLOEXEC);
	if (fdno < 0) {
		ret = fdno;
		goto err_put_device;
	}

	filep = vfio_device_open_file(device);
	if (IS_ERR(filep)) {
		ret = PTR_ERR(filep);
		goto err_put_fdno;
	}

	/* Publish the file only once nothing can fail */
	fd_install(fdno, filep);
	return fdno;

err_put_fdno:
	put_unused_fd(fdno);
err_put_device:
	vfio_device_put_registration(device);
	return ret;
}
/*
 * VFIO_GROUP_GET_STATUS: report viability and backend state.  Only the
 * kernel-known prefix (up to and including flags) is copied either way,
 * so newer userspace structs remain compatible.
 */
static int vfio_group_ioctl_get_status(struct vfio_group *group,
				       struct vfio_group_status __user *arg)
{
	unsigned long minsz = offsetofend(struct vfio_group_status, flags);
	struct vfio_group_status status;

	if (copy_from_user(&status, arg, minsz))
		return -EFAULT;

	if (status.argsz < minsz)
		return -EINVAL;

	status.flags = 0;

	mutex_lock(&group->group_lock);
	if (!group->iommu_group) {
		/* Group is mid-removal */
		mutex_unlock(&group->group_lock);
		return -ENODEV;
	}

	/*
	 * With the container FD the iommu_group_claim_dma_owner() is done
	 * during SET_CONTAINER but for IOMMFD this is done during
	 * VFIO_GROUP_GET_DEVICE_FD. Meaning that with iommufd
	 * VFIO_GROUP_FLAGS_VIABLE could be set but GET_DEVICE_FD will fail due
	 * to viability.
	 */
	if (vfio_group_has_iommu(group))
		status.flags |= VFIO_GROUP_FLAGS_CONTAINER_SET |
				VFIO_GROUP_FLAGS_VIABLE;
	else if (!iommu_group_dma_owner_claimed(group->iommu_group))
		status.flags |= VFIO_GROUP_FLAGS_VIABLE;
	mutex_unlock(&group->group_lock);

	if (copy_to_user(arg, &status, minsz))
		return -EFAULT;
	return 0;
}
/* Top-level ioctl dispatcher for the /dev/vfio/$GROUP chardev. */
static long vfio_group_fops_unl_ioctl(struct file *filep,
				      unsigned int cmd, unsigned long arg)
{
	struct vfio_group *group = filep->private_data;
	void __user *uarg = (void __user *)arg;

	switch (cmd) {
	case VFIO_GROUP_SET_CONTAINER:
		return vfio_group_ioctl_set_container(group, uarg);
	case VFIO_GROUP_UNSET_CONTAINER:
		return vfio_group_ioctl_unset_container(group);
	case VFIO_GROUP_GET_DEVICE_FD:
		return vfio_group_ioctl_get_device_fd(group, uarg);
	case VFIO_GROUP_GET_STATUS:
		return vfio_group_ioctl_get_status(group, uarg);
	default:
		return -ENOTTY;
	}
}
/*
 * Block the group-fd path for @device's group (used by the cdev path).
 * Fails with -EBUSY if the group fd is already open; otherwise bumps
 * cdev_device_open_cnt, which vfio_group_fops_open() checks.
 */
int vfio_device_block_group(struct vfio_device *device)
{
	struct vfio_group *group = device->group;
	int ret = 0;

	mutex_lock(&group->group_lock);
	if (group->opened_file)
		ret = -EBUSY;
	else
		group->cdev_device_open_cnt++;
	mutex_unlock(&group->group_lock);

	return ret;
}
/* Undo vfio_device_block_group(): allow group fd opens again. */
void vfio_device_unblock_group(struct vfio_device *device)
{
	struct vfio_group *group = device->group;

	mutex_lock(&group->group_lock);
	group->cdev_device_open_cnt--;
	mutex_unlock(&group->group_lock);
}
/*
 * open() for /dev/vfio/$GROUP.  Rejects: dying groups, noiommu groups
 * without CAP_SYS_RAWIO, groups already claimed by the cdev path, and
 * second opens of the group fd.
 */
static int vfio_group_fops_open(struct inode *inode, struct file *filep)
{
	struct vfio_group *group =
		container_of(inode->i_cdev, struct vfio_group, cdev);
	int ret;

	mutex_lock(&group->group_lock);

	/*
	 * drivers can be zero if this races with vfio_device_remove_group(),
	 * it will be stable at 0 under the group rwsem
	 */
	if (refcount_read(&group->drivers) == 0) {
		ret = -ENODEV;
		goto out_unlock;
	}

	if (group->type == VFIO_NO_IOMMU && !capable(CAP_SYS_RAWIO)) {
		ret = -EPERM;
		goto out_unlock;
	}

	if (group->cdev_device_open_cnt) {
		ret = -EBUSY;
		goto out_unlock;
	}

	/*
	 * Do we need multiple instances of the group open?  Seems not.
	 */
	if (group->opened_file) {
		ret = -EBUSY;
		goto out_unlock;
	}
	group->opened_file = filep;
	filep->private_data = group;
	ret = 0;
out_unlock:
	mutex_unlock(&group->group_lock);
	return ret;
}
/*
 * release() for /dev/vfio/$GROUP: detach any container, drop any
 * iommufd reference, and mark the group re-openable.
 */
static int vfio_group_fops_release(struct inode *inode, struct file *filep)
{
	struct vfio_group *group = filep->private_data;

	filep->private_data = NULL;

	mutex_lock(&group->group_lock);
	/*
	 * Device FDs hold a group file reference, therefore the group release
	 * is only called when there are no open devices.
	 */
	WARN_ON(group->notifier.head);
	if (group->container)
		vfio_group_detach_container(group);
	if (group->iommufd) {
		iommufd_ctx_put(group->iommufd);
		group->iommufd = NULL;
	}
	group->opened_file = NULL;
	mutex_unlock(&group->group_lock);
	return 0;
}
/* File operations backing the /dev/vfio/$GROUP character device. */
static const struct file_operations vfio_group_fops = {
	.owner		= THIS_MODULE,
	.unlocked_ioctl	= vfio_group_fops_unl_ioctl,
	.compat_ioctl	= compat_ptr_ioctl,
	.open		= vfio_group_fops_open,
	.release	= vfio_group_fops_release,
};
/*
* Group objects - create, release, get, put, search
*/
static struct vfio_group *
vfio_group_find_from_iommu(struct iommu_group *iommu_group)
{
struct vfio_group *group;
lockdep_assert_held(&vfio.group_lock);
/*
* group->iommu_group from the vfio.group_list cannot be NULL
* under the vfio.group_lock.
*/
list_for_each_entry(group, &vfio.group_list, vfio_next) {
if (group->iommu_group == iommu_group)
return group;
}
return NULL;
}
/*
 * struct device release callback: final teardown once the last
 * reference on group->dev is dropped (see put_device() callers).
 */
static void vfio_group_release(struct device *dev)
{
	struct vfio_group *group = container_of(dev, struct vfio_group, dev);

	mutex_destroy(&group->device_lock);
	mutex_destroy(&group->group_lock);
	/* iommu_group must already be detached; cdev opens all closed */
	WARN_ON(group->iommu_group);
	WARN_ON(group->cdev_device_open_cnt);
	ida_free(&vfio.group_ida, MINOR(group->dev.devt));
	kfree(group);
}
/*
 * Allocate and initialize a vfio_group for @iommu_group: reserve a
 * chardev minor, init the embedded device/cdev and locks, and take a
 * reference on the iommu_group.  The cdev is not yet registered; that
 * happens in vfio_create_group().
 */
static struct vfio_group *vfio_group_alloc(struct iommu_group *iommu_group,
					   enum vfio_group_type type)
{
	struct vfio_group *group;
	int minor;

	group = kzalloc(sizeof(*group), GFP_KERNEL);
	if (!group)
		return ERR_PTR(-ENOMEM);

	minor = ida_alloc_max(&vfio.group_ida, MINORMASK, GFP_KERNEL);
	if (minor < 0) {
		kfree(group);
		return ERR_PTR(minor);
	}

	device_initialize(&group->dev);
	group->dev.devt = MKDEV(MAJOR(vfio.group_devt), minor);
	group->dev.class = vfio.class;
	group->dev.release = vfio_group_release;
	cdev_init(&group->cdev, &vfio_group_fops);
	group->cdev.owner = THIS_MODULE;

	refcount_set(&group->drivers, 1);
	mutex_init(&group->group_lock);
	spin_lock_init(&group->kvm_ref_lock);
	INIT_LIST_HEAD(&group->device_list);
	mutex_init(&group->device_lock);
	group->iommu_group = iommu_group;
	/* put in vfio_group_release() */
	iommu_group_ref_get(iommu_group);
	group->type = type;
	BLOCKING_INIT_NOTIFIER_HEAD(&group->notifier);

	return group;
}
/*
 * Allocate a group, register its chardev (named "$GROUP" or
 * "noiommu-$GROUP") and add it to vfio.group_list.  Caller holds
 * vfio.group_lock.  On error the embedded device reference is dropped,
 * which frees the group via vfio_group_release().
 */
static struct vfio_group *vfio_create_group(struct iommu_group *iommu_group,
		enum vfio_group_type type)
{
	struct vfio_group *group;
	struct vfio_group *ret;
	int err;

	lockdep_assert_held(&vfio.group_lock);

	group = vfio_group_alloc(iommu_group, type);
	if (IS_ERR(group))
		return group;

	err = dev_set_name(&group->dev, "%s%d",
			   group->type == VFIO_NO_IOMMU ? "noiommu-" : "",
			   iommu_group_id(iommu_group));
	if (err) {
		ret = ERR_PTR(err);
		goto err_put;
	}

	err = cdev_device_add(&group->cdev, &group->dev);
	if (err) {
		ret = ERR_PTR(err);
		goto err_put;
	}

	list_add(&group->vfio_next, &vfio.group_list);

	return group;

err_put:
	put_device(&group->dev);
	return ret;
}
/*
 * For devices without an IOMMU (or emulated IOMMU types): fabricate a
 * fresh iommu_group, add @dev to it, and wrap it in a vfio_group.  The
 * local iommu_group reference is always dropped before returning; the
 * vfio_group keeps its own (taken in vfio_group_alloc()).
 */
static struct vfio_group *vfio_noiommu_group_alloc(struct device *dev,
		enum vfio_group_type type)
{
	struct iommu_group *iommu_group;
	struct vfio_group *group;
	int ret;

	iommu_group = iommu_group_alloc();
	if (IS_ERR(iommu_group))
		return ERR_CAST(iommu_group);

	ret = iommu_group_set_name(iommu_group, "vfio-noiommu");
	if (ret)
		goto out_put_group;
	ret = iommu_group_add_device(iommu_group, dev);
	if (ret)
		goto out_put_group;

	mutex_lock(&vfio.group_lock);
	group = vfio_create_group(iommu_group, type);
	mutex_unlock(&vfio.group_lock);
	if (IS_ERR(group)) {
		ret = PTR_ERR(group);
		goto out_remove_device;
	}
	iommu_group_put(iommu_group);
	return group;

out_remove_device:
	iommu_group_remove_device(dev);
out_put_group:
	iommu_group_put(iommu_group);
	return ERR_PTR(ret);
}
/* Return true if @dev is already registered as a device of @group. */
static bool vfio_group_has_device(struct vfio_group *group, struct device *dev)
{
	struct vfio_device *it;
	bool found = false;

	mutex_lock(&group->device_lock);
	list_for_each_entry(it, &group->device_list, group_next) {
		if (it->dev == dev) {
			found = true;
			break;
		}
	}
	mutex_unlock(&group->device_lock);

	return found;
}
/*
 * Find the existing vfio_group for @dev's iommu_group, or create one.
 * With vfio_noiommu set and no iommu_group present, fall back to a
 * synthetic noiommu group (and taint the kernel).  Returns a group
 * holding a drivers reference, or ERR_PTR.
 */
static struct vfio_group *vfio_group_find_or_alloc(struct device *dev)
{
	struct iommu_group *iommu_group;
	struct vfio_group *group;

	iommu_group = iommu_group_get(dev);
	if (!iommu_group && vfio_noiommu) {
		/*
		 * With noiommu enabled, create an IOMMU group for devices that
		 * don't already have one, implying no IOMMU hardware/driver
		 * exists.  Taint the kernel because we're about to give a DMA
		 * capable device to a user without IOMMU protection.
		 */
		group = vfio_noiommu_group_alloc(dev, VFIO_NO_IOMMU);
		if (!IS_ERR(group)) {
			add_taint(TAINT_USER, LOCKDEP_STILL_OK);
			dev_warn(dev, "Adding kernel taint for vfio-noiommu group on device\n");
		}
		return group;
	}

	if (!iommu_group)
		return ERR_PTR(-EINVAL);

	mutex_lock(&vfio.group_lock);
	group = vfio_group_find_from_iommu(iommu_group);
	if (group) {
		/* Registering the same device twice is a caller bug */
		if (WARN_ON(vfio_group_has_device(group, dev)))
			group = ERR_PTR(-EINVAL);
		else
			refcount_inc(&group->drivers);
	} else {
		group = vfio_create_group(iommu_group, VFIO_IOMMU);
	}
	mutex_unlock(&vfio.group_lock);

	/* The vfio_group holds a reference to the iommu_group */
	iommu_group_put(iommu_group);
	return group;
}
/*
 * Associate @device with a vfio_group of the requested @type.  Real
 * IOMMU devices share (or create) the group of their iommu_group;
 * noiommu/emulated types always get a freshly fabricated group.
 */
int vfio_device_set_group(struct vfio_device *device,
			  enum vfio_group_type type)
{
	struct vfio_group *group;

	group = (type == VFIO_IOMMU) ?
		vfio_group_find_or_alloc(device->dev) :
		vfio_noiommu_group_alloc(device->dev, type);

	if (IS_ERR(group))
		return PTR_ERR(group);

	/* Our reference on group is moved to the device */
	device->group = group;
	return 0;
}
/*
 * Drop @device's reference on its group.  If it was the last drivers
 * reference, tear the group down: unregister the cdev under
 * vfio.group_lock (so the name can be re-created race-free), detach
 * any container, NULL out iommu_group to block new users, and drop the
 * final device reference (which frees via vfio_group_release()).
 */
void vfio_device_remove_group(struct vfio_device *device)
{
	struct vfio_group *group = device->group;
	struct iommu_group *iommu_group;

	if (group->type == VFIO_NO_IOMMU || group->type == VFIO_EMULATED_IOMMU)
		iommu_group_remove_device(device->dev);

	/* Pairs with vfio_create_group() / vfio_group_get_from_iommu() */
	if (!refcount_dec_and_mutex_lock(&group->drivers, &vfio.group_lock))
		return;
	list_del(&group->vfio_next);

	/*
	 * We could concurrently probe another driver in the group that might
	 * race vfio_device_remove_group() with vfio_get_group(), so we have to
	 * ensure that the sysfs is all cleaned up under lock otherwise the
	 * cdev_device_add() will fail due to the name aready existing.
	 */
	cdev_device_del(&group->cdev, &group->dev);

	mutex_lock(&group->group_lock);
	/*
	 * These data structures all have paired operations that can only be
	 * undone when the caller holds a live reference on the device.  Since
	 * all pairs must be undone these WARN_ON's indicate some caller did not
	 * properly hold the group reference.
	 */
	WARN_ON(!list_empty(&group->device_list));
	WARN_ON(group->notifier.head);

	/*
	 * Revoke all users of group->iommu_group.  At this point we know there
	 * are no devices active because we are unplugging the last one.  Setting
	 * iommu_group to NULL blocks all new users.
	 */
	if (group->container)
		vfio_group_detach_container(group);
	iommu_group = group->iommu_group;
	group->iommu_group = NULL;
	mutex_unlock(&group->group_lock);
	mutex_unlock(&vfio.group_lock);

	iommu_group_put(iommu_group);
	put_device(&group->dev);
}
void vfio_device_group_register(struct vfio_device *device)
{
mutex_lock(&device->group->device_lock);
list_add(&device->group_next, &device->group->device_list);
mutex_unlock(&device->group->device_lock);
}
void vfio_device_group_unregister(struct vfio_device *device)
{
mutex_lock(&device->group->device_lock);
list_del(&device->group_next);
mutex_unlock(&device->group->device_lock);
}
/*
 * Take a container user on the group and register @device with the
 * container.  Caller holds group_lock; the group must have a container
 * attached (WARN + -EINVAL otherwise).  Undone by
 * vfio_device_group_unuse_iommu().
 */
int vfio_device_group_use_iommu(struct vfio_device *device)
{
	struct vfio_group *group = device->group;
	int ret = 0;

	lockdep_assert_held(&group->group_lock);

	if (WARN_ON(!group->container))
		return -EINVAL;

	ret = vfio_group_use_container(group);
	if (ret)
		return ret;
	vfio_device_container_register(device);
	return 0;
}
/*
 * Reverse of vfio_device_group_use_iommu(): unregister the device and
 * drop the container user.  Caller holds group_lock.
 */
void vfio_device_group_unuse_iommu(struct vfio_device *device)
{
	struct vfio_group *group = device->group;

	lockdep_assert_held(&group->group_lock);

	if (WARN_ON(!group->container))
		return;

	vfio_device_container_unregister(device);
	vfio_group_unuse_container(group);
}
bool vfio_device_has_container(struct vfio_device *device)
{
return device->group->container;
}
/*
 * Return the vfio_group behind @file, or NULL if the file is not a
 * vfio group file.
 */
struct vfio_group *vfio_group_from_file(struct file *file)
{
	if (file->f_op != &vfio_group_fops)
		return NULL;

	return file->private_data;
}
/**
* vfio_file_iommu_group - Return the struct iommu_group for the vfio group file
* @file: VFIO group file
*
* The returned iommu_group is valid as long as a ref is held on the file. This
* returns a reference on the group. This function is deprecated, only the SPAPR
* path in kvm should call it.
*/
struct iommu_group *vfio_file_iommu_group(struct file *file)
{
	struct vfio_group *group = vfio_group_from_file(file);
	struct iommu_group *iommu_group = NULL;

	/* Only the SPAPR/KVM path may use this deprecated interface */
	if (!IS_ENABLED(CONFIG_SPAPR_TCE_IOMMU))
		return NULL;

	if (!group)
		return NULL;

	mutex_lock(&group->group_lock);
	/* iommu_group is NULL once the group is being removed */
	if (group->iommu_group) {
		iommu_group = group->iommu_group;
		iommu_group_ref_get(iommu_group);
	}
	mutex_unlock(&group->group_lock);
	return iommu_group;
}
EXPORT_SYMBOL_GPL(vfio_file_iommu_group);
/**
* vfio_file_is_group - True if the file is a vfio group file
* @file: VFIO group file
*/
bool vfio_file_is_group(struct file *file)
{
return vfio_group_from_file(file);
}
EXPORT_SYMBOL_GPL(vfio_file_is_group);
/*
 * True if every device in the group reports
 * IOMMU_CAP_ENFORCE_CACHE_COHERENCY.
 */
bool vfio_group_enforced_coherent(struct vfio_group *group)
{
	struct vfio_device *it;
	bool coherent = true;

	/*
	 * If the device does not have IOMMU_CAP_ENFORCE_CACHE_COHERENCY then
	 * any domain later attached to it will also not support it.  If the
	 * cap is set then the iommu_domain eventually attached to the
	 * device/group must use a domain with enforce_cache_coherency().
	 */
	mutex_lock(&group->device_lock);
	list_for_each_entry(it, &group->device_list, group_next) {
		if (device_iommu_capable(it->dev,
					 IOMMU_CAP_ENFORCE_CACHE_COHERENCY))
			continue;
		coherent = false;
		break;
	}
	mutex_unlock(&group->device_lock);

	return coherent;
}
/*
 * Publish (or clear, with NULL) the kvm associated with the group.
 * Readers snapshot it under the same kvm_ref_lock, see
 * vfio_device_group_get_kvm_safe().
 */
void vfio_group_set_kvm(struct vfio_group *group, struct kvm *kvm)
{
	spin_lock(&group->kvm_ref_lock);
	group->kvm = kvm;
	spin_unlock(&group->kvm_ref_lock);
}
/**
 * vfio_file_has_dev - True if the VFIO file is a handle for device
 * @file: VFIO file to check
 * @device: Device that must be part of the file
 *
 * Returns true if given file has permission to manipulate the given device.
 */
bool vfio_file_has_dev(struct file *file, struct vfio_device *device)
{
	struct vfio_group *group = vfio_group_from_file(file);

	return group && group == device->group;
}
EXPORT_SYMBOL_GPL(vfio_file_has_dev);
/*
 * devnode callback: place group chardevs under /dev/vfio/ (names are
 * "$GROUP" or "noiommu-$GROUP", set in vfio_create_group()).
 */
static char *vfio_devnode(const struct device *dev, umode_t *mode)
{
	return kasprintf(GFP_KERNEL, "vfio/%s", dev_name(dev));
}
/*
 * Module init for the group layer: set up global state, the container
 * layer, the "vfio" device class and the group chardev region.  Errors
 * unwind in reverse order.
 */
int __init vfio_group_init(void)
{
	int ret;

	ida_init(&vfio.group_ida);
	mutex_init(&vfio.group_lock);
	INIT_LIST_HEAD(&vfio.group_list);

	ret = vfio_container_init();
	if (ret)
		return ret;

	/* /dev/vfio/$GROUP */
	vfio.class = class_create("vfio");
	if (IS_ERR(vfio.class)) {
		ret = PTR_ERR(vfio.class);
		goto err_group_class;
	}

	vfio.class->devnode = vfio_devnode;

	ret = alloc_chrdev_region(&vfio.group_devt, 0, MINORMASK + 1, "vfio");
	if (ret)
		goto err_alloc_chrdev;
	return 0;

err_alloc_chrdev:
	class_destroy(vfio.class);
	vfio.class = NULL;
err_group_class:
	vfio_container_cleanup();
	return ret;
}
/* Module exit: release everything set up by vfio_group_init(). */
void vfio_group_cleanup(void)
{
	/* All groups should be gone before module unload */
	WARN_ON(!list_empty(&vfio.group_list));
	ida_destroy(&vfio.group_ida);
	unregister_chrdev_region(vfio.group_devt, MINORMASK + 1);
	class_destroy(vfio.class);
	vfio.class = NULL;
	vfio_container_cleanup();
}
| linux-master | drivers/vfio/group.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* VFIO: IOMMU DMA mapping support for TCE on POWER
*
* Copyright (C) 2013 IBM Corp. All rights reserved.
* Author: Alexey Kardashevskiy <[email protected]>
* Copyright Gavin Shan, IBM Corporation 2014.
*
* Derived from original vfio_iommu_type1.c:
* Copyright (C) 2012 Red Hat, Inc. All rights reserved.
* Author: Alex Williamson <[email protected]>
*/
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/err.h>
#include <linux/vfio.h>
#include <linux/vmalloc.h>
#include <linux/sched/mm.h>
#include <linux/sched/signal.h>
#include <linux/mm.h>
#include "vfio.h"
#include <asm/iommu.h>
#include <asm/tce.h>
#include <asm/mmu_context.h>
#define DRIVER_VERSION "0.1"
#define DRIVER_AUTHOR "[email protected]"
#define DRIVER_DESC "VFIO IOMMU SPAPR TCE"
static void tce_iommu_detach_group(void *iommu_data,
struct iommu_group *iommu_group);
/*
* VFIO IOMMU fd for SPAPR_TCE IOMMU implementation
*
* This code handles mapping and unmapping of user data buffers
* into DMA'ble space using the IOMMU
*/
/* One IOMMU group attached to a tce_container (group_list entry). */
struct tce_iommu_group {
	struct list_head next;
	struct iommu_group *grp;
};
/*
* A container needs to remember which preregistered region it has
* referenced to do proper cleanup at the userspace process exit.
*/
/* Entry of tce_container::prereg_list. */
struct tce_iommu_prereg {
	struct list_head next;
	struct mm_iommu_table_group_mem_t *mem;	/* the preregistered region */
};
/*
* The container descriptor supports only a single group per container.
* Required by the API as the container is not supplied with the IOMMU group
* at the moment of initialization.
*/
struct tce_container {
	struct mutex lock;
	bool enabled;		/* locked_vm accounted, see tce_iommu_enable() */
	bool v2;		/* opened as VFIO_SPAPR_TCE_v2_IOMMU */
	bool def_window_pending;
	unsigned long locked_pages;	/* pages charged to mm's locked_vm */
	struct mm_struct *mm;	/* owning mm, set once in tce_iommu_mm_set() */
	struct iommu_table *tables[IOMMU_TABLE_GROUP_MAX_TABLES];
	struct list_head group_list;	/* struct tce_iommu_group */
	struct list_head prereg_list;	/* struct tce_iommu_prereg */
};
/*
 * Latch the container to the current mm on first use and grab a
 * reference on it.  Any later caller from a different mm gets -EPERM.
 */
static long tce_iommu_mm_set(struct tce_container *container)
{
	if (container->mm)
		return container->mm == current->mm ? 0 : -EPERM;

	BUG_ON(!current->mm);
	container->mm = current->mm;
	mmgrab(container->mm);

	return 0;
}
/*
 * Drop the container's reference on a preregistered region and remove
 * it from prereg_list.  Fails (region stays listed) if mm_iommu_put()
 * refuses, e.g. while the region is still in use.
 */
static long tce_iommu_prereg_free(struct tce_container *container,
		struct tce_iommu_prereg *tcemem)
{
	long ret;

	ret = mm_iommu_put(container->mm, tcemem->mem);
	if (ret)
		return ret;

	list_del(&tcemem->next);
	kfree(tcemem);

	return 0;
}
/*
 * VFIO_IOMMU_SPAPR_UNREGISTER_MEMORY backend: find the preregistered
 * region matching (@vaddr, @size) and release it.  The mm_iommu_get()
 * lookup reference is always dropped before returning.
 */
static long tce_iommu_unregister_pages(struct tce_container *container,
		__u64 vaddr, __u64 size)
{
	struct mm_iommu_table_group_mem_t *mem;
	struct tce_iommu_prereg *tcemem;
	bool found = false;
	long ret;

	/* Both address and size must be page aligned */
	if ((vaddr & ~PAGE_MASK) || (size & ~PAGE_MASK))
		return -EINVAL;

	mem = mm_iommu_get(container->mm, vaddr, size >> PAGE_SHIFT);
	if (!mem)
		return -ENOENT;

	list_for_each_entry(tcemem, &container->prereg_list, next) {
		if (tcemem->mem == mem) {
			found = true;
			break;
		}
	}

	if (!found)
		ret = -ENOENT;
	else
		ret = tce_iommu_prereg_free(container, tcemem);

	mm_iommu_put(container->mm, mem);

	return ret;
}
/*
 * VFIO_IOMMU_SPAPR_REGISTER_MEMORY backend: preregister (pin) the user
 * range (@vaddr, @size) with the mm and remember it on prereg_list.
 * Registering the same region twice in this container yields -EBUSY.
 */
static long tce_iommu_register_pages(struct tce_container *container,
		__u64 vaddr, __u64 size)
{
	long ret = 0;
	struct mm_iommu_table_group_mem_t *mem = NULL;
	struct tce_iommu_prereg *tcemem;
	unsigned long entries = size >> PAGE_SHIFT;

	/* Page-aligned, and no wraparound of vaddr + size */
	if ((vaddr & ~PAGE_MASK) || (size & ~PAGE_MASK) ||
			((vaddr + size) < vaddr))
		return -EINVAL;

	mem = mm_iommu_get(container->mm, vaddr, entries);
	if (mem) {
		list_for_each_entry(tcemem, &container->prereg_list, next) {
			if (tcemem->mem == mem) {
				ret = -EBUSY;
				goto put_exit;
			}
		}
	} else {
		ret = mm_iommu_new(container->mm, vaddr, entries, &mem);
		if (ret)
			return ret;
	}

	tcemem = kzalloc(sizeof(*tcemem), GFP_KERNEL);
	if (!tcemem) {
		ret = -ENOMEM;
		goto put_exit;
	}

	tcemem->mem = mem;
	list_add(&tcemem->next, &container->prereg_list);

	container->enabled = true;

	return 0;

put_exit:
	mm_iommu_put(container->mm, mem);
	return ret;
}
/*
 * Check that the page backing @hpa is large enough to hold one IOMMU
 * page of size 1 << @it_page_shift, so the device cannot reach memory
 * outside what was pinned.
 */
static bool tce_page_is_contained(struct mm_struct *mm, unsigned long hpa,
		unsigned int it_page_shift)
{
	unsigned long size = 0;
	struct page *pg;

	/* Device memory is acceptable only if it fills the TCE page exactly */
	if (mm_iommu_is_devmem(mm, hpa, it_page_shift, &size))
		return size == (1UL << it_page_shift);

	/*
	 * Check that the TCE table granularity is not bigger than the size of
	 * a page we just found.  Otherwise the hardware can get access to
	 * a bigger memory chunk that it should.
	 */
	pg = pfn_to_page(hpa >> PAGE_SHIFT);

	return page_shift(compound_head(pg)) >= it_page_shift;
}
/* True if at least one IOMMU group is attached to the container. */
static inline bool tce_groups_attached(struct tce_container *container)
{
	return !list_empty(&container->group_list);
}
/*
 * Locate the iommu_table whose window covers @ioba.  On success store
 * it in *@ptbl and return its slot index; return -1 if no table
 * covers the address.
 */
static long tce_iommu_find_table(struct tce_container *container,
		phys_addr_t ioba, struct iommu_table **ptbl)
{
	long i;

	for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
		struct iommu_table *tbl = container->tables[i];
		unsigned long entry, first, last;

		if (!tbl)
			continue;

		entry = ioba >> tbl->it_page_shift;
		first = tbl->it_offset;
		last = first + tbl->it_size;

		if (entry >= first && entry < last) {
			*ptbl = tbl;
			return i;
		}
	}

	return -1;
}
/* Return the first empty table slot, or -ENOSPC if all are taken. */
static int tce_iommu_find_free_table(struct tce_container *container)
{
	int i = 0;

	while (i < IOMMU_TABLE_GROUP_MAX_TABLES) {
		if (!container->tables[i])
			return i;
		++i;
	}

	return -ENOSPC;
}
/*
 * VFIO_IOMMU_ENABLE backend (v1 API): charge the worst-case locked
 * memory (the whole 32-bit DMA window) to the owning mm and mark the
 * container enabled.  Requires at least one attached group.
 */
static int tce_iommu_enable(struct tce_container *container)
{
	int ret = 0;
	unsigned long locked;
	struct iommu_table_group *table_group;
	struct tce_iommu_group *tcegrp;

	if (container->enabled)
		return -EBUSY;

	/*
	 * When userspace pages are mapped into the IOMMU, they are effectively
	 * locked memory, so, theoretically, we need to update the accounting
	 * of locked pages on each map and unmap.  For powerpc, the map unmap
	 * paths can be very hot, though, and the accounting would kill
	 * performance, especially since it would be difficult to impossible
	 * to handle the accounting in real mode only.
	 *
	 * To address that, rather than precisely accounting every page, we
	 * instead account for a worst case on locked memory when the iommu is
	 * enabled and disabled.  The worst case upper bound on locked memory
	 * is the size of the whole iommu window, which is usually relatively
	 * small (compared to total memory sizes) on POWER hardware.
	 *
	 * Also we don't have a nice way to fail on H_PUT_TCE due to ulimits,
	 * that would effectively kill the guest at random points, much better
	 * enforcing the limit based on the max that the guest can map.
	 *
	 * Unfortunately at the moment it counts whole tables, no matter how
	 * much memory the guest has.  I.e. for 4GB guest and 4 IOMMU groups
	 * each with 2GB DMA window, 8GB will be counted here.  The reason for
	 * this is that we cannot tell here the amount of RAM used by the guest
	 * as this information is only available from KVM and VFIO is
	 * KVM agnostic.
	 *
	 * So we do not allow enabling a container without a group attached
	 * as there is no way to know how much we should increment
	 * the locked_vm counter.
	 */
	if (!tce_groups_attached(container))
		return -ENODEV;

	tcegrp = list_first_entry(&container->group_list,
			struct tce_iommu_group, next);
	table_group = iommu_group_get_iommudata(tcegrp->grp);
	if (!table_group)
		return -ENODEV;

	if (!table_group->tce32_size)
		return -EPERM;

	ret = tce_iommu_mm_set(container);
	if (ret)
		return ret;

	locked = table_group->tce32_size >> PAGE_SHIFT;
	ret = account_locked_vm(container->mm, locked, true);
	if (ret)
		return ret;

	container->locked_pages = locked;

	container->enabled = true;

	return ret;
}
/*
 * VFIO_IOMMU_DISABLE backend: undo the locked_vm charge made by
 * tce_iommu_enable().  No-op if the container is not enabled.
 */
static void tce_iommu_disable(struct tce_container *container)
{
	if (!container->enabled)
		return;

	container->enabled = false;

	BUG_ON(!container->mm);
	account_locked_vm(container->mm, container->locked_pages, false);
}
/*
 * vfio_iommu_driver_ops::open - allocate the per-fd container.  @arg is
 * the IOMMU type requested by userspace (v1 or v2 SPAPR TCE).
 */
static void *tce_iommu_open(unsigned long arg)
{
	struct tce_container *container;

	if ((arg != VFIO_SPAPR_TCE_IOMMU) && (arg != VFIO_SPAPR_TCE_v2_IOMMU)) {
		pr_err("tce_vfio: Wrong IOMMU type\n");
		return ERR_PTR(-EINVAL);
	}

	container = kzalloc(sizeof(*container), GFP_KERNEL);
	if (!container)
		return ERR_PTR(-ENOMEM);

	mutex_init(&container->lock);
	INIT_LIST_HEAD_RCU(&container->group_list);
	INIT_LIST_HEAD_RCU(&container->prereg_list);

	container->v2 = arg == VFIO_SPAPR_TCE_v2_IOMMU;

	return container;
}
static int tce_iommu_clear(struct tce_container *container,
struct iommu_table *tbl,
unsigned long entry, unsigned long pages);
static void tce_iommu_free_table(struct tce_container *container,
struct iommu_table *tbl);
/*
 * vfio_iommu_driver_ops::release - tear down the container: detach all
 * groups, free VFIO-created tables, drop preregistered regions, undo
 * locked_vm accounting and release the mm reference.
 */
static void tce_iommu_release(void *iommu_data)
{
	struct tce_container *container = iommu_data;
	struct tce_iommu_group *tcegrp;
	struct tce_iommu_prereg *tcemem, *tmtmp;
	long i;

	while (tce_groups_attached(container)) {
		tcegrp = list_first_entry(&container->group_list,
				struct tce_iommu_group, next);
		tce_iommu_detach_group(iommu_data, tcegrp->grp);
	}

	/*
	 * If VFIO created a table, it was not disposed
	 * by tce_iommu_detach_group() so do it now.
	 */
	for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
		struct iommu_table *tbl = container->tables[i];

		if (!tbl)
			continue;

		tce_iommu_clear(container, tbl, tbl->it_offset, tbl->it_size);
		tce_iommu_free_table(container, tbl);
	}

	list_for_each_entry_safe(tcemem, tmtmp, &container->prereg_list, next)
		WARN_ON(tce_iommu_prereg_free(container, tcemem));

	tce_iommu_disable(container);
	if (container->mm)
		mmdrop(container->mm);
	mutex_destroy(&container->lock);

	kfree(container);
}
/* Unpin the page previously pinned by tce_iommu_use_page(). */
static void tce_iommu_unuse_page(unsigned long hpa)
{
	unpin_user_page(pfn_to_page(hpa >> PAGE_SHIFT));
}
/*
 * Translate userspace address @tce to a host physical address via the
 * preregistered-memory tables (v2 path).  On success *@phpa holds the
 * HPA and *@pmem the region it came from; -EINVAL if the address is
 * not inside any preregistered region or the translation fails.
 */
static int tce_iommu_prereg_ua_to_hpa(struct tce_container *container,
		unsigned long tce, unsigned long shift,
		unsigned long *phpa, struct mm_iommu_table_group_mem_t **pmem)
{
	struct mm_iommu_table_group_mem_t *mem;

	mem = mm_iommu_lookup(container->mm, tce, 1ULL << shift);
	if (!mem || mm_iommu_ua_to_hpa(mem, tce, shift, phpa))
		return -EINVAL;

	*pmem = mem;

	return 0;
}
/*
 * v2 counterpart of tce_iommu_unuse_page(): look up the cached
 * userspace address for @entry, decrement the mapped count of the
 * preregistered region it belongs to, and clear the cache slot.
 */
static void tce_iommu_unuse_page_v2(struct tce_container *container,
		struct iommu_table *tbl, unsigned long entry)
{
	struct mm_iommu_table_group_mem_t *mem = NULL;
	int ret;
	unsigned long hpa = 0;
	__be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY_RO(tbl, entry);

	if (!pua)
		return;

	ret = tce_iommu_prereg_ua_to_hpa(container, be64_to_cpu(*pua),
			tbl->it_page_shift, &hpa, &mem);
	if (ret)
		pr_debug("%s: tce %llx at #%lx was not cached, ret=%d\n",
				__func__, be64_to_cpu(*pua), entry, ret);

	if (mem)
		mm_iommu_mapped_dec(mem);

	*pua = cpu_to_be64(0);
}
/*
 * Clear @pages TCE entries starting at @entry: exchange each in-use
 * entry with an empty one, release the backing page (pinned page for
 * v1, preregistered region refcount for v2), then invalidate the whole
 * range once at the end.
 */
static int tce_iommu_clear(struct tce_container *container,
		struct iommu_table *tbl,
		unsigned long entry, unsigned long pages)
{
	unsigned long oldhpa;
	long ret;
	enum dma_data_direction direction;
	unsigned long lastentry = entry + pages, firstentry = entry;

	for ( ; entry < lastentry; ++entry) {
		if (tbl->it_indirect_levels && tbl->it_userspace) {
			/*
			 * For multilevel tables, we can take a shortcut here
			 * and skip some TCEs as we know that the userspace
			 * addresses cache is a mirror of the real TCE table
			 * and if it is missing some indirect levels, then
			 * the hardware table does not have them allocated
			 * either and therefore does not require updating.
			 */
			__be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY_RO(tbl,
					entry);
			if (!pua) {
				/* align to level_size which is power of two */
				entry |= tbl->it_level_size - 1;
				continue;
			}
		}

		cond_resched();

		direction = DMA_NONE;
		oldhpa = 0;
		ret = iommu_tce_xchg_no_kill(container->mm, tbl, entry, &oldhpa,
				&direction);
		if (ret)
			continue;

		/* direction == DMA_NONE means the entry was already empty */
		if (direction == DMA_NONE)
			continue;

		if (container->v2) {
			tce_iommu_unuse_page_v2(container, tbl, entry);
			continue;
		}

		tce_iommu_unuse_page(oldhpa);
	}

	iommu_tce_kill(tbl, firstentry, pages);

	return 0;
}
/*
 * Pin the userspace page backing @tce and return its host physical
 * address via @hpa. Returns 0 on success, -EFAULT if pinning failed.
 */
static int tce_iommu_use_page(unsigned long tce, unsigned long *hpa)
{
	struct page *pg = NULL;
	unsigned int gup_flags = 0;

	/* Anything the device may write to must be pinned writable */
	if (iommu_tce_direction(tce) != DMA_TO_DEVICE)
		gup_flags = FOLL_WRITE;

	if (pin_user_pages_fast(tce & PAGE_MASK, 1, gup_flags, &pg) != 1)
		return -EFAULT;

	*hpa = __pa((unsigned long) page_address(pg));
	return 0;
}
/*
 * v1 map path: populate @pages TCEs starting at @entry from userspace
 * address @tce, pinning each page via tce_iommu_use_page(). On failure
 * every entry set so far is rolled back with tce_iommu_clear().
 */
static long tce_iommu_build(struct tce_container *container,
		struct iommu_table *tbl,
		unsigned long entry, unsigned long tce, unsigned long pages,
		enum dma_data_direction direction)
{
	long i, ret = 0;
	unsigned long hpa;
	enum dma_data_direction dirtmp;
	for (i = 0; i < pages; ++i) {
		/* Sub-IOMMU-page offset of the userspace address */
		unsigned long offset = tce & IOMMU_PAGE_MASK(tbl) & ~PAGE_MASK;
		ret = tce_iommu_use_page(tce, &hpa);
		if (ret)
			break;
		if (!tce_page_is_contained(container->mm, hpa,
				tbl->it_page_shift)) {
			ret = -EPERM;
			break;
		}
		hpa |= offset;
		dirtmp = direction;
		ret = iommu_tce_xchg_no_kill(container->mm, tbl, entry + i,
				&hpa, &dirtmp);
		if (ret) {
			tce_iommu_unuse_page(hpa);
			pr_err("iommu_tce: %s failed ioba=%lx, tce=%lx, ret=%ld\n",
					__func__, entry << tbl->it_page_shift,
					tce, ret);
			break;
		}
		/* xchg returned a previously mapped page - release it */
		if (dirtmp != DMA_NONE)
			tce_iommu_unuse_page(hpa);
		tce += IOMMU_PAGE_SIZE(tbl);
	}
	if (ret)
		tce_iommu_clear(container, tbl, entry, i);
	else
		iommu_tce_kill(tbl, entry, pages);
	return ret;
}
/*
 * v2 map path: like tce_iommu_build() but pages come from memory
 * preregistered via VFIO_IOMMU_SPAPR_REGISTER_MEMORY; the userspace
 * address of each TCE is cached in the table for later teardown.
 */
static long tce_iommu_build_v2(struct tce_container *container,
		struct iommu_table *tbl,
		unsigned long entry, unsigned long tce, unsigned long pages,
		enum dma_data_direction direction)
{
	long i, ret = 0;
	unsigned long hpa;
	enum dma_data_direction dirtmp;
	for (i = 0; i < pages; ++i) {
		struct mm_iommu_table_group_mem_t *mem = NULL;
		__be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY(tbl, entry + i);
		ret = tce_iommu_prereg_ua_to_hpa(container,
				tce, tbl->it_page_shift, &hpa, &mem);
		if (ret)
			break;
		if (!tce_page_is_contained(container->mm, hpa,
				tbl->it_page_shift)) {
			ret = -EPERM;
			break;
		}
		/* Preserve offset within IOMMU page */
		hpa |= tce & IOMMU_PAGE_MASK(tbl) & ~PAGE_MASK;
		dirtmp = direction;
		/* The registered region is being unregistered */
		if (mm_iommu_mapped_inc(mem))
			break;
		ret = iommu_tce_xchg_no_kill(container->mm, tbl, entry + i,
				&hpa, &dirtmp);
		if (ret) {
			/* dirtmp cannot be DMA_NONE here */
			tce_iommu_unuse_page_v2(container, tbl, entry + i);
			pr_err("iommu_tce: %s failed ioba=%lx, tce=%lx, ret=%ld\n",
					__func__, entry << tbl->it_page_shift,
					tce, ret);
			break;
		}
		/* xchg returned a previously mapped entry - release it */
		if (dirtmp != DMA_NONE)
			tce_iommu_unuse_page_v2(container, tbl, entry + i);
		/* Cache the userspace address for tce_iommu_unuse_page_v2() */
		*pua = cpu_to_be64(tce);
		tce += IOMMU_PAGE_SIZE(tbl);
	}
	if (ret)
		tce_iommu_clear(container, tbl, entry, i);
	else
		iommu_tce_kill(tbl, entry, pages);
	return ret;
}
/*
 * Allocate a hardware TCE table through the platform ops, accounting its
 * backing memory against the owner's locked_vm limit first.
 */
static long tce_iommu_create_table(struct tce_container *container,
			struct iommu_table_group *table_group,
			int num,
			__u32 page_shift,
			__u64 window_size,
			__u32 levels,
			struct iommu_table **ptbl)
{
	long ret, table_size;
	table_size = table_group->ops->get_table_size(page_shift, window_size,
			levels);
	if (!table_size)
		return -EINVAL;
	/* Charge locked_vm before allocating; undone in tce_iommu_free_table() */
	ret = account_locked_vm(container->mm, table_size >> PAGE_SHIFT, true);
	if (ret)
		return ret;
	ret = table_group->ops->create_table(table_group, num,
			page_shift, window_size, levels, ptbl);
	/* Sanity-check the platform's result against the advertised size */
	WARN_ON(!ret && !(*ptbl)->it_ops->free);
	WARN_ON(!ret && ((*ptbl)->it_allocated_size > table_size));
	return ret;
}
/*
 * Release a TCE table and unaccount its backing memory from the
 * owner's locked_vm (reverses tce_iommu_create_table()).
 */
static void tce_iommu_free_table(struct tce_container *container,
		struct iommu_table *tbl)
{
	unsigned long npages = tbl->it_allocated_size >> PAGE_SHIFT;

	iommu_tce_table_put(tbl);
	account_locked_vm(container->mm, npages, false);
}
/*
 * Create a DMA window in a free table slot and program it into every
 * attached group. On success *start_addr receives the bus address the
 * platform assigned to the window.
 */
static long tce_iommu_create_window(struct tce_container *container,
		__u32 page_shift, __u64 window_size, __u32 levels,
		__u64 *start_addr)
{
	struct tce_iommu_group *tcegrp;
	struct iommu_table_group *table_group;
	struct iommu_table *tbl = NULL;
	long ret, num;
	num = tce_iommu_find_free_table(container);
	if (num < 0)
		return num;
	/* Get the first group for ops::create_table */
	tcegrp = list_first_entry(&container->group_list,
			struct tce_iommu_group, next);
	table_group = iommu_group_get_iommudata(tcegrp->grp);
	if (!table_group)
		return -EFAULT;
	/* The requested page size must be supported by the hardware */
	if (!(table_group->pgsizes & (1ULL << page_shift)))
		return -EINVAL;
	if (!table_group->ops->set_window || !table_group->ops->unset_window ||
			!table_group->ops->get_table_size ||
			!table_group->ops->create_table)
		return -EPERM;
	/* Create TCE table */
	ret = tce_iommu_create_table(container, table_group, num,
			page_shift, window_size, levels, &tbl);
	if (ret)
		return ret;
	BUG_ON(!tbl->it_ops->free);
	/*
	 * Program the table to every group.
	 * Groups have been tested for compatibility at the attach time.
	 */
	list_for_each_entry(tcegrp, &container->group_list, next) {
		table_group = iommu_group_get_iommudata(tcegrp->grp);
		ret = table_group->ops->set_window(table_group, num, tbl);
		if (ret)
			goto unset_exit;
	}
	container->tables[num] = tbl;
	/* Return start address assigned by platform in create_table() */
	*start_addr = tbl->it_offset << tbl->it_page_shift;
	return 0;
unset_exit:
	/* Roll back: unprogram from all groups, then free the table */
	list_for_each_entry(tcegrp, &container->group_list, next) {
		table_group = iommu_group_get_iommudata(tcegrp->grp);
		table_group->ops->unset_window(table_group, num);
	}
	tce_iommu_free_table(container, tbl);
	return ret;
}
/*
 * Remove the DMA window starting at @start_addr: unprogram it from every
 * attached group, clear its TCEs and free the table.
 */
static long tce_iommu_remove_window(struct tce_container *container,
		__u64 start_addr)
{
	struct iommu_table_group *table_group = NULL;
	struct iommu_table *tbl;
	struct tce_iommu_group *tcegrp;
	int num;
	num = tce_iommu_find_table(container, start_addr, &tbl);
	if (num < 0)
		return -EINVAL;
	BUG_ON(!tbl->it_size);
	/* Detach groups from IOMMUs */
	list_for_each_entry(tcegrp, &container->group_list, next) {
		table_group = iommu_group_get_iommudata(tcegrp->grp);
		/*
		 * SPAPR TCE IOMMU exposes the default DMA window to
		 * the guest via dma32_window_start/size of
		 * VFIO_IOMMU_SPAPR_TCE_GET_INFO. Some platforms allow
		 * the userspace to remove this window, some do not so
		 * here we check for the platform capability.
		 */
		if (!table_group->ops || !table_group->ops->unset_window)
			return -EPERM;
		table_group->ops->unset_window(table_group, num);
	}
	/* Free table */
	tce_iommu_clear(container, tbl, tbl->it_offset, tbl->it_size);
	tce_iommu_free_table(container, tbl);
	container->tables[num] = NULL;
	return 0;
}
/*
 * Lazily create the default 32-bit DMA window if one was requested at
 * attach time and has not been created yet. No-op when nothing pending.
 */
static long tce_iommu_create_default_window(struct tce_container *container)
{
	long ret;
	__u64 start_addr = 0;
	struct tce_iommu_group *tcegrp;
	struct iommu_table_group *table_group;
	if (!container->def_window_pending)
		return 0;
	if (!tce_groups_attached(container))
		return -ENODEV;
	tcegrp = list_first_entry(&container->group_list,
			struct tce_iommu_group, next);
	table_group = iommu_group_get_iommudata(tcegrp->grp);
	if (!table_group)
		return -ENODEV;
	ret = tce_iommu_create_window(container, IOMMU_PAGE_SHIFT_4K,
			table_group->tce32_size, 1, &start_addr);
	/* The default window is expected to start at bus address 0 */
	WARN_ON_ONCE(!ret && start_addr);
	if (!ret)
		container->def_window_pending = false;
	return ret;
}
/*
 * Handle a VFIO_EEH_PE_OP ioctl for the EEH PE backing @group: copy the
 * vfio_eeh_pe_op argument from userspace, validate it and dispatch to the
 * matching eeh_* helper.
 *
 * Returns the helper's result (0 or a positive op-specific value) on
 * success, -errno on failure.
 *
 * Fix: removed the unreachable "break" that followed
 * "return eeh_pe_get_state(pe)".
 */
static long vfio_spapr_ioctl_eeh_pe_op(struct iommu_group *group,
				       unsigned long arg)
{
	struct eeh_pe *pe;
	struct vfio_eeh_pe_op op;
	unsigned long minsz;

	pe = eeh_iommu_group_to_pe(group);
	if (!pe)
		return -ENODEV;

	minsz = offsetofend(struct vfio_eeh_pe_op, op);
	if (copy_from_user(&op, (void __user *)arg, minsz))
		return -EFAULT;
	if (op.argsz < minsz || op.flags)
		return -EINVAL;

	switch (op.op) {
	case VFIO_EEH_PE_DISABLE:
		return eeh_pe_set_option(pe, EEH_OPT_DISABLE);
	case VFIO_EEH_PE_ENABLE:
		return eeh_pe_set_option(pe, EEH_OPT_ENABLE);
	case VFIO_EEH_PE_UNFREEZE_IO:
		return eeh_pe_set_option(pe, EEH_OPT_THAW_MMIO);
	case VFIO_EEH_PE_UNFREEZE_DMA:
		return eeh_pe_set_option(pe, EEH_OPT_THAW_DMA);
	case VFIO_EEH_PE_GET_STATE:
		return eeh_pe_get_state(pe);
	case VFIO_EEH_PE_RESET_DEACTIVATE:
		return eeh_pe_reset(pe, EEH_RESET_DEACTIVATE, true);
	case VFIO_EEH_PE_RESET_HOT:
		return eeh_pe_reset(pe, EEH_RESET_HOT, true);
	case VFIO_EEH_PE_RESET_FUNDAMENTAL:
		return eeh_pe_reset(pe, EEH_RESET_FUNDAMENTAL, true);
	case VFIO_EEH_PE_CONFIGURE:
		return eeh_pe_configure(pe);
	case VFIO_EEH_PE_INJECT_ERR:
		/* This op carries an extra payload - re-copy up to err.mask */
		minsz = offsetofend(struct vfio_eeh_pe_op, err.mask);
		if (op.argsz < minsz)
			return -EINVAL;
		if (copy_from_user(&op, (void __user *)arg, minsz))
			return -EFAULT;
		return eeh_pe_inject_err(pe, op.err.type, op.err.func,
					 op.err.addr, op.err.mask);
	default:
		return -EINVAL;
	}
}
/*
 * Main ioctl handler for the sPAPR TCE IOMMU backend, covering both the
 * v1 (VFIO_SPAPR_TCE_IOMMU) and v2 (VFIO_SPAPR_TCE_v2_IOMMU) interfaces.
 *
 * Fix: the four copy_from_user() calls reading "param" had "&param"
 * corrupted into the pilcrow sign ("&para" + "m" mangled by an encoding
 * pass); restored to "&param".
 */
static long tce_iommu_ioctl(void *iommu_data,
				 unsigned int cmd, unsigned long arg)
{
	struct tce_container *container = iommu_data;
	unsigned long minsz, ddwsz;
	long ret;

	switch (cmd) {
	case VFIO_CHECK_EXTENSION:
		switch (arg) {
		case VFIO_SPAPR_TCE_IOMMU:
		case VFIO_SPAPR_TCE_v2_IOMMU:
			return 1;
		case VFIO_EEH:
			return eeh_enabled();
		default:
			return 0;
		}
	}

	/*
	 * Sanity check to prevent one userspace from manipulating
	 * another userspace mm.
	 */
	BUG_ON(!container);
	if (container->mm && container->mm != current->mm)
		return -EPERM;

	switch (cmd) {
	case VFIO_IOMMU_SPAPR_TCE_GET_INFO: {
		struct vfio_iommu_spapr_tce_info info;
		struct tce_iommu_group *tcegrp;
		struct iommu_table_group *table_group;

		if (!tce_groups_attached(container))
			return -ENXIO;

		tcegrp = list_first_entry(&container->group_list,
				struct tce_iommu_group, next);
		table_group = iommu_group_get_iommudata(tcegrp->grp);

		if (!table_group)
			return -ENXIO;

		minsz = offsetofend(struct vfio_iommu_spapr_tce_info,
				dma32_window_size);

		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		if (info.argsz < minsz)
			return -EINVAL;

		info.dma32_window_start = table_group->tce32_start;
		info.dma32_window_size = table_group->tce32_size;
		info.flags = 0;
		memset(&info.ddw, 0, sizeof(info.ddw));

		/* Dynamic DMA windows are a v2-only capability */
		if (table_group->max_dynamic_windows_supported &&
				container->v2) {
			info.flags |= VFIO_IOMMU_SPAPR_INFO_DDW;
			info.ddw.pgsizes = table_group->pgsizes;
			info.ddw.max_dynamic_windows_supported =
				table_group->max_dynamic_windows_supported;
			info.ddw.levels = table_group->max_levels;
		}

		ddwsz = offsetofend(struct vfio_iommu_spapr_tce_info, ddw);

		if (info.argsz >= ddwsz)
			minsz = ddwsz;

		if (copy_to_user((void __user *)arg, &info, minsz))
			return -EFAULT;

		return 0;
	}
	case VFIO_IOMMU_MAP_DMA: {
		struct vfio_iommu_type1_dma_map param;
		struct iommu_table *tbl = NULL;
		long num;
		enum dma_data_direction direction;

		if (!container->enabled)
			return -EPERM;

		minsz = offsetofend(struct vfio_iommu_type1_dma_map, size);

		if (copy_from_user(&param, (void __user *)arg, minsz))
			return -EFAULT;

		if (param.argsz < minsz)
			return -EINVAL;

		if (param.flags & ~(VFIO_DMA_MAP_FLAG_READ |
				VFIO_DMA_MAP_FLAG_WRITE))
			return -EINVAL;

		ret = tce_iommu_create_default_window(container);
		if (ret)
			return ret;

		num = tce_iommu_find_table(container, param.iova, &tbl);
		if (num < 0)
			return -ENXIO;

		/* Size and vaddr must be IOMMU-page aligned */
		if ((param.size & ~IOMMU_PAGE_MASK(tbl)) ||
				(param.vaddr & ~IOMMU_PAGE_MASK(tbl)))
			return -EINVAL;

		/* iova is checked by the IOMMU API */
		if (param.flags & VFIO_DMA_MAP_FLAG_READ) {
			if (param.flags & VFIO_DMA_MAP_FLAG_WRITE)
				direction = DMA_BIDIRECTIONAL;
			else
				direction = DMA_TO_DEVICE;
		} else {
			if (param.flags & VFIO_DMA_MAP_FLAG_WRITE)
				direction = DMA_FROM_DEVICE;
			else
				return -EINVAL;
		}

		ret = iommu_tce_put_param_check(tbl, param.iova, param.vaddr);
		if (ret)
			return ret;

		if (container->v2)
			ret = tce_iommu_build_v2(container, tbl,
					param.iova >> tbl->it_page_shift,
					param.vaddr,
					param.size >> tbl->it_page_shift,
					direction);
		else
			ret = tce_iommu_build(container, tbl,
					param.iova >> tbl->it_page_shift,
					param.vaddr,
					param.size >> tbl->it_page_shift,
					direction);

		iommu_flush_tce(tbl);

		return ret;
	}
	case VFIO_IOMMU_UNMAP_DMA: {
		struct vfio_iommu_type1_dma_unmap param;
		struct iommu_table *tbl = NULL;
		long num;

		if (!container->enabled)
			return -EPERM;

		minsz = offsetofend(struct vfio_iommu_type1_dma_unmap,
				size);

		if (copy_from_user(&param, (void __user *)arg, minsz))
			return -EFAULT;

		if (param.argsz < minsz)
			return -EINVAL;

		/* No flag is supported now */
		if (param.flags)
			return -EINVAL;

		ret = tce_iommu_create_default_window(container);
		if (ret)
			return ret;

		num = tce_iommu_find_table(container, param.iova, &tbl);
		if (num < 0)
			return -ENXIO;

		if (param.size & ~IOMMU_PAGE_MASK(tbl))
			return -EINVAL;

		ret = iommu_tce_clear_param_check(tbl, param.iova, 0,
				param.size >> tbl->it_page_shift);
		if (ret)
			return ret;

		ret = tce_iommu_clear(container, tbl,
				param.iova >> tbl->it_page_shift,
				param.size >> tbl->it_page_shift);
		iommu_flush_tce(tbl);

		return ret;
	}
	case VFIO_IOMMU_SPAPR_REGISTER_MEMORY: {
		struct vfio_iommu_spapr_register_memory param;

		if (!container->v2)
			break;

		minsz = offsetofend(struct vfio_iommu_spapr_register_memory,
				size);

		/* Pin the container to the caller's mm on first use */
		ret = tce_iommu_mm_set(container);
		if (ret)
			return ret;

		if (copy_from_user(&param, (void __user *)arg, minsz))
			return -EFAULT;

		if (param.argsz < minsz)
			return -EINVAL;

		/* No flag is supported now */
		if (param.flags)
			return -EINVAL;

		mutex_lock(&container->lock);
		ret = tce_iommu_register_pages(container, param.vaddr,
				param.size);
		mutex_unlock(&container->lock);

		return ret;
	}
	case VFIO_IOMMU_SPAPR_UNREGISTER_MEMORY: {
		struct vfio_iommu_spapr_register_memory param;

		if (!container->v2)
			break;

		if (!container->mm)
			return -EPERM;

		minsz = offsetofend(struct vfio_iommu_spapr_register_memory,
				size);

		if (copy_from_user(&param, (void __user *)arg, minsz))
			return -EFAULT;

		if (param.argsz < minsz)
			return -EINVAL;

		/* No flag is supported now */
		if (param.flags)
			return -EINVAL;

		mutex_lock(&container->lock);
		ret = tce_iommu_unregister_pages(container, param.vaddr,
				param.size);
		mutex_unlock(&container->lock);

		return ret;
	}
	case VFIO_IOMMU_ENABLE:
		/* v1 only; v2 containers are always enabled */
		if (container->v2)
			break;

		mutex_lock(&container->lock);
		ret = tce_iommu_enable(container);
		mutex_unlock(&container->lock);
		return ret;

	case VFIO_IOMMU_DISABLE:
		if (container->v2)
			break;

		mutex_lock(&container->lock);
		tce_iommu_disable(container);
		mutex_unlock(&container->lock);
		return 0;

	case VFIO_EEH_PE_OP: {
		struct tce_iommu_group *tcegrp;

		ret = 0;
		/* Apply the EEH op to every attached group's PE */
		list_for_each_entry(tcegrp, &container->group_list, next) {
			ret = vfio_spapr_ioctl_eeh_pe_op(tcegrp->grp, arg);
			if (ret)
				return ret;
		}
		return ret;
	}
	case VFIO_IOMMU_SPAPR_TCE_CREATE: {
		struct vfio_iommu_spapr_tce_create create;

		if (!container->v2)
			break;

		ret = tce_iommu_mm_set(container);
		if (ret)
			return ret;

		if (!tce_groups_attached(container))
			return -ENXIO;

		minsz = offsetofend(struct vfio_iommu_spapr_tce_create,
				start_addr);

		if (copy_from_user(&create, (void __user *)arg, minsz))
			return -EFAULT;

		if (create.argsz < minsz)
			return -EINVAL;

		if (create.flags)
			return -EINVAL;

		mutex_lock(&container->lock);

		/* Make sure the default window exists before adding more */
		ret = tce_iommu_create_default_window(container);
		if (!ret)
			ret = tce_iommu_create_window(container,
					create.page_shift,
					create.window_size, create.levels,
					&create.start_addr);

		mutex_unlock(&container->lock);

		if (!ret && copy_to_user((void __user *)arg, &create, minsz))
			ret = -EFAULT;

		return ret;
	}
	case VFIO_IOMMU_SPAPR_TCE_REMOVE: {
		struct vfio_iommu_spapr_tce_remove remove;

		if (!container->v2)
			break;

		ret = tce_iommu_mm_set(container);
		if (ret)
			return ret;

		if (!tce_groups_attached(container))
			return -ENXIO;

		minsz = offsetofend(struct vfio_iommu_spapr_tce_remove,
				start_addr);

		if (copy_from_user(&remove, (void __user *)arg, minsz))
			return -EFAULT;

		if (remove.argsz < minsz)
			return -EINVAL;

		if (remove.flags)
			return -EINVAL;

		/* Removing a not-yet-created default window is a no-op */
		if (container->def_window_pending && !remove.start_addr) {
			container->def_window_pending = false;
			return 0;
		}

		mutex_lock(&container->lock);

		ret = tce_iommu_remove_window(container, remove.start_addr);

		mutex_unlock(&container->lock);

		return ret;
	}
	}

	return -ENOTTY;
}
/*
 * Unprogram every window the container owns from @table_group when a
 * group detaches. A platform without unset_window cannot have attached
 * in the first place, hence the WARN.
 */
static void tce_iommu_release_ownership(struct tce_container *container,
		struct iommu_table_group *table_group)
{
	long i;
	if (!table_group->ops->unset_window) {
		WARN_ON_ONCE(1);
		return;
	}
	for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i)
		if (container->tables[i])
			table_group->ops->unset_window(table_group, i);
}
/*
 * Program all of the container's existing windows into a newly attaching
 * @table_group. On failure every window is unprogrammed again.
 */
static long tce_iommu_take_ownership(struct tce_container *container,
		struct iommu_table_group *table_group)
{
	long i, ret = 0;
	/* Set all windows to the new group */
	for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
		struct iommu_table *tbl = container->tables[i];
		if (!tbl)
			continue;
		ret = table_group->ops->set_window(table_group, i, tbl);
		if (ret)
			goto release_exit;
	}
	return 0;
release_exit:
	/* Roll back any windows set before the failure */
	for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i)
		table_group->ops->unset_window(table_group, i);
	return ret;
}
/*
 * Attach an IOMMU group to the container: validate compatibility with the
 * already attached groups, program existing windows into the new group and
 * add it to the container's group list.
 *
 * Fixes: dropped the redundant NULL test in "if (ret && tcegrp)" - tcegrp
 * is the freshly allocated (and NULL-checked) entry at that point, and
 * kfree(NULL) is a no-op anyway. Also removed the commented-out pr_debug.
 */
static int tce_iommu_attach_group(void *iommu_data,
		struct iommu_group *iommu_group, enum vfio_group_type type)
{
	int ret = 0;
	struct tce_container *container = iommu_data;
	struct iommu_table_group *table_group;
	struct tce_iommu_group *tcegrp = NULL;

	if (type == VFIO_EMULATED_IOMMU)
		return -EINVAL;

	mutex_lock(&container->lock);

	table_group = iommu_group_get_iommudata(iommu_group);
	if (!table_group) {
		ret = -ENODEV;
		goto unlock_exit;
	}

	/* v2 requires full support of dynamic DMA windows */
	if (container->v2 && table_group->max_dynamic_windows_supported == 0) {
		ret = -EINVAL;
		goto unlock_exit;
	}

	/* v1 reuses TCE tables and does not share them among PEs */
	if (!container->v2 && tce_groups_attached(container)) {
		ret = -EBUSY;
		goto unlock_exit;
	}

	/*
	 * Check if new group has the same iommu_table_group_ops
	 * (i.e. compatible)
	 */
	list_for_each_entry(tcegrp, &container->group_list, next) {
		struct iommu_table_group *table_group_tmp;

		if (tcegrp->grp == iommu_group) {
			pr_warn("tce_vfio: Group %d is already attached\n",
					iommu_group_id(iommu_group));
			ret = -EBUSY;
			goto unlock_exit;
		}
		table_group_tmp = iommu_group_get_iommudata(tcegrp->grp);
		if (table_group_tmp->ops->create_table !=
				table_group->ops->create_table) {
			pr_warn("tce_vfio: Group %d is incompatible with group %d\n",
					iommu_group_id(iommu_group),
					iommu_group_id(tcegrp->grp));
			ret = -EPERM;
			goto unlock_exit;
		}
	}

	tcegrp = kzalloc(sizeof(*tcegrp), GFP_KERNEL);
	if (!tcegrp) {
		ret = -ENOMEM;
		goto unlock_exit;
	}

	ret = tce_iommu_take_ownership(container, table_group);
	/* With no windows yet, defer creating the default one to first use */
	if (!tce_groups_attached(container) && !container->tables[0])
		container->def_window_pending = true;

	if (!ret) {
		tcegrp->grp = iommu_group;
		list_add(&tcegrp->next, &container->group_list);
	}

	if (ret)
		kfree(tcegrp);

unlock_exit:
	mutex_unlock(&container->lock);

	return ret;
}
/*
 * Detach an IOMMU group from the container: remove it from the group list
 * and unprogram the container's windows from it.
 */
static void tce_iommu_detach_group(void *iommu_data,
		struct iommu_group *iommu_group)
{
	struct tce_container *container = iommu_data;
	struct iommu_table_group *table_group;
	bool found = false;
	struct tce_iommu_group *tcegrp;
	mutex_lock(&container->lock);
	/* Locate the list entry for this group, if any */
	list_for_each_entry(tcegrp, &container->group_list, next) {
		if (tcegrp->grp == iommu_group) {
			found = true;
			break;
		}
	}
	if (!found) {
		pr_warn("tce_vfio: detaching unattached group #%u\n",
				iommu_group_id(iommu_group));
		goto unlock_exit;
	}
	list_del(&tcegrp->next);
	kfree(tcegrp);
	table_group = iommu_group_get_iommudata(iommu_group);
	BUG_ON(!table_group);
	tce_iommu_release_ownership(container, table_group);
unlock_exit:
	mutex_unlock(&container->lock);
}
/* VFIO IOMMU backend ops for sPAPR TCE, registered at module init */
static const struct vfio_iommu_driver_ops tce_iommu_driver_ops = {
	.name		= "iommu-vfio-powerpc",
	.owner		= THIS_MODULE,
	.open		= tce_iommu_open,
	.release	= tce_iommu_release,
	.ioctl		= tce_iommu_ioctl,
	.attach_group	= tce_iommu_attach_group,
	.detach_group	= tce_iommu_detach_group,
};
/* Module init: register this backend with the VFIO core */
static int __init tce_iommu_init(void)
{
	return vfio_register_iommu_driver(&tce_iommu_driver_ops);
}
/* Module exit: unregister this backend from the VFIO core */
static void __exit tce_iommu_cleanup(void)
{
	vfio_unregister_iommu_driver(&tce_iommu_driver_ops);
}
/* Module boilerplate; DRIVER_* macros are defined earlier in this file */
module_init(tce_iommu_init);
module_exit(tce_iommu_cleanup);
MODULE_VERSION(DRIVER_VERSION);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
| linux-master | drivers/vfio/vfio_iommu_spapr_tce.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* VFIO generic eventfd code for IRQFD support.
* Derived from drivers/vfio/pci/vfio_pci_intrs.c
*
* Copyright (C) 2012 Red Hat, Inc. All rights reserved.
* Author: Alex Williamson <[email protected]>
*/
#include <linux/vfio.h>
#include <linux/eventfd.h>
#include <linux/file.h>
#include <linux/module.h>
#include <linux/slab.h>
#include "vfio.h"
/* Workqueue used to defer virqfd teardown out of the poll wakeup path */
static struct workqueue_struct *vfio_irqfd_cleanup_wq;
/* Protects the caller-owned virqfd pointers stored at virqfd->pvirqfd */
static DEFINE_SPINLOCK(virqfd_lock);
/* Create the singlethread workqueue used for deferred virqfd shutdown. */
int __init vfio_virqfd_init(void)
{
	struct workqueue_struct *wq;

	wq = create_singlethread_workqueue("vfio-irqfd-cleanup");
	if (!wq)
		return -ENOMEM;

	vfio_irqfd_cleanup_wq = wq;
	return 0;
}
/* Tear down the virqfd cleanup workqueue (flushes pending shutdowns) */
void vfio_virqfd_exit(void)
{
	destroy_workqueue(vfio_irqfd_cleanup_wq);
}
/* Queue @virqfd for teardown on the cleanup workqueue; safe from atomic context */
static void virqfd_deactivate(struct virqfd *virqfd)
{
	queue_work(vfio_irqfd_cleanup_wq, &virqfd->shutdown);
}
/*
 * Wait-queue callback run when the underlying eventfd is signalled
 * (EPOLLIN) or closed (EPOLLHUP). Called in atomic context.
 *
 * Fix: the inner spinlock-irqsave variable was also named "flags",
 * shadowing the outer __poll_t flags (-Wshadow hazard); renamed to
 * "irqflags".
 */
static int virqfd_wakeup(wait_queue_entry_t *wait, unsigned mode, int sync, void *key)
{
	struct virqfd *virqfd = container_of(wait, struct virqfd, wait);
	__poll_t flags = key_to_poll(key);

	if (flags & EPOLLIN) {
		u64 cnt;

		eventfd_ctx_do_read(virqfd->eventfd, &cnt);

		/* An event has been signaled, call function */
		if ((!virqfd->handler ||
		     virqfd->handler(virqfd->opaque, virqfd->data)) &&
		    virqfd->thread)
			schedule_work(&virqfd->inject);
	}

	if (flags & EPOLLHUP) {
		unsigned long irqflags;

		spin_lock_irqsave(&virqfd_lock, irqflags);

		/*
		 * The eventfd is closing, if the virqfd has not yet been
		 * queued for release, as determined by testing whether the
		 * virqfd pointer to it is still valid, queue it now. As
		 * with kvm irqfds, we know we won't race against the virqfd
		 * going away because we hold the lock to get here.
		 */
		if (*(virqfd->pvirqfd) == virqfd) {
			*(virqfd->pvirqfd) = NULL;
			virqfd_deactivate(virqfd);
		}

		spin_unlock_irqrestore(&virqfd_lock, irqflags);
	}

	return 0;
}
/* poll_table callback: hook our wait entry onto the eventfd's waitqueue */
static void virqfd_ptable_queue_proc(struct file *file,
		wait_queue_head_t *wqh, poll_table *pt)
{
	struct virqfd *virqfd = container_of(pt, struct virqfd, pt);
	add_wait_queue(wqh, &virqfd->wait);
}
/*
 * Workqueue teardown: detach from the eventfd waitqueue, wait for any
 * in-flight inject work, then release the eventfd reference and free.
 */
static void virqfd_shutdown(struct work_struct *work)
{
	struct virqfd *virqfd = container_of(work, struct virqfd, shutdown);
	u64 cnt;
	eventfd_ctx_remove_wait_queue(virqfd->eventfd, &virqfd->wait, &cnt);
	flush_work(&virqfd->inject);
	eventfd_ctx_put(virqfd->eventfd);
	kfree(virqfd);
}
/* Process-context work: run the user's ->thread callback, if set */
static void virqfd_inject(struct work_struct *work)
{
	struct virqfd *virqfd = container_of(work, struct virqfd, inject);
	if (virqfd->thread)
		virqfd->thread(virqfd->opaque, virqfd->data);
}
/*
 * Wire an eventfd @fd to a pair of callbacks: @handler runs in the poll
 * wakeup (atomic) path, @thread runs from a workqueue when @handler is
 * absent or returns nonzero. *@pvirqfd tracks the active virqfd and must
 * be NULL on entry; it is cleared again by vfio_virqfd_disable() or by
 * eventfd closure. Returns 0 on success, -errno on failure.
 */
int vfio_virqfd_enable(void *opaque,
		       int (*handler)(void *, void *),
		       void (*thread)(void *, void *),
		       void *data, struct virqfd **pvirqfd, int fd)
{
	struct fd irqfd;
	struct eventfd_ctx *ctx;
	struct virqfd *virqfd;
	int ret = 0;
	__poll_t events;
	virqfd = kzalloc(sizeof(*virqfd), GFP_KERNEL_ACCOUNT);
	if (!virqfd)
		return -ENOMEM;
	virqfd->pvirqfd = pvirqfd;
	virqfd->opaque = opaque;
	virqfd->handler = handler;
	virqfd->thread = thread;
	virqfd->data = data;
	INIT_WORK(&virqfd->shutdown, virqfd_shutdown);
	INIT_WORK(&virqfd->inject, virqfd_inject);
	irqfd = fdget(fd);
	if (!irqfd.file) {
		ret = -EBADF;
		goto err_fd;
	}
	ctx = eventfd_ctx_fileget(irqfd.file);
	if (IS_ERR(ctx)) {
		ret = PTR_ERR(ctx);
		goto err_ctx;
	}
	virqfd->eventfd = ctx;
	/*
	 * virqfds can be released by closing the eventfd or directly
	 * through ioctl.  These are both done through a workqueue, so
	 * we update the pointer to the virqfd under lock to avoid
	 * pushing multiple jobs to release the same virqfd.
	 */
	spin_lock_irq(&virqfd_lock);
	if (*pvirqfd) {
		spin_unlock_irq(&virqfd_lock);
		ret = -EBUSY;
		goto err_busy;
	}
	*pvirqfd = virqfd;
	spin_unlock_irq(&virqfd_lock);
	/*
	 * Install our own custom wake-up handling so we are notified via
	 * a callback whenever someone signals the underlying eventfd.
	 */
	init_waitqueue_func_entry(&virqfd->wait, virqfd_wakeup);
	init_poll_funcptr(&virqfd->pt, virqfd_ptable_queue_proc);
	events = vfs_poll(irqfd.file, &virqfd->pt);
	/*
	 * Check if there was an event already pending on the eventfd
	 * before we registered and trigger it as if we didn't miss it.
	 */
	if (events & EPOLLIN) {
		if ((!handler || handler(opaque, data)) && thread)
			schedule_work(&virqfd->inject);
	}
	/*
	 * Do not drop the file until the irqfd is fully initialized,
	 * otherwise we might race against the EPOLLHUP.
	 */
	fdput(irqfd);
	return 0;
err_busy:
	eventfd_ctx_put(ctx);
err_ctx:
	fdput(irqfd);
err_fd:
	kfree(virqfd);
	return ret;
}
EXPORT_SYMBOL_GPL(vfio_virqfd_enable);
/*
 * Disable and release the virqfd tracked by *@pvirqfd (if any), then wait
 * until all queued shutdown work has finished. Safe to call when *@pvirqfd
 * is already NULL.
 */
void vfio_virqfd_disable(struct virqfd **pvirqfd)
{
	unsigned long flags;
	spin_lock_irqsave(&virqfd_lock, flags);
	if (*pvirqfd) {
		virqfd_deactivate(*pvirqfd);
		*pvirqfd = NULL;
	}
	spin_unlock_irqrestore(&virqfd_lock, flags);
	/*
	 * Block until we know all outstanding shutdown jobs have completed.
	 * Even if we don't queue the job, flush the wq to be sure it's
	 * been released.
	 */
	flush_workqueue(vfio_irqfd_cleanup_wq);
}
EXPORT_SYMBOL_GPL(vfio_virqfd_disable);
| linux-master | drivers/vfio/virqfd.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES
*/
#include <linux/vfio.h>
#include <linux/iommufd.h>
#include "vfio.h"
MODULE_IMPORT_NS(IOMMUFD);
MODULE_IMPORT_NS(IOMMUFD_VFIO);
/*
 * Return true when @ictx has a VFIO-compatible IOAS whose ID can be
 * retrieved; @vdev is unused.
 */
bool vfio_iommufd_device_has_compat_ioas(struct vfio_device *vdev,
					 struct iommufd_ctx *ictx)
{
	u32 compat_ioas_id;

	return iommufd_vfio_compat_ioas_get_id(ictx, &compat_ioas_id) == 0;
}
/* Bind the device file's device to its iommufd context via driver ops */
int vfio_df_iommufd_bind(struct vfio_device_file *df)
{
	struct vfio_device *vdev = df->device;
	struct iommufd_ctx *ictx = df->iommufd;
	lockdep_assert_held(&vdev->dev_set->lock);
	return vdev->ops->bind_iommufd(vdev, ictx, &df->devid);
}
/*
 * Attach @vdev to the VFIO-compatibility IOAS of @ictx (legacy VFIO
 * container emulation path). No-op for noiommu devices.
 */
int vfio_iommufd_compat_attach_ioas(struct vfio_device *vdev,
				    struct iommufd_ctx *ictx)
{
	u32 ioas_id;
	int ret;
	lockdep_assert_held(&vdev->dev_set->lock);
	/* compat noiommu does not need to do ioas attach */
	if (vfio_device_is_noiommu(vdev))
		return 0;
	ret = iommufd_vfio_compat_ioas_get_id(ictx, &ioas_id);
	if (ret)
		return ret;
	/* The legacy path has no way to return the selected pt_id */
	return vdev->ops->attach_ioas(vdev, &ioas_id);
}
/* Undo vfio_df_iommufd_bind(); no-op for noiommu or ops without unbind */
void vfio_df_iommufd_unbind(struct vfio_device_file *df)
{
	struct vfio_device *vdev = df->device;
	lockdep_assert_held(&vdev->dev_set->lock);
	if (vfio_device_is_noiommu(vdev))
		return;
	if (vdev->ops->unbind_iommufd)
		vdev->ops->unbind_iommufd(vdev);
}
/* Return the iommufd context @vdev is bound to, or NULL if unbound. */
struct iommufd_ctx *vfio_iommufd_device_ictx(struct vfio_device *vdev)
{
	if (!vdev->iommufd_device)
		return NULL;

	return iommufd_device_to_ictx(vdev->iommufd_device);
}
EXPORT_SYMBOL_GPL(vfio_iommufd_device_ictx);
/* Return the iommufd object ID of a bound @vdev, or -EINVAL if unbound. */
static int vfio_iommufd_device_id(struct vfio_device *vdev)
{
	if (!vdev->iommufd_device)
		return -EINVAL;

	return iommufd_device_to_id(vdev->iommufd_device);
}
/*
 * Return devid for a device.
 * valid ID for the device that is owned by the ictx
 * -ENOENT = device is owned but there is no ID
 * -ENODEV or other error = device is not owned
 */
int vfio_iommufd_get_dev_id(struct vfio_device *vdev, struct iommufd_ctx *ictx)
{
	struct iommu_group *group;
	int devid;
	/* Fast path: the device itself is bound to this ictx */
	if (vfio_iommufd_device_ictx(vdev) == ictx)
		return vfio_iommufd_device_id(vdev);
	group = iommu_group_get(vdev->dev);
	if (!group)
		return -ENODEV;
	/* Ownership via the group counts, but carries no per-device ID */
	if (iommufd_ctx_has_group(ictx, group))
		devid = -ENOENT;
	else
		devid = -ENODEV;
	iommu_group_put(group);
	return devid;
}
EXPORT_SYMBOL_GPL(vfio_iommufd_get_dev_id);
/*
 * The physical standard ops mean that the iommufd_device is bound to the
 * physical device vdev->dev that was provided to vfio_init_group_dev(). Drivers
 * using this ops set should call vfio_register_group_dev()
 */
int vfio_iommufd_physical_bind(struct vfio_device *vdev,
			       struct iommufd_ctx *ictx, u32 *out_device_id)
{
	struct iommufd_device *idev;
	idev = iommufd_device_bind(ictx, vdev->dev, out_device_id);
	if (IS_ERR(idev))
		return PTR_ERR(idev);
	/* Remember the binding for attach/detach/unbind */
	vdev->iommufd_device = idev;
	return 0;
}
EXPORT_SYMBOL_GPL(vfio_iommufd_physical_bind);
/* Detach (if attached) and unbind the physical iommufd device */
void vfio_iommufd_physical_unbind(struct vfio_device *vdev)
{
	lockdep_assert_held(&vdev->dev_set->lock);
	if (vdev->iommufd_attached) {
		iommufd_device_detach(vdev->iommufd_device);
		vdev->iommufd_attached = false;
	}
	iommufd_device_unbind(vdev->iommufd_device);
	vdev->iommufd_device = NULL;
}
EXPORT_SYMBOL_GPL(vfio_iommufd_physical_unbind);
/*
 * Attach the bound physical device to the page table in *pt_id, or
 * replace the current attachment if one already exists.
 */
int vfio_iommufd_physical_attach_ioas(struct vfio_device *vdev, u32 *pt_id)
{
	int rc;
	lockdep_assert_held(&vdev->dev_set->lock);
	if (WARN_ON(!vdev->iommufd_device))
		return -EINVAL;
	/* replace vs first attach depending on current state */
	if (vdev->iommufd_attached)
		rc = iommufd_device_replace(vdev->iommufd_device, pt_id);
	else
		rc = iommufd_device_attach(vdev->iommufd_device, pt_id);
	if (rc)
		return rc;
	vdev->iommufd_attached = true;
	return 0;
}
EXPORT_SYMBOL_GPL(vfio_iommufd_physical_attach_ioas);
/* Detach the bound physical device from its IOAS; no-op if not attached */
void vfio_iommufd_physical_detach_ioas(struct vfio_device *vdev)
{
	lockdep_assert_held(&vdev->dev_set->lock);
	if (WARN_ON(!vdev->iommufd_device) || !vdev->iommufd_attached)
		return;
	iommufd_device_detach(vdev->iommufd_device);
	vdev->iommufd_attached = false;
}
EXPORT_SYMBOL_GPL(vfio_iommufd_physical_detach_ioas);
/*
 * The emulated standard ops mean that vfio_device is going to use the
 * "mdev path" and will call vfio_pin_pages()/vfio_dma_rw(). Drivers using this
 * ops set should call vfio_register_emulated_iommu_dev(). Drivers that do
 * not call vfio_pin_pages()/vfio_dma_rw() have no need to provide dma_unmap.
 */
static void vfio_emulated_unmap(void *data, unsigned long iova,
				unsigned long length)
{
	struct vfio_device *vdev = data;
	/* dma_unmap is optional for drivers that never pin pages */
	if (vdev->ops->dma_unmap)
		vdev->ops->dma_unmap(vdev, iova, length);
}
/* iommufd access ops for emulated (mdev-style) devices */
static const struct iommufd_access_ops vfio_user_ops = {
	.needs_pin_pages = 1,
	.unmap = vfio_emulated_unmap,
};
/* Create an iommufd access object for an emulated device and bind it */
int vfio_iommufd_emulated_bind(struct vfio_device *vdev,
			       struct iommufd_ctx *ictx, u32 *out_device_id)
{
	struct iommufd_access *user;
	lockdep_assert_held(&vdev->dev_set->lock);
	user = iommufd_access_create(ictx, &vfio_user_ops, vdev, out_device_id);
	if (IS_ERR(user))
		return PTR_ERR(user);
	vdev->iommufd_access = user;
	return 0;
}
EXPORT_SYMBOL_GPL(vfio_iommufd_emulated_bind);
/* Destroy the emulated device's iommufd access object, if any */
void vfio_iommufd_emulated_unbind(struct vfio_device *vdev)
{
	lockdep_assert_held(&vdev->dev_set->lock);
	if (vdev->iommufd_access) {
		iommufd_access_destroy(vdev->iommufd_access);
		vdev->iommufd_attached = false;
		vdev->iommufd_access = NULL;
	}
}
EXPORT_SYMBOL_GPL(vfio_iommufd_emulated_unbind);
/*
 * Attach the emulated device's access object to the IOAS in *pt_id, or
 * replace the current attachment if one already exists.
 */
int vfio_iommufd_emulated_attach_ioas(struct vfio_device *vdev, u32 *pt_id)
{
	int rc;
	lockdep_assert_held(&vdev->dev_set->lock);
	if (vdev->iommufd_attached)
		rc = iommufd_access_replace(vdev->iommufd_access, *pt_id);
	else
		rc = iommufd_access_attach(vdev->iommufd_access, *pt_id);
	if (rc)
		return rc;
	vdev->iommufd_attached = true;
	return 0;
}
EXPORT_SYMBOL_GPL(vfio_iommufd_emulated_attach_ioas);
/* Detach the emulated device's access object; no-op if not attached */
void vfio_iommufd_emulated_detach_ioas(struct vfio_device *vdev)
{
	lockdep_assert_held(&vdev->dev_set->lock);
	if (WARN_ON(!vdev->iommufd_access) ||
	    !vdev->iommufd_attached)
		return;
	iommufd_access_detach(vdev->iommufd_access);
	vdev->iommufd_attached = false;
}
EXPORT_SYMBOL_GPL(vfio_iommufd_emulated_detach_ioas);
| linux-master | drivers/vfio/iommufd.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2022, Oracle and/or its affiliates.
* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved
*/
#include <linux/iova_bitmap.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#define BITS_PER_PAGE (PAGE_SIZE * BITS_PER_BYTE)
/*
* struct iova_bitmap_map - A bitmap representing an IOVA range
*
* Main data structure for tracking mapped user pages of bitmap data.
*
* For example, for something recording dirty IOVAs, it will be provided a
* struct iova_bitmap structure, as a general structure for iterating the
* total IOVA range. The struct iova_bitmap_map, though, represents the
* subset of said IOVA space that is pinned by its parent structure (struct
* iova_bitmap).
*
 * The user does not need to know the exact location of the bits in the
 * bitmap. From the user's perspective the only API available is
 * iova_bitmap_set(), which
* records the IOVA *range* in the bitmap by setting the corresponding
* bits.
*
* The bitmap is an array of u64 whereas each bit represents an IOVA of
* range of (1 << pgshift). Thus formula for the bitmap data to be set is:
*
* data[(iova / page_size) / 64] & (1ULL << (iova % 64))
*/
struct iova_bitmap_map {
	/* base IOVA representing bit 0 of the first page */
	unsigned long iova;
	/* order of the page size that each bit represents */
	unsigned long pgshift;
	/* page offset of the first user page pinned */
	unsigned long pgoff;
	/* number of pages pinned */
	unsigned long npages;
	/* pinned pages representing the bitmap data */
	struct page **pages;
};
/*
* struct iova_bitmap - The IOVA bitmap object
*
* Main data structure for iterating over the bitmap data.
*
* Abstracts the pinning work and iterates in IOVA ranges.
* It uses a windowing scheme and pins the bitmap in relatively
* big ranges e.g.
*
* The bitmap object uses one base page to store all the pinned pages
* pointers related to the bitmap. For sizeof(struct page*) == 8 it stores
* 512 struct page pointers which, if the base page size is 4K, it means
* 2M of bitmap data is pinned at a time. If the iova_bitmap page size is
* also 4K then the range window to iterate is 64G.
*
* For example iterating on a total IOVA range of 4G..128G, it will walk
* through this set of ranges:
*
* 4G - 68G-1 (64G)
* 68G - 128G-1 (64G)
*
* An example of the APIs on how to use/iterate over the IOVA bitmap:
*
* bitmap = iova_bitmap_alloc(iova, length, page_size, data);
* if (IS_ERR(bitmap))
* return PTR_ERR(bitmap);
*
* ret = iova_bitmap_for_each(bitmap, arg, dirty_reporter_fn);
*
* iova_bitmap_free(bitmap);
*
* Each iteration of the @dirty_reporter_fn is called with a unique @iova
* and @length argument, indicating the current range available through the
* iova_bitmap. The @dirty_reporter_fn uses iova_bitmap_set() to mark dirty
* areas (@iova_length) within that provided range, as following:
*
* iova_bitmap_set(bitmap, iova, iova_length);
*
* The internals of the object uses an index @mapped_base_index that indexes
* which u64 word of the bitmap is mapped, up to @mapped_total_index.
* Those keep being incremented until @mapped_total_index is reached while
* mapping up to PAGE_SIZE / sizeof(struct page*) maximum of pages.
*
* The IOVA bitmap is usually located on what tracks DMA mapped ranges or
* some form of IOVA range tracking that co-relates to the user passed
* bitmap.
*/
struct iova_bitmap {
	/* IOVA range representing the currently mapped bitmap data */
	struct iova_bitmap_map mapped;
	/* userspace address of the bitmap (array of u64 words) */
	u64 __user *bitmap;
	/* u64 index that @mapped points to */
	unsigned long mapped_base_index;
	/* how many u64 can we walk in total */
	unsigned long mapped_total_index;
	/* base IOVA of the whole bitmap */
	unsigned long iova;
	/* length of the IOVA range for the whole bitmap */
	size_t length;
};
/*
* Converts a relative IOVA to a bitmap index.
* This function provides the index into the u64 array (bitmap::bitmap)
* for a given IOVA offset.
* Relative IOVA means relative to the bitmap::mapped base IOVA
* (stored in mapped::iova). All computations in this file are done using
* relative IOVAs and thus avoid an extra subtraction against mapped::iova.
* The user API iova_bitmap_set() always uses a regular absolute IOVAs.
*/
static unsigned long iova_bitmap_offset_to_index(struct iova_bitmap *bitmap,
						 unsigned long iova)
{
	/* IOVA span covered by a single u64 word of bitmap data */
	unsigned long iova_per_u64 = BITS_PER_TYPE(*bitmap->bitmap) <<
				     bitmap->mapped.pgshift;

	return iova / iova_per_u64;
}
/*
* Converts a bitmap index to a *relative* IOVA.
*/
static unsigned long iova_bitmap_index_to_offset(struct iova_bitmap *bitmap,
						 unsigned long index)
{
	/* Each u64 word tracks 64 bits, each bit one page of IOVA space */
	return (index * BITS_PER_TYPE(*bitmap->bitmap)) <<
	       bitmap->mapped.pgshift;
}
/*
* Returns the base IOVA of the mapped range.
*/
static unsigned long iova_bitmap_mapped_iova(struct iova_bitmap *bitmap)
{
	/* First mapped word index translated back to an absolute IOVA */
	return bitmap->iova +
	       iova_bitmap_index_to_offset(bitmap, bitmap->mapped_base_index);
}
/*
* Pins the bitmap user pages for the current range window.
* This is internal to IOVA bitmap and called when advancing the
* index (@mapped_base_index) or allocating the bitmap.
*/
static int iova_bitmap_get(struct iova_bitmap *bitmap)
{
	struct iova_bitmap_map *mapped = &bitmap->mapped;
	unsigned long npages;
	u64 __user *addr;
	long ret;
	/*
	 * @mapped_base_index is the index of the currently mapped u64 words
	 * that we have access. Anything before @mapped_base_index is not
	 * mapped. The range @mapped_base_index .. @mapped_total_index-1 is
	 * mapped but capped at a maximum number of pages.
	 */
	npages = DIV_ROUND_UP((bitmap->mapped_total_index -
			       bitmap->mapped_base_index) *
			      sizeof(*bitmap->bitmap), PAGE_SIZE);
	/*
	 * We always cap at max number of 'struct page' a base page can fit.
	 * This is, for example, on x86 means 2M of bitmap data max.
	 */
	npages = min(npages, PAGE_SIZE / sizeof(struct page *));
	/*
	 * Bitmap address to be pinned is calculated via pointer arithmetic
	 * with bitmap u64 word index.
	 */
	addr = bitmap->bitmap + bitmap->mapped_base_index;
	ret = pin_user_pages_fast((unsigned long)addr, npages,
				  FOLL_WRITE, mapped->pages);
	/* Any error, including zero pages pinned, is reported as -EFAULT */
	if (ret <= 0)
		return -EFAULT;
	/* A partial pin is fine: iteration is bounded by @npages below */
	mapped->npages = (unsigned long)ret;
	/* Base IOVA where @pages point to i.e. bit 0 of the first page */
	mapped->iova = iova_bitmap_mapped_iova(bitmap);
	/*
	 * offset of the page where pinned pages bit 0 is located.
	 * This handles the case where the bitmap is not PAGE_SIZE
	 * aligned.
	 */
	mapped->pgoff = offset_in_page(addr);
	return 0;
}
/*
* Unpins the bitmap user pages and clears @npages
* (un)pinning is abstracted from API user and it's done when advancing
* the index or freeing the bitmap.
*/
/*
 * Release the currently pinned window of bitmap pages, if any.
 * Safe to call repeatedly: @npages is cleared after unpinning.
 */
static void iova_bitmap_put(struct iova_bitmap *bitmap)
{
	struct iova_bitmap_map *mapped = &bitmap->mapped;

	if (!mapped->npages)
		return;

	unpin_user_pages(mapped->pages, mapped->npages);
	mapped->npages = 0;
}
/**
* iova_bitmap_alloc() - Allocates an IOVA bitmap object
* @iova: Start address of the IOVA range
* @length: Length of the IOVA range
* @page_size: Page size of the IOVA bitmap. It defines what each bit
* granularity represents
* @data: Userspace address of the bitmap
*
* Allocates an IOVA object and initializes all its fields including the
* first user pages of @data.
*
* Return: A pointer to a newly allocated struct iova_bitmap
* or ERR_PTR() on error.
*/
struct iova_bitmap *iova_bitmap_alloc(unsigned long iova, size_t length,
				      unsigned long page_size, u64 __user *data)
{
	struct iova_bitmap_map *mapped;
	struct iova_bitmap *bitmap;
	int rc;
	bitmap = kzalloc(sizeof(*bitmap), GFP_KERNEL);
	if (!bitmap)
		return ERR_PTR(-ENOMEM);
	mapped = &bitmap->mapped;
	/* page_size is expected to be a power of two; keep only the order */
	mapped->pgshift = __ffs(page_size);
	bitmap->bitmap = data;
	/* Index of the last word touched by (length - 1), plus one */
	bitmap->mapped_total_index =
		iova_bitmap_offset_to_index(bitmap, length - 1) + 1;
	bitmap->iova = iova;
	bitmap->length = length;
	mapped->iova = iova;
	/* One page worth of struct page pointers bounds the pin window */
	mapped->pages = (struct page **)__get_free_page(GFP_KERNEL);
	if (!mapped->pages) {
		rc = -ENOMEM;
		goto err;
	}
	rc = iova_bitmap_get(bitmap);
	if (rc)
		goto err;
	return bitmap;
err:
	/* iova_bitmap_free() copes with a partially initialized bitmap */
	iova_bitmap_free(bitmap);
	return ERR_PTR(rc);
}
/**
* iova_bitmap_free() - Frees an IOVA bitmap object
* @bitmap: IOVA bitmap to free
*
* It unpins and releases pages array memory and clears any leftover
* state.
*/
/*
 * Free an IOVA bitmap: unpin any mapped window, release the pages
 * pointer array and the bitmap object itself. Tolerates a partially
 * initialized bitmap (pages may be NULL, nothing pinned).
 */
void iova_bitmap_free(struct iova_bitmap *bitmap)
{
	iova_bitmap_put(bitmap);

	if (bitmap->mapped.pages) {
		free_page((unsigned long)bitmap->mapped.pages);
		bitmap->mapped.pages = NULL;
	}

	kfree(bitmap);
}
/*
* Returns the remaining bitmap indexes from mapped_total_index to process for
* the currently pinned bitmap pages.
*/
/*
 * Number of still-unprocessed u64 indexes that the currently pinned
 * pages can hold: the smaller of what is left overall and what fits
 * in the pinned bytes (accounting for the page offset of word 0).
 */
static unsigned long iova_bitmap_mapped_remaining(struct iova_bitmap *bitmap)
{
	unsigned long bytes = (bitmap->mapped.npages << PAGE_SHIFT) -
			      bitmap->mapped.pgoff;
	unsigned long left = bitmap->mapped_total_index -
			     bitmap->mapped_base_index;

	return min_t(unsigned long, left, bytes / sizeof(*bitmap->bitmap));
}
/*
* Returns the length of the mapped IOVA range.
*/
/*
 * Length in bytes of the IOVA range covered by the currently pinned
 * bitmap window, clamped so it never reaches past the end of the whole
 * range (@bitmap::iova .. @bitmap::iova + @bitmap::length - 1).
 */
static unsigned long iova_bitmap_mapped_length(struct iova_bitmap *bitmap)
{
	unsigned long max_iova = bitmap->iova + bitmap->length - 1;
	unsigned long start = iova_bitmap_mapped_iova(bitmap);
	unsigned long len, end;

	/* Convert the pinned index count into an IOVA length ... */
	len = iova_bitmap_index_to_offset(bitmap,
					  iova_bitmap_mapped_remaining(bitmap));

	/* ... and trim any overshoot past the end of the bitmap range */
	end = start + len - 1;
	if (end > max_iova)
		len -= end - max_iova;

	return len;
}
/*
* Returns true if there's not more data to iterate.
*/
/* Returns true when the whole bitmap has been iterated (no more data). */
static bool iova_bitmap_done(struct iova_bitmap *bitmap)
{
	return bitmap->mapped_base_index >= bitmap->mapped_total_index;
}
/*
* Advances to the next range, releases the current pinned
* pages and pins the next set of bitmap pages.
* Returns 0 on success or otherwise errno.
*/
/*
 * Move the iteration window forward: consume every u64 index covered by
 * the current window, drop its pinned pages and, unless iteration is
 * complete, pin the next window. Returns 0 on success or -errno.
 */
static int iova_bitmap_advance(struct iova_bitmap *bitmap)
{
	unsigned long last_offset = iova_bitmap_mapped_length(bitmap) - 1;

	bitmap->mapped_base_index +=
		iova_bitmap_offset_to_index(bitmap, last_offset) + 1;

	iova_bitmap_put(bitmap);

	if (iova_bitmap_done(bitmap))
		return 0;

	/* When advancing the index we pin the next set of bitmap pages */
	return iova_bitmap_get(bitmap);
}
/**
* iova_bitmap_for_each() - Iterates over the bitmap
* @bitmap: IOVA bitmap to iterate
* @opaque: Additional argument to pass to the callback
* @fn: Function that gets called for each IOVA range
*
* Helper function to iterate over bitmap data representing a portion of IOVA
* space. It hides the complexity of iterating bitmaps and translating the
* mapped bitmap user pages into IOVA ranges to process.
*
* Return: 0 on success, and an error on failure either upon
* iteration or when the callback returns an error.
*/
int iova_bitmap_for_each(struct iova_bitmap *bitmap, void *opaque,
			 iova_bitmap_fn_t fn)
{
	int ret = 0;

	/*
	 * Walk the bitmap window by window; stop on the first error from
	 * either the callback or from advancing/repinning the window.
	 */
	while (!iova_bitmap_done(bitmap)) {
		ret = fn(bitmap, iova_bitmap_mapped_iova(bitmap),
			 iova_bitmap_mapped_length(bitmap), opaque);
		if (ret)
			break;

		ret = iova_bitmap_advance(bitmap);
		if (ret)
			break;
	}

	return ret;
}
/**
* iova_bitmap_set() - Records an IOVA range in bitmap
* @bitmap: IOVA bitmap
* @iova: IOVA to start
* @length: IOVA range length
*
* Set the bits corresponding to the range [iova .. iova+length-1] in
* the user bitmap.
*
*/
void iova_bitmap_set(struct iova_bitmap *bitmap,
		     unsigned long iova, size_t length)
{
	struct iova_bitmap_map *mapped = &bitmap->mapped;
	/*
	 * First and last bit to set, relative to bit 0 of the first pinned
	 * page. The pgoff term compensates for the bitmap word not being
	 * page aligned (each byte of offset shifts positions by 8 bits).
	 */
	unsigned long cur_bit = ((iova - mapped->iova) >>
			mapped->pgshift) + mapped->pgoff * BITS_PER_BYTE;
	unsigned long last_bit = (((iova + length - 1) - mapped->iova) >>
			mapped->pgshift) + mapped->pgoff * BITS_PER_BYTE;
	/* Set the range page by page, as it may straddle pinned pages */
	do {
		unsigned int page_idx = cur_bit / BITS_PER_PAGE;
		unsigned int offset = cur_bit % BITS_PER_PAGE;
		unsigned int nbits = min(BITS_PER_PAGE - offset,
					 last_bit - cur_bit + 1);
		void *kaddr;
		kaddr = kmap_local_page(mapped->pages[page_idx]);
		bitmap_set(kaddr, offset, nbits);
		kunmap_local(kaddr);
		cur_bit += nbits;
	} while (cur_bit <= last_bit);
}
EXPORT_SYMBOL_GPL(iova_bitmap_set);
| linux-master | drivers/vfio/iova_bitmap.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2012 Red Hat, Inc. All rights reserved.
*
* VFIO container (/dev/vfio/vfio)
*/
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/capability.h>
#include <linux/iommu.h>
#include <linux/miscdevice.h>
#include <linux/vfio.h>
#include <uapi/linux/vfio.h>
#include "vfio.h"
/*
 * A container is created per open of /dev/vfio/vfio and lives (via kref)
 * until its last user is gone. It holds the groups attached to it and,
 * once VFIO_SET_IOMMU succeeds, the backing IOMMU driver and its state.
 */
struct vfio_container {
	struct kref kref;		/* lifetime; freed on last put */
	struct list_head group_list;	/* attached vfio_groups */
	struct rw_semaphore group_lock;	/* protects group_list/iommu_* */
	struct vfio_iommu_driver *iommu_driver;
	void *iommu_data;		/* opaque state from driver open() */
	bool noiommu;			/* set when a no-iommu group attaches */
};
/* File-scope singleton: registry of available IOMMU backend drivers. */
static struct vfio {
	struct list_head iommu_drivers_list;
	struct mutex iommu_drivers_lock;	/* protects the list above */
} vfio;
/*
 * Open callback for the no-iommu backend. Only the VFIO_NOIOMMU_IOMMU
 * extension is understood, and bypassing the IOMMU is privileged.
 * Returns NULL (no per-container state) on success.
 */
static void *vfio_noiommu_open(unsigned long arg)
{
	if (arg != VFIO_NOIOMMU_IOMMU)
		return ERR_PTR(-EINVAL);

	return capable(CAP_SYS_RAWIO) ? NULL : ERR_PTR(-EPERM);
}
/* No per-container state is allocated by open, so nothing to release. */
static void vfio_noiommu_release(void *iommu_data)
{
}
/*
 * The no-iommu backend only answers VFIO_CHECK_EXTENSION, and only
 * positively when the module-level vfio_noiommu knob is enabled.
 */
static long vfio_noiommu_ioctl(void *iommu_data,
			       unsigned int cmd, unsigned long arg)
{
	if (cmd != VFIO_CHECK_EXTENSION)
		return -ENOTTY;

	return (vfio_noiommu && arg == VFIO_NOIOMMU_IOMMU) ? 1 : 0;
}
/* No IOMMU to program: attaching a group always succeeds trivially. */
static int vfio_noiommu_attach_group(void *iommu_data,
		struct iommu_group *iommu_group, enum vfio_group_type type)
{
	return 0;
}
/* Counterpart of the no-op attach: nothing to tear down. */
static void vfio_noiommu_detach_group(void *iommu_data,
		struct iommu_group *iommu_group)
{
}
/* Backend ops for containers that deliberately bypass the IOMMU. */
static const struct vfio_iommu_driver_ops vfio_noiommu_ops = {
	.name = "vfio-noiommu",
	.owner = THIS_MODULE,
	.open = vfio_noiommu_open,
	.release = vfio_noiommu_release,
	.ioctl = vfio_noiommu_ioctl,
	.attach_group = vfio_noiommu_attach_group,
	.detach_group = vfio_noiommu_detach_group,
};
/*
* Only noiommu containers can use vfio-noiommu and noiommu containers can only
* use vfio-noiommu.
*/
/*
 * Noiommu containers may only use the vfio-noiommu driver and vice
 * versa. With CONFIG_VFIO_NOIOMMU disabled every pairing is allowed.
 */
static bool vfio_iommu_driver_allowed(struct vfio_container *container,
				      const struct vfio_iommu_driver *driver)
{
	if (!IS_ENABLED(CONFIG_VFIO_NOIOMMU))
		return true;

	return container->noiommu == (driver->ops == &vfio_noiommu_ops);
}
/*
* IOMMU driver registration
*/
/*
 * Register an IOMMU backend driver. register_device/unregister_device
 * must be provided as a pair (or not at all); duplicate registration of
 * the same ops is rejected with -EINVAL.
 */
int vfio_register_iommu_driver(const struct vfio_iommu_driver_ops *ops)
{
	struct vfio_iommu_driver *new, *pos;

	if (WARN_ON(!ops->register_device != !ops->unregister_device))
		return -EINVAL;

	new = kzalloc(sizeof(*new), GFP_KERNEL);
	if (!new)
		return -ENOMEM;
	new->ops = ops;

	mutex_lock(&vfio.iommu_drivers_lock);
	/* Refuse duplicates */
	list_for_each_entry(pos, &vfio.iommu_drivers_list, vfio_next) {
		if (pos->ops == ops) {
			mutex_unlock(&vfio.iommu_drivers_lock);
			kfree(new);
			return -EINVAL;
		}
	}
	list_add(&new->vfio_next, &vfio.iommu_drivers_list);
	mutex_unlock(&vfio.iommu_drivers_lock);

	return 0;
}
EXPORT_SYMBOL_GPL(vfio_register_iommu_driver);
void vfio_unregister_iommu_driver(const struct vfio_iommu_driver_ops *ops)
{
struct vfio_iommu_driver *driver;
mutex_lock(&vfio.iommu_drivers_lock);
list_for_each_entry(driver, &vfio.iommu_drivers_list, vfio_next) {
if (driver->ops == ops) {
list_del(&driver->vfio_next);
mutex_unlock(&vfio.iommu_drivers_lock);
kfree(driver);
return;
}
}
mutex_unlock(&vfio.iommu_drivers_lock);
}
EXPORT_SYMBOL_GPL(vfio_unregister_iommu_driver);
/*
* Container objects - containers are created when /dev/vfio/vfio is
* opened, but their lifecycle extends until the last user is done, so
* it's freed via kref. Must support container/group/device being
* closed in any order.
*/
/* kref release callback: the last reference is gone, free the container. */
static void vfio_container_release(struct kref *kref)
{
	kfree(container_of(kref, struct vfio_container, kref));
}
/* Take an additional reference on the container. */
static void vfio_container_get(struct vfio_container *container)
{
	kref_get(&container->kref);
}
/* Drop a reference; frees the container when the last one is released. */
static void vfio_container_put(struct vfio_container *container)
{
	kref_put(&container->kref, vfio_container_release);
}
void vfio_device_container_register(struct vfio_device *device)
{
struct vfio_iommu_driver *iommu_driver =
device->group->container->iommu_driver;
if (iommu_driver && iommu_driver->ops->register_device)
iommu_driver->ops->register_device(
device->group->container->iommu_data, device);
}
void vfio_device_container_unregister(struct vfio_device *device)
{
struct vfio_iommu_driver *iommu_driver =
device->group->container->iommu_driver;
if (iommu_driver && iommu_driver->ops->unregister_device)
iommu_driver->ops->unregister_device(
device->group->container->iommu_data, device);
}
/*
 * Handle VFIO_CHECK_EXTENSION for a container. Before an IOMMU driver
 * is set, all registered (and allowed) drivers are polled; afterwards
 * only the bound driver is consulted.
 */
static long
vfio_container_ioctl_check_extension(struct vfio_container *container,
				     unsigned long arg)
{
	struct vfio_iommu_driver *driver;
	long ret = 0;
	down_read(&container->group_lock);
	driver = container->iommu_driver;
	switch (arg) {
		/* No base extensions yet */
	default:
		/*
		 * If no driver is set, poll all registered drivers for
		 * extensions and return the first positive result. If
		 * a driver is already set, further queries will be passed
		 * only to that driver.
		 */
		if (!driver) {
			mutex_lock(&vfio.iommu_drivers_lock);
			list_for_each_entry(driver, &vfio.iommu_drivers_list,
					    vfio_next) {
				/* Skip disallowed drivers once groups exist */
				if (!list_empty(&container->group_list) &&
				    !vfio_iommu_driver_allowed(container,
							       driver))
					continue;
				if (!try_module_get(driver->ops->owner))
					continue;
				ret = driver->ops->ioctl(NULL,
							 VFIO_CHECK_EXTENSION,
							 arg);
				module_put(driver->ops->owner);
				if (ret > 0)
					break;
			}
			mutex_unlock(&vfio.iommu_drivers_lock);
		} else
			ret = driver->ops->ioctl(container->iommu_data,
						 VFIO_CHECK_EXTENSION, arg);
	}
	up_read(&container->group_lock);
	return ret;
}
/* hold write lock on container->group_lock */
/*
 * Attach every group already in the container to @driver. On failure,
 * detach the groups attached so far (in reverse order) so the container
 * is left unchanged. Caller must hold container->group_lock for write.
 * Returns -ENODEV if the group list is empty.
 */
static int __vfio_container_attach_groups(struct vfio_container *container,
					  struct vfio_iommu_driver *driver,
					  void *data)
{
	struct vfio_group *group;
	int ret = -ENODEV;
	list_for_each_entry(group, &container->group_list, container_next) {
		ret = driver->ops->attach_group(data, group->iommu_group,
						group->type);
		if (ret)
			goto unwind;
	}
	return ret;
unwind:
	/* Roll back the groups attached before the failing one */
	list_for_each_entry_continue_reverse(group, &container->group_list,
					     container_next) {
		driver->ops->detach_group(data, group->iommu_group);
	}
	return ret;
}
/*
 * Handle VFIO_SET_IOMMU: bind the container to the first registered
 * IOMMU driver that claims support for the requested extension (@arg)
 * and successfully attaches all current groups. Requires at least one
 * group and no driver already set.
 */
static long vfio_ioctl_set_iommu(struct vfio_container *container,
				 unsigned long arg)
{
	struct vfio_iommu_driver *driver;
	long ret = -ENODEV;
	down_write(&container->group_lock);
	/*
	 * The container is designed to be an unprivileged interface while
	 * the group can be assigned to specific users. Therefore, only by
	 * adding a group to a container does the user get the privilege of
	 * enabling the iommu, which may allocate finite resources. There
	 * is no unset_iommu, but by removing all the groups from a container,
	 * the container is deprivileged and returns to an unset state.
	 */
	if (list_empty(&container->group_list) || container->iommu_driver) {
		up_write(&container->group_lock);
		return -EINVAL;
	}
	mutex_lock(&vfio.iommu_drivers_lock);
	list_for_each_entry(driver, &vfio.iommu_drivers_list, vfio_next) {
		void *data;
		if (!vfio_iommu_driver_allowed(container, driver))
			continue;
		if (!try_module_get(driver->ops->owner))
			continue;
		/*
		 * The arg magic for SET_IOMMU is the same as CHECK_EXTENSION,
		 * so test which iommu driver reported support for this
		 * extension and call open on them. We also pass them the
		 * magic, allowing a single driver to support multiple
		 * interfaces if they'd like.
		 */
		if (driver->ops->ioctl(NULL, VFIO_CHECK_EXTENSION, arg) <= 0) {
			module_put(driver->ops->owner);
			continue;
		}
		data = driver->ops->open(arg);
		if (IS_ERR(data)) {
			ret = PTR_ERR(data);
			module_put(driver->ops->owner);
			continue;
		}
		ret = __vfio_container_attach_groups(container, driver, data);
		if (ret) {
			driver->ops->release(data);
			module_put(driver->ops->owner);
			continue;
		}
		/* Success: module ref is kept until the driver is unset */
		container->iommu_driver = driver;
		container->iommu_data = data;
		break;
	}
	mutex_unlock(&vfio.iommu_drivers_lock);
	up_write(&container->group_lock);
	return ret;
}
/*
 * Container fd ioctl entry. API version and extension checks are handled
 * here; anything unrecognized is passed through to the bound IOMMU
 * driver, or fails with -EINVAL when no driver has been set.
 */
static long vfio_fops_unl_ioctl(struct file *filep,
				unsigned int cmd, unsigned long arg)
{
	struct vfio_container *container = filep->private_data;
	struct vfio_iommu_driver *driver;

	if (!container)
		return -EINVAL;

	switch (cmd) {
	case VFIO_GET_API_VERSION:
		return VFIO_API_VERSION;
	case VFIO_CHECK_EXTENSION:
		return vfio_container_ioctl_check_extension(container, arg);
	case VFIO_SET_IOMMU:
		return vfio_ioctl_set_iommu(container, arg);
	default:
		driver = container->iommu_driver;
		if (!driver)
			return -EINVAL;
		/* passthrough all unrecognized ioctls */
		return driver->ops->ioctl(container->iommu_data, cmd, arg);
	}
}
/* open(/dev/vfio/vfio): allocate and initialize an empty container. */
static int vfio_fops_open(struct inode *inode, struct file *filep)
{
	struct vfio_container *container;
	container = kzalloc(sizeof(*container), GFP_KERNEL_ACCOUNT);
	if (!container)
		return -ENOMEM;
	INIT_LIST_HEAD(&container->group_list);
	init_rwsem(&container->group_lock);
	kref_init(&container->kref);
	filep->private_data = container;
	return 0;
}
/*
 * Last close of the container fd: drop the fd's reference. The container
 * itself may outlive this if groups still hold references.
 */
static int vfio_fops_release(struct inode *inode, struct file *filep)
{
	struct vfio_container *container = filep->private_data;
	filep->private_data = NULL;
	vfio_container_put(container);
	return 0;
}
/* File operations for the /dev/vfio/vfio container device. */
static const struct file_operations vfio_fops = {
	.owner		= THIS_MODULE,
	.open		= vfio_fops_open,
	.release	= vfio_fops_release,
	.unlocked_ioctl	= vfio_fops_unl_ioctl,
	.compat_ioctl	= compat_ptr_ioctl,
};
/*
 * Resolve a struct file to its container, or NULL if the file is not a
 * container fd. No reference is taken; the caller's file reference
 * keeps the container alive.
 */
struct vfio_container *vfio_container_from_file(struct file *file)
{
	struct vfio_container *container;
	/* Sanity check, is this really our fd? */
	if (file->f_op != &vfio_fops)
		return NULL;
	container = file->private_data;
	WARN_ON(!container); /* fget ensures we don't race vfio_release */
	return container;
}
/* Misc device backing /dev/vfio/vfio; world read/write by design. */
static struct miscdevice vfio_dev = {
	.minor = VFIO_MINOR,
	.name = "vfio",
	.fops = &vfio_fops,
	.nodename = "vfio/vfio",
	.mode = S_IRUGO | S_IWUGO,
};
/*
 * Attach a group to a container. Enforces the noiommu/real-group
 * exclusivity, claims DMA ownership for real IOMMU groups, and lets the
 * bound driver (if any) attach the group. Caller holds group->group_lock.
 */
int vfio_container_attach_group(struct vfio_container *container,
				struct vfio_group *group)
{
	struct vfio_iommu_driver *driver;
	int ret = 0;
	lockdep_assert_held(&group->group_lock);
	if (group->type == VFIO_NO_IOMMU && !capable(CAP_SYS_RAWIO))
		return -EPERM;
	down_write(&container->group_lock);
	/* Real groups and fake groups cannot mix */
	if (!list_empty(&container->group_list) &&
	    container->noiommu != (group->type == VFIO_NO_IOMMU)) {
		ret = -EPERM;
		goto out_unlock_container;
	}
	if (group->type == VFIO_IOMMU) {
		ret = iommu_group_claim_dma_owner(group->iommu_group, group);
		if (ret)
			goto out_unlock_container;
	}
	driver = container->iommu_driver;
	if (driver) {
		ret = driver->ops->attach_group(container->iommu_data,
						group->iommu_group,
						group->type);
		if (ret) {
			/* Undo the DMA ownership claimed above */
			if (group->type == VFIO_IOMMU)
				iommu_group_release_dma_owner(
					group->iommu_group);
			goto out_unlock_container;
		}
	}
	group->container = container;
	group->container_users = 1;
	container->noiommu = (group->type == VFIO_NO_IOMMU);
	list_add(&group->container_next, &container->group_list);
	/* Get a reference on the container and mark a user within the group */
	vfio_container_get(container);
out_unlock_container:
	up_write(&container->group_lock);
	return ret;
}
/*
 * Detach a group from its container: driver detach, release DMA
 * ownership, unlink, and — if this was the last group — release the
 * IOMMU driver so the container returns to the unset, unprivileged
 * state. Caller holds group->group_lock with exactly one user left.
 */
void vfio_group_detach_container(struct vfio_group *group)
{
	struct vfio_container *container = group->container;
	struct vfio_iommu_driver *driver;
	lockdep_assert_held(&group->group_lock);
	WARN_ON(group->container_users != 1);
	down_write(&container->group_lock);
	driver = container->iommu_driver;
	if (driver)
		driver->ops->detach_group(container->iommu_data,
					  group->iommu_group);
	if (group->type == VFIO_IOMMU)
		iommu_group_release_dma_owner(group->iommu_group);
	group->container = NULL;
	group->container_users = 0;
	list_del(&group->container_next);
	/* Detaching the last group deprivileges a container, remove iommu */
	if (driver && list_empty(&container->group_list)) {
		driver->ops->release(container->iommu_data);
		module_put(driver->ops->owner);
		container->iommu_driver = NULL;
		container->iommu_data = NULL;
	}
	up_write(&container->group_lock);
	/* Drop the reference taken when the group attached */
	vfio_container_put(container);
}
/*
 * Mark an additional user of the group's container. Fails if the
 * container has no IOMMU driver yet or the caller lacks privilege for
 * a no-iommu group. Pins the group's opened file for the duration.
 */
int vfio_group_use_container(struct vfio_group *group)
{
	lockdep_assert_held(&group->group_lock);
	/*
	 * The container fd has been assigned with VFIO_GROUP_SET_CONTAINER but
	 * VFIO_SET_IOMMU hasn't been done yet.
	 */
	if (!group->container->iommu_driver)
		return -EINVAL;
	if (group->type == VFIO_NO_IOMMU && !capable(CAP_SYS_RAWIO))
		return -EPERM;
	get_file(group->opened_file);
	group->container_users++;
	return 0;
}
/* Drop one container user and the matching file pin; never the last one. */
void vfio_group_unuse_container(struct vfio_group *group)
{
	lockdep_assert_held(&group->group_lock);
	WARN_ON(group->container_users <= 1);
	group->container_users--;
	fput(group->opened_file);
}
/*
 * Pin up to VFIO_PIN_PAGES_MAX_ENTRIES user pages at @iova through the
 * container's IOMMU driver. Returns the driver's result, -E2BIG when
 * over the limit, or -ENOTTY when the driver lacks pin support.
 */
int vfio_device_container_pin_pages(struct vfio_device *device,
				    dma_addr_t iova, int npage,
				    int prot, struct page **pages)
{
	struct vfio_container *container = device->group->container;
	struct iommu_group *iommu_group = device->group->iommu_group;
	struct vfio_iommu_driver *driver = container->iommu_driver;
	if (npage > VFIO_PIN_PAGES_MAX_ENTRIES)
		return -E2BIG;
	if (unlikely(!driver || !driver->ops->pin_pages))
		return -ENOTTY;
	return driver->ops->pin_pages(container->iommu_data, iommu_group, iova,
				      npage, prot, pages);
}
/*
 * Unpin pages previously pinned via vfio_device_container_pin_pages().
 * The driver's unpin_pages op is assumed present since pinning
 * succeeded earlier; out-of-range counts only warn.
 */
void vfio_device_container_unpin_pages(struct vfio_device *device,
				       dma_addr_t iova, int npage)
{
	struct vfio_container *container = device->group->container;
	if (WARN_ON(npage <= 0 || npage > VFIO_PIN_PAGES_MAX_ENTRIES))
		return;
	container->iommu_driver->ops->unpin_pages(container->iommu_data, iova,
						  npage);
}
/*
 * Read from or write to guest IOVA space through the container's IOMMU
 * driver. -ENOTTY when no driver is set or it lacks a dma_rw op.
 */
int vfio_device_container_dma_rw(struct vfio_device *device,
				 dma_addr_t iova, void *data,
				 size_t len, bool write)
{
	struct vfio_container *container = device->group->container;
	struct vfio_iommu_driver *driver = container->iommu_driver;
	if (unlikely(!driver || !driver->ops->dma_rw))
		return -ENOTTY;
	return driver->ops->dma_rw(container->iommu_data, iova, data, len,
				   write);
}
/*
 * Module init for the container layer: set up the driver registry,
 * register the /dev/vfio/vfio misc device and, when configured, the
 * no-iommu backend. Unwinds the misc device on backend failure.
 */
int __init vfio_container_init(void)
{
	int ret;

	mutex_init(&vfio.iommu_drivers_lock);
	INIT_LIST_HEAD(&vfio.iommu_drivers_list);

	ret = misc_register(&vfio_dev);
	if (ret) {
		pr_err("vfio: misc device register failed\n");
		return ret;
	}

	if (!IS_ENABLED(CONFIG_VFIO_NOIOMMU))
		return 0;

	ret = vfio_register_iommu_driver(&vfio_noiommu_ops);
	if (ret)
		misc_deregister(&vfio_dev);

	return ret;
}
/* Module teardown: mirror of vfio_container_init() in reverse order. */
void vfio_container_cleanup(void)
{
	if (IS_ENABLED(CONFIG_VFIO_NOIOMMU))
		vfio_unregister_iommu_driver(&vfio_noiommu_ops);
	misc_deregister(&vfio_dev);
	mutex_destroy(&vfio.iommu_drivers_lock);
}
MODULE_ALIAS_MISCDEV(VFIO_MINOR);
MODULE_ALIAS("devname:vfio/vfio");
| linux-master | drivers/vfio/container.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2023 Intel Corporation.
*/
#include <linux/vfio.h>
#include <linux/iommufd.h>
#include "vfio.h"
/* Base dev_t for per-device cdevs, allocated in vfio_cdev_init(). */
static dev_t device_devt;
/* Initialize a device's cdev; the minor is the device's vfio index. */
void vfio_init_device_cdev(struct vfio_device *device)
{
	device->device.devt = MKDEV(MAJOR(device_devt), device->index);
	cdev_init(&device->cdev, &vfio_device_fops);
	device->cdev.owner = THIS_MODULE;
}
/*
* device access via the fd opened by this function is blocked until
* .open_device() is called successfully during BIND_IOMMUFD.
*/
int vfio_device_fops_cdev_open(struct inode *inode, struct file *filep)
{
	struct vfio_device *device = container_of(inode->i_cdev,
						  struct vfio_device, cdev);
	struct vfio_device_file *df;
	int ret;
	/* Paired with the put in vfio_device_fops_release() */
	if (!vfio_device_try_get_registration(device))
		return -ENODEV;
	/* Per-open state; access stays blocked until BIND_IOMMUFD */
	df = vfio_allocate_device_file(device);
	if (IS_ERR(df)) {
		ret = PTR_ERR(df);
		goto err_put_registration;
	}
	filep->private_data = df;
	return 0;
err_put_registration:
	vfio_device_put_registration(device);
	return ret;
}
/*
 * Snapshot the file's current KVM pointer into the device under
 * kvm_ref_lock, taking a reference so it cannot vanish concurrently.
 */
static void vfio_df_get_kvm_safe(struct vfio_device_file *df)
{
	spin_lock(&df->kvm_ref_lock);
	vfio_device_get_kvm_safe(df->device, df->kvm);
	spin_unlock(&df->kvm_ref_lock);
}
/*
 * VFIO_DEVICE_BIND_IOMMUFD on a cdev fd: bind the device to an iommufd
 * context, open the device, and report the device id back to userspace.
 * Only after this succeeds is device access through the fd granted
 * (published via smp_store_release on df->access_granted).
 */
long vfio_df_ioctl_bind_iommufd(struct vfio_device_file *df,
				struct vfio_device_bind_iommufd __user *arg)
{
	struct vfio_device *device = df->device;
	struct vfio_device_bind_iommufd bind;
	unsigned long minsz;
	int ret;
	static_assert(__same_type(arg->out_devid, df->devid));
	minsz = offsetofend(struct vfio_device_bind_iommufd, out_devid);
	if (copy_from_user(&bind, arg, minsz))
		return -EFAULT;
	if (bind.argsz < minsz || bind.flags || bind.iommufd < 0)
		return -EINVAL;
	/* BIND_IOMMUFD only allowed for cdev fds */
	if (df->group)
		return -EINVAL;
	/* cdev and legacy group paths are mutually exclusive */
	ret = vfio_device_block_group(device);
	if (ret)
		return ret;
	mutex_lock(&device->dev_set->lock);
	/* one device cannot be bound twice */
	if (df->access_granted) {
		ret = -EINVAL;
		goto out_unlock;
	}
	df->iommufd = iommufd_ctx_from_fd(bind.iommufd);
	if (IS_ERR(df->iommufd)) {
		ret = PTR_ERR(df->iommufd);
		df->iommufd = NULL;
		goto out_unlock;
	}
	/*
	 * Before the device open, get the KVM pointer currently
	 * associated with the device file (if there is) and obtain
	 * a reference. This reference is held until device closed.
	 * Save the pointer in the device for use by drivers.
	 */
	vfio_df_get_kvm_safe(df);
	ret = vfio_df_open(df);
	if (ret)
		goto out_put_kvm;
	ret = copy_to_user(&arg->out_devid, &df->devid,
			   sizeof(df->devid)) ? -EFAULT : 0;
	if (ret)
		goto out_close_device;
	device->cdev_opened = true;
	/*
	 * Paired with smp_load_acquire() in vfio_device_fops::ioctl/
	 * read/write/mmap
	 */
	smp_store_release(&df->access_granted, true);
	mutex_unlock(&device->dev_set->lock);
	return 0;
out_close_device:
	vfio_df_close(df);
out_put_kvm:
	vfio_device_put_kvm(device);
	iommufd_ctx_put(df->iommufd);
	df->iommufd = NULL;
out_unlock:
	mutex_unlock(&device->dev_set->lock);
	vfio_device_unblock_group(device);
	return ret;
}
/*
 * Undo vfio_df_ioctl_bind_iommufd() at fd close time: close the device,
 * drop the KVM and iommufd references, and re-allow the group path.
 * A fd that was never successfully bound is a no-op.
 */
void vfio_df_unbind_iommufd(struct vfio_device_file *df)
{
	struct vfio_device *device = df->device;
	/*
	 * In the time of close, there is no contention with another one
	 * changing this flag. So read df->access_granted without lock
	 * and no smp_load_acquire() is ok.
	 */
	if (!df->access_granted)
		return;
	mutex_lock(&device->dev_set->lock);
	vfio_df_close(df);
	vfio_device_put_kvm(device);
	iommufd_ctx_put(df->iommufd);
	device->cdev_opened = false;
	mutex_unlock(&device->dev_set->lock);
	vfio_device_unblock_group(device);
}
/*
 * VFIO_DEVICE_ATTACH_IOMMUFD_PT: attach the device to an IOAS/HWPT and
 * return the (possibly updated) page table id to userspace. If copying
 * the result back fails, the attach is rolled back.
 */
int vfio_df_ioctl_attach_pt(struct vfio_device_file *df,
			    struct vfio_device_attach_iommufd_pt __user *arg)
{
	struct vfio_device *device = df->device;
	struct vfio_device_attach_iommufd_pt attach;
	unsigned long minsz;
	int ret;
	minsz = offsetofend(struct vfio_device_attach_iommufd_pt, pt_id);
	if (copy_from_user(&attach, arg, minsz))
		return -EFAULT;
	if (attach.argsz < minsz || attach.flags)
		return -EINVAL;
	mutex_lock(&device->dev_set->lock);
	ret = device->ops->attach_ioas(device, &attach.pt_id);
	if (ret)
		goto out_unlock;
	/* attach_ioas may rewrite pt_id; report it back to the caller */
	if (copy_to_user(&arg->pt_id, &attach.pt_id, sizeof(attach.pt_id))) {
		ret = -EFAULT;
		goto out_detach;
	}
	mutex_unlock(&device->dev_set->lock);
	return 0;
out_detach:
	device->ops->detach_ioas(device);
out_unlock:
	mutex_unlock(&device->dev_set->lock);
	return ret;
}
/*
 * VFIO_DEVICE_DETACH_IOMMUFD_PT: validate the user argument and detach
 * the device from its current IOAS/HWPT under the device set lock.
 */
int vfio_df_ioctl_detach_pt(struct vfio_device_file *df,
			    struct vfio_device_detach_iommufd_pt __user *arg)
{
	struct vfio_device *device = df->device;
	struct vfio_device_detach_iommufd_pt detach;
	unsigned long minsz =
		offsetofend(struct vfio_device_detach_iommufd_pt, flags);

	if (copy_from_user(&detach, arg, minsz))
		return -EFAULT;

	if (detach.argsz < minsz || detach.flags)
		return -EINVAL;

	mutex_lock(&device->dev_set->lock);
	device->ops->detach_ioas(device);
	mutex_unlock(&device->dev_set->lock);

	return 0;
}
/* devtmpfs node name callback: place cdevs under /dev/vfio/devices/. */
static char *vfio_device_devnode(const struct device *dev, umode_t *mode)
{
	return kasprintf(GFP_KERNEL, "vfio/devices/%s", dev_name(dev));
}
/* Reserve the char device region for all vfio device cdev minors. */
int vfio_cdev_init(struct class *device_class)
{
	device_class->devnode = vfio_device_devnode;
	return alloc_chrdev_region(&device_devt, 0,
				   MINORMASK + 1, "vfio-dev");
}
/* Release the char device region reserved in vfio_cdev_init(). */
void vfio_cdev_cleanup(void)
{
	unregister_chrdev_region(device_devt, MINORMASK + 1);
}
| linux-master | drivers/vfio/device_cdev.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* VFIO: IOMMU DMA mapping support for Type1 IOMMU
*
* Copyright (C) 2012 Red Hat, Inc. All rights reserved.
* Author: Alex Williamson <[email protected]>
*
* Derived from original vfio:
* Copyright 2010 Cisco Systems, Inc. All rights reserved.
* Author: Tom Lyon, [email protected]
*
* We arbitrarily define a Type1 IOMMU as one matching the below code.
* It could be called the x86 IOMMU as it's designed for AMD-Vi & Intel
* VT-d, but that makes it harder to re-use as theoretically anyone
* implementing a similar IOMMU could make use of this. We expect the
* IOMMU to support the IOMMU API and have few to no restrictions around
* the IOVA range that can be mapped. The Type1 IOMMU is currently
* optimized for relatively static mappings of a userspace process with
* userspace pages pinned into memory. We also assume devices and IOMMU
* domains are PCI based as the IOMMU API is still centered around a
* device/bus interface rather than a group interface.
*/
#include <linux/compat.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/highmem.h>
#include <linux/iommu.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/kthread.h>
#include <linux/rbtree.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/vfio.h>
#include <linux/workqueue.h>
#include <linux/notifier.h>
#include "vfio.h"
#define DRIVER_VERSION  "0.2"
#define DRIVER_AUTHOR   "Alex Williamson <[email protected]>"
#define DRIVER_DESC     "Type1 IOMMU driver for VFIO"

/* Allow use without interrupt remapping (opens an injection risk). */
static bool allow_unsafe_interrupts;
module_param_named(allow_unsafe_interrupts,
		   allow_unsafe_interrupts, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(allow_unsafe_interrupts,
		 "Enable VFIO IOMMU support on platforms without interrupt remapping support.");

/* Force page-at-a-time mapping instead of IOMMU hugepages. */
static bool disable_hugepages;
module_param_named(disable_hugepages,
		   disable_hugepages, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(disable_hugepages,
		 "Disable VFIO IOMMU support for IOMMU hugepages.");

/* Upper bound on user DMA mappings per container. */
static unsigned int dma_entry_limit __read_mostly = U16_MAX;
module_param_named(dma_entry_limit, dma_entry_limit, uint, 0644);
MODULE_PARM_DESC(dma_entry_limit,
		 "Maximum number of user DMA mappings per container (65535).");
/*
 * Per-container state.  All fields are protected by @lock unless noted;
 * @device_list is additionally guarded by @device_list_lock so it can be
 * walked with @lock dropped (see vfio_notify_dma_unmap()).
 */
struct vfio_iommu {
	struct list_head	domain_list;	/* vfio_domain instances */
	struct list_head	iova_list;	/* valid iova ranges (vfio_iova) */
	struct mutex		lock;
	struct rb_root		dma_list;	/* vfio_dma entries keyed by iova */
	struct list_head	device_list;	/* emulated devices for unmap cb */
	struct mutex		device_list_lock;
	unsigned int		dma_avail;	/* remaining mapping slots */
	unsigned int		vaddr_invalid_count; /* dmas with stale vaddr */
	uint64_t		pgsize_bitmap;	/* supported mapping page sizes */
	uint64_t		num_non_pinned_groups;
	bool			v2;		/* VFIO_TYPE1v2_IOMMU semantics */
	bool			nesting;
	bool			dirty_page_tracking;
	struct list_head	emulated_iommu_groups;
};
/* One IOMMU API domain; a container may hold several on @domain_list. */
struct vfio_domain {
	struct iommu_domain	*domain;
	struct list_head	next;		/* link in iommu->domain_list */
	struct list_head	group_list;	/* vfio_iommu_group members */
	bool			fgsp : 1;	/* Fine-grained super pages */
	bool			enforce_cache_coherency : 1;
};
/* One user DMA mapping (node in iommu->dma_list, keyed by iova range). */
struct vfio_dma {
	struct rb_node		node;
	dma_addr_t		iova;		/* Device address */
	unsigned long		vaddr;		/* Process virtual addr */
	size_t			size;		/* Map size (bytes) */
	int			prot;		/* IOMMU_READ/WRITE */
	bool			iommu_mapped;
	bool			lock_cap;	/* capable(CAP_IPC_LOCK) */
	bool			vaddr_invalid;	/* vaddr stale until re-mapped */
	struct task_struct	*task;		/* owner, for accounting */
	struct rb_root		pfn_list;	/* Ex-user pinned pfn list */
	unsigned long		*bitmap;	/* dirty-page tracking bitmap */
	struct mm_struct	*mm;		/* mm charged for locked pages */
	size_t			locked_vm;	/* pages accounted to @mm */
};
/* Scratch page batch used while pinning ranges of user memory. */
struct vfio_batch {
	struct page		**pages;	/* for pin_user_pages_remote */
	struct page		*fallback_page; /* if pages alloc fails */
	int			capacity;	/* length of pages array */
	int			size;		/* of batch currently */
	int			offset;		/* of next entry in pages */
};
/* An iommu_group attached to a vfio_domain (or emulated group list). */
struct vfio_iommu_group {
	struct iommu_group	*iommu_group;
	struct list_head	next;
	bool			pinned_page_dirty_scope; /* dirties only pins */
};

/* One valid iova window; iommu->iova_list holds the allowed ranges. */
struct vfio_iova {
	struct list_head	list;
	dma_addr_t		start;
	dma_addr_t		end;
};
/*
 * Guest RAM pinning working set or DMA target
 */
struct vfio_pfn {
	struct rb_node		node;
	dma_addr_t		iova;		/* Device address */
	unsigned long		pfn;		/* Host pfn */
	unsigned int		ref_count;	/* external pin references */
};

/* Deferred-unpin record collected between IOTLB flush syncs. */
struct vfio_regions {
	struct list_head list;
	dma_addr_t iova;
	phys_addr_t phys;
	size_t len;
};
/* Bytes needed for an n-bit bitmap, rounded up to u64 granularity. */
#define DIRTY_BITMAP_BYTES(n)	(ALIGN(n, BITS_PER_TYPE(u64)) / BITS_PER_BYTE)

/*
 * Input argument of number of bits to bitmap_set() is unsigned integer, which
 * further casts to signed integer for unaligned multi-bit operation,
 * __bitmap_set().
 * Then maximum bitmap size supported is 2^31 bits divided by 2^3 bits/byte,
 * that is 2^28 (256 MB) which maps to 2^31 * 2^12 = 2^43 (8TB) on 4K page
 * system.
 */
#define DIRTY_BITMAP_PAGES_MAX	 ((u64)INT_MAX)
#define DIRTY_BITMAP_SIZE_MAX	 DIRTY_BITMAP_BYTES(DIRTY_BITMAP_PAGES_MAX)

/* Forward declarations for helpers defined later in this file. */
static int put_pfn(unsigned long pfn, int prot);

static struct vfio_iommu_group*
vfio_iommu_find_iommu_group(struct vfio_iommu *iommu,
			    struct iommu_group *iommu_group);
/*
 * This code handles mapping and unmapping of user data buffers
 * into DMA'ble space using the IOMMU
 */

/*
 * Return the vfio_dma whose iova range intersects [start, start + size),
 * or NULL if nothing in the container overlaps it.
 */
static struct vfio_dma *vfio_find_dma(struct vfio_iommu *iommu,
				      dma_addr_t start, size_t size)
{
	struct rb_node *n;

	for (n = iommu->dma_list.rb_node; n; ) {
		struct vfio_dma *d = rb_entry(n, struct vfio_dma, node);

		if (start + size <= d->iova)
			n = n->rb_left;
		else if (start >= d->iova + d->size)
			n = n->rb_right;
		else
			return d;
	}

	return NULL;
}
/*
 * Find the lowest-iova vfio_dma node whose range ends after @start.
 * If @size is non-zero, the node must also begin before @start + @size,
 * otherwise NULL is returned (nothing intersects the window).
 */
static struct rb_node *vfio_find_dma_first_node(struct vfio_iommu *iommu,
						dma_addr_t start, u64 size)
{
	struct rb_node *res = NULL;
	struct rb_node *node = iommu->dma_list.rb_node;
	struct vfio_dma *dma_res = NULL;

	while (node) {
		struct vfio_dma *dma = rb_entry(node, struct vfio_dma, node);

		if (start < dma->iova + dma->size) {
			/* Candidate; keep descending left for an earlier one */
			res = node;
			dma_res = dma;
			if (start >= dma->iova)
				break;	/* @start lies inside this node */
			node = node->rb_left;
		} else {
			node = node->rb_right;
		}
	}
	if (res && size && dma_res->iova >= start + size)
		res = NULL;
	return res;
}
/*
 * Insert @new into the container's dma rb-tree, ordered by iova.
 * Ranges never overlap, so ordering by range end vs. start is total.
 */
static void vfio_link_dma(struct vfio_iommu *iommu, struct vfio_dma *new)
{
	struct rb_node **slot = &iommu->dma_list.rb_node;
	struct rb_node *parent = NULL;

	while (*slot) {
		struct vfio_dma *cur;

		parent = *slot;
		cur = rb_entry(parent, struct vfio_dma, node);

		if (new->iova + new->size <= cur->iova)
			slot = &parent->rb_left;
		else
			slot = &parent->rb_right;
	}

	rb_link_node(&new->node, parent, slot);
	rb_insert_color(&new->node, &iommu->dma_list);
}
/* Remove @old from the container's dma rb-tree (caller frees it). */
static void vfio_unlink_dma(struct vfio_iommu *iommu, struct vfio_dma *old)
{
	rb_erase(&old->node, &iommu->dma_list);
}
/*
 * Allocate the dirty bitmap for @dma, one bit per @pgsize page.
 * Returns 0, -EINVAL if the range is too large to track, or -ENOMEM.
 */
static int vfio_dma_bitmap_alloc(struct vfio_dma *dma, size_t pgsize)
{
	uint64_t npages = dma->size / pgsize;

	if (npages > DIRTY_BITMAP_PAGES_MAX)
		return -EINVAL;

	/*
	 * Allocate extra 64 bits that are used to calculate shift required for
	 * bitmap_shift_left() to manipulate and club unaligned number of pages
	 * in adjacent vfio_dma ranges.
	 */
	dma->bitmap = kvzalloc(DIRTY_BITMAP_BYTES(npages) + sizeof(u64),
			       GFP_KERNEL);
	if (!dma->bitmap)
		return -ENOMEM;

	return 0;
}
/* Release @dma's dirty bitmap; safe to call when none is allocated. */
static void vfio_dma_bitmap_free(struct vfio_dma *dma)
{
	kvfree(dma->bitmap);
	dma->bitmap = NULL;
}
/*
 * Mark every externally pinned page of @dma dirty in its bitmap,
 * at @pgsize granularity.
 */
static void vfio_dma_populate_bitmap(struct vfio_dma *dma, size_t pgsize)
{
	unsigned long pgshift = __ffs(pgsize);
	struct rb_node *node;

	for (node = rb_first(&dma->pfn_list); node; node = rb_next(node)) {
		struct vfio_pfn *vpfn = rb_entry(node, struct vfio_pfn, node);

		bitmap_set(dma->bitmap, (vpfn->iova - dma->iova) >> pgshift, 1);
	}
}
/*
 * Mark every page of every mapping in the container dirty, using the
 * smallest supported page size as granularity.
 */
static void vfio_iommu_populate_bitmap_full(struct vfio_iommu *iommu)
{
	unsigned long pgshift = __ffs(iommu->pgsize_bitmap);
	struct rb_node *node;

	for (node = rb_first(&iommu->dma_list); node; node = rb_next(node)) {
		struct vfio_dma *dma = rb_entry(node, struct vfio_dma, node);

		bitmap_set(dma->bitmap, 0, dma->size >> pgshift);
	}
}
/*
 * Allocate a dirty bitmap for every vfio_dma in the container and seed
 * each with its currently pinned pages.  On allocation failure, free the
 * bitmaps already allocated so the container state is left unchanged.
 *
 * Returns 0 on success or a negative errno.
 */
static int vfio_dma_bitmap_alloc_all(struct vfio_iommu *iommu, size_t pgsize)
{
	struct rb_node *n;

	for (n = rb_first(&iommu->dma_list); n; n = rb_next(n)) {
		struct vfio_dma *dma = rb_entry(n, struct vfio_dma, node);
		int ret;

		ret = vfio_dma_bitmap_alloc(dma, pgsize);
		if (ret) {
			struct rb_node *p;

			/*
			 * Unwind every entry processed before this one.
			 * Note rb_entry(p, ...): the previous code reused
			 * 'n' here, repeatedly "freeing" the failed
			 * entry's NULL bitmap and leaking all earlier
			 * allocations.
			 */
			for (p = rb_prev(n); p; p = rb_prev(p)) {
				struct vfio_dma *dma = rb_entry(p,
							struct vfio_dma, node);

				vfio_dma_bitmap_free(dma);
			}
			return ret;
		}
		vfio_dma_populate_bitmap(dma, pgsize);
	}
	return 0;
}
/* Drop the dirty bitmap of every mapping in the container. */
static void vfio_dma_bitmap_free_all(struct vfio_iommu *iommu)
{
	struct rb_node *node;

	for (node = rb_first(&iommu->dma_list); node; node = rb_next(node))
		vfio_dma_bitmap_free(rb_entry(node, struct vfio_dma, node));
}
/*
 * Helper Functions for host iova-pfn list
 */

/* Look up the externally pinned pfn record for @iova, if any. */
static struct vfio_pfn *vfio_find_vpfn(struct vfio_dma *dma, dma_addr_t iova)
{
	struct rb_node *n = dma->pfn_list.rb_node;

	while (n) {
		struct vfio_pfn *p = rb_entry(n, struct vfio_pfn, node);

		if (iova < p->iova)
			n = n->rb_left;
		else if (iova > p->iova)
			n = n->rb_right;
		else
			return p;
	}

	return NULL;
}
/* Insert @new into @dma's pinned-pfn rb-tree, ordered by iova. */
static void vfio_link_pfn(struct vfio_dma *dma,
			  struct vfio_pfn *new)
{
	struct rb_node **slot = &dma->pfn_list.rb_node;
	struct rb_node *parent = NULL;

	while (*slot) {
		struct vfio_pfn *cur;

		parent = *slot;
		cur = rb_entry(parent, struct vfio_pfn, node);

		if (new->iova < cur->iova)
			slot = &parent->rb_left;
		else
			slot = &parent->rb_right;
	}

	rb_link_node(&new->node, parent, slot);
	rb_insert_color(&new->node, &dma->pfn_list);
}
/* Remove @old from @dma's pinned-pfn rb-tree (caller frees it). */
static void vfio_unlink_pfn(struct vfio_dma *dma, struct vfio_pfn *old)
{
	rb_erase(&old->node, &dma->pfn_list);
}
/*
 * Record an external pin of @pfn at @iova with an initial reference.
 * Returns 0 or -ENOMEM.
 */
static int vfio_add_to_pfn_list(struct vfio_dma *dma, dma_addr_t iova,
				unsigned long pfn)
{
	struct vfio_pfn *new = kzalloc(sizeof(*new), GFP_KERNEL);

	if (!new)
		return -ENOMEM;

	new->iova = iova;
	new->pfn = pfn;
	new->ref_count = 1;
	vfio_link_pfn(dma, new);

	return 0;
}
/* Unlink and free an external-pin record. */
static void vfio_remove_from_pfn_list(struct vfio_dma *dma,
				      struct vfio_pfn *vpfn)
{
	vfio_unlink_pfn(dma, vpfn);
	kfree(vpfn);
}
static struct vfio_pfn *vfio_iova_get_vfio_pfn(struct vfio_dma *dma,
unsigned long iova)
{
struct vfio_pfn *vpfn = vfio_find_vpfn(dma, iova);
if (vpfn)
vpfn->ref_count++;
return vpfn;
}
/*
 * Drop a reference on @vpfn; on the last reference, unpin the page and
 * remove the record.  Returns 1 when a page was actually unpinned.
 */
static int vfio_iova_put_vfio_pfn(struct vfio_dma *dma, struct vfio_pfn *vpfn)
{
	int unpinned = 0;

	if (--vpfn->ref_count == 0) {
		unpinned = put_pfn(vpfn->pfn, dma->prot);
		vfio_remove_from_pfn_list(dma, vpfn);
	}

	return unpinned;
}
/*
 * Charge (npage > 0) or uncharge (npage < 0) |npage| pages of locked
 * memory against @mm on behalf of @task, under mmap write lock.
 */
static int mm_lock_acct(struct task_struct *task, struct mm_struct *mm,
			bool lock_cap, long npage)
{
	int ret;

	ret = mmap_write_lock_killable(mm);
	if (ret)
		return ret;

	ret = __account_locked_vm(mm, abs(npage), npage > 0, task, lock_cap);
	mmap_write_unlock(mm);

	return ret;
}
/*
 * Adjust locked_vm accounting for @dma by @npage pages (signed).
 * @async: caller may be running after the owning task started exiting;
 * take an mm reference and fail with -ESRCH if the mm is already gone.
 */
static int vfio_lock_acct(struct vfio_dma *dma, long npage, bool async)
{
	struct mm_struct *mm;
	int ret;

	if (!npage)
		return 0;

	mm = dma->mm;
	if (async && !mmget_not_zero(mm))
		return -ESRCH; /* process exited */

	ret = mm_lock_acct(dma->task, mm, dma->lock_cap, npage);
	if (!ret)
		dma->locked_vm += npage;

	if (async)
		mmput(mm);

	return ret;
}
/*
 * Some mappings aren't backed by a struct page, for example an mmap'd
 * MMIO range for our own or another device.  These use a different
 * pfn conversion and shouldn't be tracked as locked pages.
 * For compound pages, any driver that sets the reserved bit in head
 * page needs to set the reserved bit in all subpages to be safe.
 */
static bool is_invalid_reserved_pfn(unsigned long pfn)
{
	if (!pfn_valid(pfn))
		return true;

	return PageReserved(pfn_to_page(pfn));
}
/*
 * Unpin a previously pinned pfn, dirtying it when mapped writable.
 * Returns 1 if a page was unpinned, 0 for invalid/reserved pfns.
 */
static int put_pfn(unsigned long pfn, int prot)
{
	struct page *page;

	if (is_invalid_reserved_pfn(pfn))
		return 0;

	page = pfn_to_page(pfn);
	unpin_user_pages_dirty_lock(&page, 1, prot & IOMMU_WRITE);
	return 1;
}
#define VFIO_BATCH_MAX_CAPACITY (PAGE_SIZE / sizeof(struct page *))

/*
 * Set up a pin batch.  Falls back to a single embedded page slot when
 * hugepages are disabled or the backing page can't be allocated.
 */
static void vfio_batch_init(struct vfio_batch *batch)
{
	batch->size = 0;
	batch->offset = 0;

	if (unlikely(disable_hugepages))
		goto fallback;

	batch->pages = (struct page **) __get_free_page(GFP_KERNEL);
	if (!batch->pages)
		goto fallback;

	batch->capacity = VFIO_BATCH_MAX_CAPACITY;
	return;

fallback:
	batch->pages = &batch->fallback_page;
	batch->capacity = 1;
}
/* Unpin every page still held in @batch, consuming the batch. */
static void vfio_batch_unpin(struct vfio_batch *batch, struct vfio_dma *dma)
{
	for (; batch->size; batch->size--, batch->offset++)
		put_pfn(page_to_pfn(batch->pages[batch->offset]), dma->prot);
}
/* Release the batch's backing page if one was allocated. */
static void vfio_batch_fini(struct vfio_batch *batch)
{
	if (batch->capacity != VFIO_BATCH_MAX_CAPACITY)
		return;

	free_page((unsigned long)batch->pages);
}
/*
 * Resolve @vaddr in a VM_PFNMAP vma to a pfn, faulting the mapping in
 * if necessary.  Returns 0 with *pfn set, -EAGAIN if the mmap lock was
 * dropped during the fault (caller must retry), or a negative errno.
 * Called with mm's mmap lock held for read.
 */
static int follow_fault_pfn(struct vm_area_struct *vma, struct mm_struct *mm,
			    unsigned long vaddr, unsigned long *pfn,
			    bool write_fault)
{
	pte_t *ptep;
	pte_t pte;
	spinlock_t *ptl;
	int ret;

	ret = follow_pte(vma->vm_mm, vaddr, &ptep, &ptl);
	if (ret) {
		bool unlocked = false;

		/* No pte yet; fault it in and look again. */
		ret = fixup_user_fault(mm, vaddr,
				       FAULT_FLAG_REMOTE |
				       (write_fault ? FAULT_FLAG_WRITE : 0),
				       &unlocked);
		/* Lock was dropped: the vma may be stale, retry from scratch */
		if (unlocked)
			return -EAGAIN;

		if (ret)
			return ret;

		ret = follow_pte(vma->vm_mm, vaddr, &ptep, &ptl);
		if (ret)
			return ret;
	}

	pte = ptep_get(ptep);

	if (write_fault && !pte_write(pte))
		ret = -EFAULT;
	else
		*pfn = pte_pfn(pte);

	pte_unmap_unlock(ptep, ptl);
	return ret;
}
/*
 * Returns the positive number of pfns successfully obtained or a negative
 * error code.  Normal memory is pinned via pin_user_pages_remote() into
 * @pages; VM_PFNMAP vmas are resolved (unpinned) via follow_fault_pfn()
 * and report at most one pfn.
 */
static int vaddr_get_pfns(struct mm_struct *mm, unsigned long vaddr,
			  long npages, int prot, unsigned long *pfn,
			  struct page **pages)
{
	struct vm_area_struct *vma;
	unsigned int flags = 0;
	int ret;

	if (prot & IOMMU_WRITE)
		flags |= FOLL_WRITE;
	mmap_read_lock(mm);
	ret = pin_user_pages_remote(mm, vaddr, npages, flags | FOLL_LONGTERM,
				    pages, NULL);
	if (ret > 0) {
		int i;

		/*
		 * The zero page is always resident, we don't need to pin it
		 * and it falls into our invalid/reserved test so we don't
		 * unpin in put_pfn(). Unpin all zero pages in the batch here.
		 */
		for (i = 0 ; i < ret; i++) {
			if (unlikely(is_zero_pfn(page_to_pfn(pages[i]))))
				unpin_user_page(pages[i]);
		}

		*pfn = page_to_pfn(pages[0]);
		goto done;
	}

	/* Pin failed; maybe this is a PFNMAP (e.g. MMIO) mapping. */
	vaddr = untagged_addr_remote(mm, vaddr);

retry:
	vma = vma_lookup(mm, vaddr);

	if (vma && vma->vm_flags & VM_PFNMAP) {
		ret = follow_fault_pfn(vma, mm, vaddr, pfn, prot & IOMMU_WRITE);
		if (ret == -EAGAIN)
			goto retry;

		if (!ret) {
			if (is_invalid_reserved_pfn(*pfn))
				ret = 1;
			else
				ret = -EFAULT;
		}
	}
done:
	mmap_read_unlock(mm);
	return ret;
}
/*
 * Attempt to pin pages.  We really don't want to track all the pfns and
 * the iommu can only map chunks of consecutive pfns anyway, so get the
 * first page and all consecutive pages with the same locking.
 *
 * Returns the number of contiguous pages pinned (with *pfn_base as the
 * first pfn) or a negative errno.  Leftover pages stay in @batch for the
 * next call.
 */
static long vfio_pin_pages_remote(struct vfio_dma *dma, unsigned long vaddr,
				  long npage, unsigned long *pfn_base,
				  unsigned long limit, struct vfio_batch *batch)
{
	unsigned long pfn;
	struct mm_struct *mm = current->mm;
	long ret, pinned = 0, lock_acct = 0;
	bool rsvd;
	dma_addr_t iova = vaddr - dma->vaddr + dma->iova;

	/* This code path is only user initiated */
	if (!mm)
		return -ENODEV;

	if (batch->size) {
		/* Leftover pages in batch from an earlier call. */
		*pfn_base = page_to_pfn(batch->pages[batch->offset]);
		pfn = *pfn_base;
		rsvd = is_invalid_reserved_pfn(*pfn_base);
	} else {
		*pfn_base = 0;
	}

	while (npage) {
		if (!batch->size) {
			/* Empty batch, so refill it. */
			long req_pages = min_t(long, npage, batch->capacity);

			ret = vaddr_get_pfns(mm, vaddr, req_pages, dma->prot,
					     &pfn, batch->pages);
			if (ret < 0)
				goto unpin_out;

			batch->size = ret;
			batch->offset = 0;

			if (!*pfn_base) {
				*pfn_base = pfn;
				rsvd = is_invalid_reserved_pfn(*pfn_base);
			}
		}

		/*
		 * pfn is preset for the first iteration of this inner loop and
		 * updated at the end to handle a VM_PFNMAP pfn.  In that case,
		 * batch->pages isn't valid (there's no struct page), so allow
		 * batch->pages to be touched only when there's more than one
		 * pfn to check, which guarantees the pfns are from a
		 * !VM_PFNMAP vma.
		 */
		while (true) {
			/* Stop at a discontiguity or reserved/normal change */
			if (pfn != *pfn_base + pinned ||
			    rsvd != is_invalid_reserved_pfn(pfn))
				goto out;

			/*
			 * Reserved pages aren't counted against the user,
			 * externally pinned pages are already counted against
			 * the user.
			 */
			if (!rsvd && !vfio_find_vpfn(dma, iova)) {
				if (!dma->lock_cap &&
				    mm->locked_vm + lock_acct + 1 > limit) {
					pr_warn("%s: RLIMIT_MEMLOCK (%ld) exceeded\n",
						__func__, limit << PAGE_SHIFT);
					ret = -ENOMEM;
					goto unpin_out;
				}
				lock_acct++;
			}

			pinned++;
			npage--;
			vaddr += PAGE_SIZE;
			iova += PAGE_SIZE;
			batch->offset++;
			batch->size--;

			if (!batch->size)
				break;

			pfn = page_to_pfn(batch->pages[batch->offset]);
		}

		if (unlikely(disable_hugepages))
			break;
	}

out:
	ret = vfio_lock_acct(dma, lock_acct, false);

unpin_out:
	if (batch->size == 1 && !batch->offset) {
		/* May be a VM_PFNMAP pfn, which the batch can't remember. */
		put_pfn(pfn, dma->prot);
		batch->size = 0;
	}

	if (ret < 0) {
		if (pinned && !rsvd) {
			for (pfn = *pfn_base ; pinned ; pfn++, pinned--)
				put_pfn(pfn, dma->prot);
		}
		vfio_batch_unpin(batch, dma);

		return ret;
	}

	return pinned;
}
/*
 * Unpin @npage contiguous pages starting at @pfn / @iova.  Pages that
 * also carry an external pin (vpfn) stay accounted as locked.
 * Returns how many pages were actually unpinned.
 */
static long vfio_unpin_pages_remote(struct vfio_dma *dma, dma_addr_t iova,
				    unsigned long pfn, long npage,
				    bool do_accounting)
{
	long unlocked = 0, locked = 0;
	long i;

	for (i = 0; i < npage; i++, iova += PAGE_SIZE) {
		if (!put_pfn(pfn++, dma->prot))
			continue;

		unlocked++;
		if (vfio_find_vpfn(dma, iova))
			locked++;
	}

	if (do_accounting)
		vfio_lock_acct(dma, locked - unlocked, true);

	return unlocked;
}
/*
 * Pin a single page at @vaddr on behalf of an external (mdev-style)
 * driver.  Accounting is charged to the mapping's owning mm, which may
 * differ from current.  Returns 0 with *pfn_base set, or negative errno.
 */
static int vfio_pin_page_external(struct vfio_dma *dma, unsigned long vaddr,
				  unsigned long *pfn_base, bool do_accounting)
{
	struct page *pages[1];
	struct mm_struct *mm;
	int ret;

	mm = dma->mm;
	if (!mmget_not_zero(mm))
		return -ENODEV;

	ret = vaddr_get_pfns(mm, vaddr, 1, dma->prot, pfn_base, pages);
	if (ret != 1)
		goto out;

	ret = 0;

	if (do_accounting && !is_invalid_reserved_pfn(*pfn_base)) {
		ret = vfio_lock_acct(dma, 1, false);
		if (ret) {
			/* Accounting failed: drop the pin we just took. */
			put_pfn(*pfn_base, dma->prot);
			if (ret == -ENOMEM)
				pr_warn("%s: Task %s (%d) RLIMIT_MEMLOCK "
					"(%ld) exceeded\n", __func__,
					dma->task->comm, task_pid_nr(dma->task),
					task_rlimit(dma->task, RLIMIT_MEMLOCK));
		}
	}

out:
	mmput(mm);
	return ret;
}
/*
 * Drop one external pin reference on @iova.  Returns 1 when the page was
 * actually unpinned (last reference), otherwise 0.
 */
static int vfio_unpin_page_external(struct vfio_dma *dma, dma_addr_t iova,
				    bool do_accounting)
{
	int unlocked;
	struct vfio_pfn *vpfn = vfio_find_vpfn(dma, iova);

	if (!vpfn)
		return 0;

	unlocked = vfio_iova_put_vfio_pfn(dma, vpfn);

	if (do_accounting)
		vfio_lock_acct(dma, -unlocked, true);

	return unlocked;
}
/*
 * .pin_pages backend: pin @npage pages starting at @user_iova for an
 * emulated IOMMU device, returning struct pages in @pages.  Returns the
 * number of pages pinned or a negative errno; partial failure unwinds
 * everything pinned by this call.
 */
static int vfio_iommu_type1_pin_pages(void *iommu_data,
				      struct iommu_group *iommu_group,
				      dma_addr_t user_iova,
				      int npage, int prot,
				      struct page **pages)
{
	struct vfio_iommu *iommu = iommu_data;
	struct vfio_iommu_group *group;
	int i, j, ret;
	unsigned long remote_vaddr;
	struct vfio_dma *dma;
	bool do_accounting;

	if (!iommu || !pages)
		return -EINVAL;

	/* Supported for v2 version only */
	if (!iommu->v2)
		return -EACCES;

	mutex_lock(&iommu->lock);

	if (WARN_ONCE(iommu->vaddr_invalid_count,
		      "vfio_pin_pages not allowed with VFIO_UPDATE_VADDR\n")) {
		ret = -EBUSY;
		goto pin_done;
	}

	/* Fail if no dma_umap notifier is registered */
	if (list_empty(&iommu->device_list)) {
		ret = -EINVAL;
		goto pin_done;
	}

	/*
	 * If iommu capable domain exist in the container then all pages are
	 * already pinned and accounted. Accounting should be done if there is no
	 * iommu capable domain in the container.
	 */
	do_accounting = list_empty(&iommu->domain_list);

	for (i = 0; i < npage; i++) {
		unsigned long phys_pfn;
		dma_addr_t iova;
		struct vfio_pfn *vpfn;

		iova = user_iova + PAGE_SIZE * i;
		dma = vfio_find_dma(iommu, iova, PAGE_SIZE);
		if (!dma) {
			ret = -EINVAL;
			goto pin_unwind;
		}

		if ((dma->prot & prot) != prot) {
			ret = -EPERM;
			goto pin_unwind;
		}

		/* Already pinned externally: just take another reference. */
		vpfn = vfio_iova_get_vfio_pfn(dma, iova);
		if (vpfn) {
			pages[i] = pfn_to_page(vpfn->pfn);
			continue;
		}

		remote_vaddr = dma->vaddr + (iova - dma->iova);
		ret = vfio_pin_page_external(dma, remote_vaddr, &phys_pfn,
					     do_accounting);
		if (ret)
			goto pin_unwind;

		if (!pfn_valid(phys_pfn)) {
			ret = -EINVAL;
			goto pin_unwind;
		}

		ret = vfio_add_to_pfn_list(dma, iova, phys_pfn);
		if (ret) {
			if (put_pfn(phys_pfn, dma->prot) && do_accounting)
				vfio_lock_acct(dma, -1, true);
			goto pin_unwind;
		}

		pages[i] = pfn_to_page(phys_pfn);

		if (iommu->dirty_page_tracking) {
			unsigned long pgshift = __ffs(iommu->pgsize_bitmap);

			/*
			 * Bitmap populated with the smallest supported page
			 * size
			 */
			bitmap_set(dma->bitmap,
				   (iova - dma->iova) >> pgshift, 1);
		}
	}
	ret = i;

	/* This group now dirties only the pages it pins. */
	group = vfio_iommu_find_iommu_group(iommu, iommu_group);
	if (!group->pinned_page_dirty_scope) {
		group->pinned_page_dirty_scope = true;
		iommu->num_non_pinned_groups--;
	}

	goto pin_done;

pin_unwind:
	pages[i] = NULL;
	for (j = 0; j < i; j++) {
		dma_addr_t iova;

		iova = user_iova + PAGE_SIZE * j;
		dma = vfio_find_dma(iommu, iova, PAGE_SIZE);
		vfio_unpin_page_external(dma, iova, do_accounting);
		pages[j] = NULL;
	}
pin_done:
	mutex_unlock(&iommu->lock);
	return ret;
}
/*
 * .unpin_pages backend: release @npage external pins starting at
 * @user_iova.  Warns if any iova in the range has no mapping.
 */
static void vfio_iommu_type1_unpin_pages(void *iommu_data,
					 dma_addr_t user_iova, int npage)
{
	struct vfio_iommu *iommu = iommu_data;
	bool do_accounting;
	int i;

	/* Supported for v2 version only */
	if (WARN_ON(!iommu->v2))
		return;

	mutex_lock(&iommu->lock);

	do_accounting = list_empty(&iommu->domain_list);
	for (i = 0; i < npage; i++) {
		dma_addr_t iova = user_iova + PAGE_SIZE * i;
		struct vfio_dma *dma;

		dma = vfio_find_dma(iommu, iova, PAGE_SIZE);
		if (!dma)
			break;

		vfio_unpin_page_external(dma, iova, do_accounting);
	}

	mutex_unlock(&iommu->lock);

	WARN_ON(i != npage);
}
/*
 * Flush the gathered IOTLB invalidations, then unpin and free every
 * deferred region on @regions.  Returns the number of pages unpinned.
 */
static long vfio_sync_unpin(struct vfio_dma *dma, struct vfio_domain *domain,
			    struct list_head *regions,
			    struct iommu_iotlb_gather *iotlb_gather)
{
	long unlocked = 0;
	struct vfio_regions *entry, *next;

	/* Pages must not be unpinned before the IOTLB flush completes. */
	iommu_iotlb_sync(domain->domain, iotlb_gather);

	list_for_each_entry_safe(entry, next, regions, list) {
		unlocked += vfio_unpin_pages_remote(dma,
						    entry->iova,
						    entry->phys >> PAGE_SHIFT,
						    entry->len >> PAGE_SHIFT,
						    false);
		list_del(&entry->list);
		kfree(entry);
	}

	cond_resched();

	return unlocked;
}
/*
 * Generally, VFIO needs to unpin remote pages after each IOTLB flush.
 * Therefore, when using IOTLB flush sync interface, VFIO need to keep track
 * of these regions (currently using a list).
 *
 * This value specifies maximum number of regions for each IOTLB flush sync.
 */
#define VFIO_IOMMU_TLB_SYNC_MAX		512

/*
 * Unmap [*iova, *iova + len) with the deferred-flush interface, queueing
 * the region for later unpin.  Syncs (and unpins) when the queue is full
 * or on failure.  Returns bytes unmapped (0 means fall back to the slow
 * path).
 */
static size_t unmap_unpin_fast(struct vfio_domain *domain,
			       struct vfio_dma *dma, dma_addr_t *iova,
			       size_t len, phys_addr_t phys, long *unlocked,
			       struct list_head *unmapped_list,
			       int *unmapped_cnt,
			       struct iommu_iotlb_gather *iotlb_gather)
{
	size_t unmapped = 0;
	struct vfio_regions *entry = kzalloc(sizeof(*entry), GFP_KERNEL);

	if (entry) {
		unmapped = iommu_unmap_fast(domain->domain, *iova, len,
					    iotlb_gather);

		if (!unmapped) {
			kfree(entry);
		} else {
			entry->iova = *iova;
			entry->phys = phys;
			entry->len = unmapped;
			list_add_tail(&entry->list, unmapped_list);

			*iova += unmapped;
			(*unmapped_cnt)++;
		}
	}

	/*
	 * Sync if the number of fast-unmap regions hits the limit
	 * or in case of errors.
	 */
	if (*unmapped_cnt >= VFIO_IOMMU_TLB_SYNC_MAX || !unmapped) {
		*unlocked += vfio_sync_unpin(dma, domain, unmapped_list,
					     iotlb_gather);
		*unmapped_cnt = 0;
	}

	return unmapped;
}
/*
 * Synchronous unmap of [*iova, *iova + len) followed by an immediate
 * unpin.  Returns the number of bytes unmapped (0 on failure).
 */
static size_t unmap_unpin_slow(struct vfio_domain *domain,
			       struct vfio_dma *dma, dma_addr_t *iova,
			       size_t len, phys_addr_t phys,
			       long *unlocked)
{
	size_t unmapped;

	unmapped = iommu_unmap(domain->domain, *iova, len);
	if (!unmapped)
		return 0;

	*unlocked += vfio_unpin_pages_remote(dma, *iova,
					     phys >> PAGE_SHIFT,
					     unmapped >> PAGE_SHIFT,
					     false);
	*iova += unmapped;
	cond_resched();

	return unmapped;
}
/*
 * Tear down the IOMMU mappings for @dma in every domain and unpin the
 * backing pages.  Returns the number of pages un-accounted (0 when
 * @do_accounting, since the adjustment is applied here).
 */
static long vfio_unmap_unpin(struct vfio_iommu *iommu, struct vfio_dma *dma,
			     bool do_accounting)
{
	dma_addr_t iova = dma->iova, end = dma->iova + dma->size;
	struct vfio_domain *domain, *d;
	LIST_HEAD(unmapped_region_list);
	struct iommu_iotlb_gather iotlb_gather;
	int unmapped_region_cnt = 0;
	long unlocked = 0;

	if (!dma->size)
		return 0;

	if (list_empty(&iommu->domain_list))
		return 0;

	/*
	 * We use the IOMMU to track the physical addresses, otherwise we'd
	 * need a much more complicated tracking system.  Unfortunately that
	 * means we need to use one of the iommu domains to figure out the
	 * pfns to unpin.  The rest need to be unmapped in advance so we have
	 * no iommu translations remaining when the pages are unpinned.
	 */
	domain = d = list_first_entry(&iommu->domain_list,
				      struct vfio_domain, next);

	list_for_each_entry_continue(d, &iommu->domain_list, next) {
		iommu_unmap(d->domain, dma->iova, dma->size);
		cond_resched();
	}

	iommu_iotlb_gather_init(&iotlb_gather);
	while (iova < end) {
		size_t unmapped, len;
		phys_addr_t phys, next;

		phys = iommu_iova_to_phys(domain->domain, iova);
		if (WARN_ON(!phys)) {
			iova += PAGE_SIZE;
			continue;
		}

		/*
		 * To optimize for fewer iommu_unmap() calls, each of which
		 * may require hardware cache flushing, try to find the
		 * largest contiguous physical memory chunk to unmap.
		 */
		for (len = PAGE_SIZE;
		     !domain->fgsp && iova + len < end; len += PAGE_SIZE) {
			next = iommu_iova_to_phys(domain->domain, iova + len);
			if (next != phys + len)
				break;
		}

		/*
		 * First, try to use fast unmap/unpin. In case of failure,
		 * switch to slow unmap/unpin path.
		 */
		unmapped = unmap_unpin_fast(domain, dma, &iova, len, phys,
					    &unlocked, &unmapped_region_list,
					    &unmapped_region_cnt,
					    &iotlb_gather);
		if (!unmapped) {
			unmapped = unmap_unpin_slow(domain, dma, &iova, len,
						    phys, &unlocked);
			if (WARN_ON(!unmapped))
				break;
		}
	}

	dma->iommu_mapped = false;

	if (unmapped_region_cnt) {
		unlocked += vfio_sync_unpin(dma, domain, &unmapped_region_list,
					    &iotlb_gather);
	}

	if (do_accounting) {
		vfio_lock_acct(dma, -unlocked, true);
		return 0;
	}
	return unlocked;
}
/*
 * Fully destroy a mapping: unmap/unpin, unlink from the rb-tree, drop
 * the task/mm references, free the dirty bitmap, and return its slot to
 * dma_avail.  Caller holds iommu->lock; pfn_list must already be empty.
 */
static void vfio_remove_dma(struct vfio_iommu *iommu, struct vfio_dma *dma)
{
	WARN_ON(!RB_EMPTY_ROOT(&dma->pfn_list));
	vfio_unmap_unpin(iommu, dma, true);
	vfio_unlink_dma(iommu, dma);
	put_task_struct(dma->task);
	mmdrop(dma->mm);
	vfio_dma_bitmap_free(dma);
	if (dma->vaddr_invalid)
		iommu->vaddr_invalid_count--;
	kfree(dma);
	iommu->dma_avail++;
}
/*
 * Recompute the container's supported page sizes as the intersection of
 * all attached domains' pgsize bitmaps.
 */
static void vfio_update_pgsize_bitmap(struct vfio_iommu *iommu)
{
	struct vfio_domain *domain;

	iommu->pgsize_bitmap = ULONG_MAX;

	list_for_each_entry(domain, &iommu->domain_list, next)
		iommu->pgsize_bitmap &= domain->domain->pgsize_bitmap;

	/*
	 * In case the IOMMU supports page sizes smaller than PAGE_SIZE
	 * we pretend PAGE_SIZE is supported and hide sub-PAGE_SIZE sizes.
	 * That way the user will be able to map/unmap buffers whose size/
	 * start address is aligned with PAGE_SIZE. Pinning code uses that
	 * granularity while iommu driver can use the sub-PAGE_SIZE size
	 * to map the buffer.
	 */
	if (iommu->pgsize_bitmap & ~PAGE_MASK) {
		iommu->pgsize_bitmap &= PAGE_MASK;
		iommu->pgsize_bitmap |= PAGE_SIZE;
	}
}
/*
 * Copy @dma's dirty bitmap out to the user bitmap that starts at
 * @base_iova.  When @dma's start isn't u64-aligned relative to
 * @base_iova, shift our bitmap and merge the user's partial first word
 * so neighbouring mappings don't clobber each other's bits.
 * Returns 0 or -EFAULT.
 */
static int update_user_bitmap(u64 __user *bitmap, struct vfio_iommu *iommu,
			      struct vfio_dma *dma, dma_addr_t base_iova,
			      size_t pgsize)
{
	unsigned long pgshift = __ffs(pgsize);
	unsigned long nbits = dma->size >> pgshift;
	unsigned long bit_offset = (dma->iova - base_iova) >> pgshift;
	unsigned long copy_offset = bit_offset / BITS_PER_LONG;
	unsigned long shift = bit_offset % BITS_PER_LONG;
	unsigned long leftover;

	/*
	 * mark all pages dirty if any IOMMU capable device is not able
	 * to report dirty pages and all pages are pinned and mapped.
	 */
	if (iommu->num_non_pinned_groups && dma->iommu_mapped)
		bitmap_set(dma->bitmap, 0, nbits);

	if (shift) {
		bitmap_shift_left(dma->bitmap, dma->bitmap, shift,
				  nbits + shift);

		/* Preserve the user's already-written low bits. */
		if (copy_from_user(&leftover,
				   (void __user *)(bitmap + copy_offset),
				   sizeof(leftover)))
			return -EFAULT;

		bitmap_or(dma->bitmap, dma->bitmap, &leftover, shift);
	}

	if (copy_to_user((void __user *)(bitmap + copy_offset), dma->bitmap,
			 DIRTY_BITMAP_BYTES(nbits + shift)))
		return -EFAULT;

	return 0;
}
/*
 * Report dirty pages for [iova, iova + size) into the user @bitmap and
 * reset the per-dma bitmaps (re-seeding currently pinned pages).
 * Returns 0 or a negative errno.
 */
static int vfio_iova_dirty_bitmap(u64 __user *bitmap, struct vfio_iommu *iommu,
				  dma_addr_t iova, size_t size, size_t pgsize)
{
	struct vfio_dma *dma;
	struct rb_node *n;
	unsigned long pgshift = __ffs(pgsize);
	int ret;

	/*
	 * GET_BITMAP request must fully cover vfio_dma mappings.  Multiple
	 * vfio_dma mappings may be clubbed by specifying large ranges, but
	 * there must not be any previous mappings bisected by the range.
	 * An error will be returned if these conditions are not met.
	 */
	dma = vfio_find_dma(iommu, iova, 1);
	if (dma && dma->iova != iova)
		return -EINVAL;

	dma = vfio_find_dma(iommu, iova + size - 1, 0);
	if (dma && dma->iova + dma->size != iova + size)
		return -EINVAL;

	for (n = rb_first(&iommu->dma_list); n; n = rb_next(n)) {
		struct vfio_dma *dma = rb_entry(n, struct vfio_dma, node);

		if (dma->iova < iova)
			continue;

		if (dma->iova > iova + size - 1)
			break;

		ret = update_user_bitmap(bitmap, iommu, dma, iova, pgsize);
		if (ret)
			return ret;

		/*
		 * Re-populate bitmap to include all pinned pages which are
		 * considered as dirty but exclude pages which are unpinned and
		 * pages which are marked dirty by vfio_dma_rw()
		 */
		bitmap_clear(dma->bitmap, 0, dma->size >> pgshift);
		vfio_dma_populate_bitmap(dma, pgsize);
	}
	return 0;
}
/*
 * Sanity-check a user supplied dirty-bitmap size against the number of
 * pages it must cover.  Returns 0 when acceptable, -EINVAL otherwise.
 */
static int verify_bitmap_size(uint64_t npages, uint64_t bitmap_size)
{
	if (!npages || !bitmap_size)
		return -EINVAL;

	if (bitmap_size > DIRTY_BITMAP_SIZE_MAX)
		return -EINVAL;

	if (bitmap_size < DIRTY_BITMAP_BYTES(npages))
		return -EINVAL;

	return 0;
}
/*
 * Notify VFIO drivers using vfio_register_emulated_iommu_dev() to invalidate
 * and unmap iovas within the range we're about to unmap. Drivers MUST unpin
 * pages in response to an invalidation.
 *
 * Called with iommu->lock held; temporarily drops it (see below), so the
 * caller must revalidate its state afterwards.
 */
static void vfio_notify_dma_unmap(struct vfio_iommu *iommu,
				  struct vfio_dma *dma)
{
	struct vfio_device *device;

	if (list_empty(&iommu->device_list))
		return;

	/*
	 * The device is expected to call vfio_unpin_pages() for any IOVA it has
	 * pinned within the range. Since vfio_unpin_pages() will eventually
	 * call back down to this code and try to obtain the iommu->lock we must
	 * drop it.
	 */
	mutex_lock(&iommu->device_list_lock);
	mutex_unlock(&iommu->lock);

	list_for_each_entry(device, &iommu->device_list, iommu_entry)
		device->ops->dma_unmap(device, dma->iova, dma->size);

	mutex_unlock(&iommu->device_list_lock);
	mutex_lock(&iommu->lock);
}
/*
 * Handle VFIO_IOMMU_UNMAP_DMA: unmap (or, with FLAG_VADDR, merely
 * invalidate the vaddr of) mappings in the requested range, optionally
 * reporting their dirty bitmap first.  On return unmap->size holds the
 * total bytes unmapped.
 */
static int vfio_dma_do_unmap(struct vfio_iommu *iommu,
			     struct vfio_iommu_type1_dma_unmap *unmap,
			     struct vfio_bitmap *bitmap)
{
	struct vfio_dma *dma, *dma_last = NULL;
	size_t unmapped = 0, pgsize;
	int ret = -EINVAL, retries = 0;
	unsigned long pgshift;
	dma_addr_t iova = unmap->iova;
	u64 size = unmap->size;
	bool unmap_all = unmap->flags & VFIO_DMA_UNMAP_FLAG_ALL;
	bool invalidate_vaddr = unmap->flags & VFIO_DMA_UNMAP_FLAG_VADDR;
	struct rb_node *n, *first_n;

	mutex_lock(&iommu->lock);

	/* Cannot update vaddr if mdev is present. */
	if (invalidate_vaddr && !list_empty(&iommu->emulated_iommu_groups)) {
		ret = -EBUSY;
		goto unlock;
	}

	pgshift = __ffs(iommu->pgsize_bitmap);
	pgsize = (size_t)1 << pgshift;

	if (iova & (pgsize - 1))
		goto unlock;

	if (unmap_all) {
		if (iova || size)
			goto unlock;
		size = U64_MAX;
	} else if (!size || size & (pgsize - 1) ||
		   iova + size - 1 < iova || size > SIZE_MAX) {
		goto unlock;
	}

	/* When dirty tracking is enabled, allow only min supported pgsize */
	if ((unmap->flags & VFIO_DMA_UNMAP_FLAG_GET_DIRTY_BITMAP) &&
	    (!iommu->dirty_page_tracking || (bitmap->pgsize != pgsize))) {
		goto unlock;
	}

	WARN_ON((pgsize - 1) & PAGE_MASK);
again:
	/*
	 * vfio-iommu-type1 (v1) - User mappings were coalesced together to
	 * avoid tracking individual mappings.  This means that the granularity
	 * of the original mapping was lost and the user was allowed to attempt
	 * to unmap any range.  Depending on the contiguousness of physical
	 * memory and page sizes supported by the IOMMU, arbitrary unmaps may
	 * or may not have worked.  We only guaranteed unmap granularity
	 * matching the original mapping; even though it was untracked here,
	 * the original mappings are reflected in IOMMU mappings.  This
	 * resulted in a couple unusual behaviors.  First, if a range is not
	 * able to be unmapped, ex. a set of 4k pages that was mapped as a
	 * 2M hugepage into the IOMMU, the unmap ioctl returns success but with
	 * a zero sized unmap.  Also, if an unmap request overlaps the first
	 * address of a hugepage, the IOMMU will unmap the entire hugepage.
	 * This also returns success and the returned unmap size reflects the
	 * actual size unmapped.
	 *
	 * We attempt to maintain compatibility with this "v1" interface, but
	 * we take control out of the hands of the IOMMU.  Therefore, an unmap
	 * request offset from the beginning of the original mapping will
	 * return success with zero sized unmap.  And an unmap request covering
	 * the first iova of mapping will unmap the entire range.
	 *
	 * The v2 version of this interface intends to be more deterministic.
	 * Unmap requests must fully cover previous mappings.  Multiple
	 * mappings may still be unmaped by specifying large ranges, but there
	 * must not be any previous mappings bisected by the range.  An error
	 * will be returned if these conditions are not met.  The v2 interface
	 * will only return success and a size of zero if there were no
	 * mappings within the range.
	 */
	if (iommu->v2 && !unmap_all) {
		dma = vfio_find_dma(iommu, iova, 1);
		if (dma && dma->iova != iova)
			goto unlock;

		dma = vfio_find_dma(iommu, iova + size - 1, 0);
		if (dma && dma->iova + dma->size != iova + size)
			goto unlock;
	}

	ret = 0;
	n = first_n = vfio_find_dma_first_node(iommu, iova, size);

	while (n) {
		dma = rb_entry(n, struct vfio_dma, node);
		if (dma->iova >= iova + size)
			break;

		if (!iommu->v2 && iova > dma->iova)
			break;

		if (invalidate_vaddr) {
			if (dma->vaddr_invalid) {
				struct rb_node *last_n = n;

				/* Already invalid: roll back this request. */
				for (n = first_n; n != last_n; n = rb_next(n)) {
					dma = rb_entry(n,
						       struct vfio_dma, node);
					dma->vaddr_invalid = false;
					iommu->vaddr_invalid_count--;
				}
				ret = -EINVAL;
				unmapped = 0;
				break;
			}
			dma->vaddr_invalid = true;
			iommu->vaddr_invalid_count++;
			unmapped += dma->size;
			n = rb_next(n);
			continue;
		}

		if (!RB_EMPTY_ROOT(&dma->pfn_list)) {
			if (dma_last == dma) {
				BUG_ON(++retries > 10);
			} else {
				dma_last = dma;
				retries = 0;
			}

			/* Drops and re-takes iommu->lock; restart the scan. */
			vfio_notify_dma_unmap(iommu, dma);
			goto again;
		}

		if (unmap->flags & VFIO_DMA_UNMAP_FLAG_GET_DIRTY_BITMAP) {
			ret = update_user_bitmap(bitmap->data, iommu, dma,
						 iova, pgsize);
			if (ret)
				break;
		}

		unmapped += dma->size;
		n = rb_next(n);
		vfio_remove_dma(iommu, dma);
	}

unlock:
	mutex_unlock(&iommu->lock);

	/* Report how much was unmapped */
	unmap->size = unmapped;

	return ret;
}
/*
 * Map @npage pages at @pfn to @iova in every domain of the container.
 * On failure, unwind the domains already mapped.  Returns 0 or errno.
 */
static int vfio_iommu_map(struct vfio_iommu *iommu, dma_addr_t iova,
			  unsigned long pfn, long npage, int prot)
{
	struct vfio_domain *d;
	int ret;

	list_for_each_entry(d, &iommu->domain_list, next) {
		ret = iommu_map(d->domain, iova, (phys_addr_t)pfn << PAGE_SHIFT,
				npage << PAGE_SHIFT, prot | IOMMU_CACHE,
				GFP_KERNEL);
		if (ret)
			goto unwind;

		cond_resched();
	}

	return 0;

unwind:
	list_for_each_entry_continue_reverse(d, &iommu->domain_list, next) {
		iommu_unmap(d->domain, iova, npage << PAGE_SHIFT);
		cond_resched();
	}

	return ret;
}
/*
 * Pin @map_size bytes of user memory for @dma chunk-by-chunk and map
 * each contiguous chunk into all domains.  On failure the whole mapping
 * is torn down via vfio_remove_dma().  Returns 0 or a negative errno.
 */
static int vfio_pin_map_dma(struct vfio_iommu *iommu, struct vfio_dma *dma,
			    size_t map_size)
{
	dma_addr_t iova = dma->iova;
	unsigned long vaddr = dma->vaddr;
	struct vfio_batch batch;
	size_t size = map_size;
	long npage;
	unsigned long pfn, limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
	int ret = 0;

	vfio_batch_init(&batch);

	while (size) {
		/* Pin a contiguous chunk of memory */
		npage = vfio_pin_pages_remote(dma, vaddr + dma->size,
					      size >> PAGE_SHIFT, &pfn, limit,
					      &batch);
		if (npage <= 0) {
			WARN_ON(!npage);
			ret = (int)npage;
			break;
		}

		/* Map it! */
		ret = vfio_iommu_map(iommu, iova + dma->size, pfn, npage,
				     dma->prot);
		if (ret) {
			vfio_unpin_pages_remote(dma, iova + dma->size, pfn,
						npage, true);
			vfio_batch_unpin(&batch, dma);
			break;
		}

		/* dma->size tracks how much of the range is live so far. */
		size -= npage << PAGE_SHIFT;
		dma->size += npage << PAGE_SHIFT;
	}

	vfio_batch_fini(&batch);
	/* Set even on error so vfio_remove_dma() unpins via the iommu. */
	dma->iommu_mapped = true;

	if (ret)
		vfio_remove_dma(iommu, dma);

	return ret;
}
/*
 * Validate that the dma map request [start, end] lies entirely inside one
 * of the container's valid iova ranges.
 */
static bool vfio_iommu_iova_dma_valid(struct vfio_iommu *iommu,
				      dma_addr_t start, dma_addr_t end)
{
	struct vfio_iova *range;

	list_for_each_entry(range, &iommu->iova_list, list)
		if (start >= range->start && end <= range->end)
			return true;

	/*
	 * An empty list is also treated as valid, since a container with
	 * a single mdev device carries no iova restrictions.
	 */
	return list_empty(&iommu->iova_list);
}
/*
 * Re-account @dma's locked pages from the original owner task/mm to the
 * current task/mm (used when a new process supplies an updated vaddr,
 * e.g. across a userspace live update).  Caller holds iommu->lock.
 * Returns 0 on success or a locked-memory accounting error.
 */
static int vfio_change_dma_owner(struct vfio_dma *dma)
{
	struct task_struct *task = current->group_leader;
	struct mm_struct *mm = current->mm;
	long npage = dma->locked_vm;
	bool lock_cap;
	int ret;

	/* Same mm: ownership is unchanged, nothing to transfer */
	if (mm == dma->mm)
		return 0;

	lock_cap = capable(CAP_IPC_LOCK);
	/* Charge the new mm first so failure leaves the old state intact */
	ret = mm_lock_acct(task, mm, lock_cap, npage);
	if (ret)
		return ret;

	/* Uncharge the old mm, but only if it is still alive */
	if (mmget_not_zero(dma->mm)) {
		mm_lock_acct(dma->task, dma->mm, dma->lock_cap, -npage);
		mmput(dma->mm);
	}

	if (dma->task != task) {
		put_task_struct(dma->task);
		dma->task = get_task_struct(task);
	}
	/* Swap the mm reference: drop the old grab, take one on the new mm */
	mmdrop(dma->mm);
	dma->mm = mm;
	mmgrab(dma->mm);
	dma->lock_cap = lock_cap;
	return 0;
}
/*
 * Handle VFIO_IOMMU_MAP_DMA: either create a new mapping covering
 * [iova, iova + size) backed by @vaddr, or — with VFIO_DMA_MAP_FLAG_VADDR —
 * supply a replacement vaddr for a mapping previously invalidated by an
 * unmap-with-vaddr.  Returns 0 or a negative errno.
 */
static int vfio_dma_do_map(struct vfio_iommu *iommu,
			   struct vfio_iommu_type1_dma_map *map)
{
	bool set_vaddr = map->flags & VFIO_DMA_MAP_FLAG_VADDR;
	dma_addr_t iova = map->iova;
	unsigned long vaddr = map->vaddr;
	size_t size = map->size;
	int ret = 0, prot = 0;
	size_t pgsize;
	struct vfio_dma *dma;

	/* Verify that none of our __u64 fields overflow */
	if (map->size != size || map->vaddr != vaddr || map->iova != iova)
		return -EINVAL;

	/* READ/WRITE from device perspective */
	if (map->flags & VFIO_DMA_MAP_FLAG_WRITE)
		prot |= IOMMU_WRITE;
	if (map->flags & VFIO_DMA_MAP_FLAG_READ)
		prot |= IOMMU_READ;

	/* R/W permissions and the vaddr-update flag are mutually exclusive */
	if ((prot && set_vaddr) || (!prot && !set_vaddr))
		return -EINVAL;

	mutex_lock(&iommu->lock);

	pgsize = (size_t)1 << __ffs(iommu->pgsize_bitmap);

	WARN_ON((pgsize - 1) & PAGE_MASK);

	/* Size and addresses must be aligned to the minimum iommu page size */
	if (!size || (size | iova | vaddr) & (pgsize - 1)) {
		ret = -EINVAL;
		goto out_unlock;
	}

	/* Don't allow IOVA or virtual address wrap */
	if (iova + size - 1 < iova || vaddr + size - 1 < vaddr) {
		ret = -EINVAL;
		goto out_unlock;
	}

	dma = vfio_find_dma(iommu, iova, size);
	if (set_vaddr) {
		/* vaddr update must exactly match an invalidated mapping */
		if (!dma) {
			ret = -ENOENT;
		} else if (!dma->vaddr_invalid || dma->iova != iova ||
			   dma->size != size) {
			ret = -EINVAL;
		} else {
			ret = vfio_change_dma_owner(dma);
			if (ret)
				goto out_unlock;
			dma->vaddr = vaddr;
			dma->vaddr_invalid = false;
			iommu->vaddr_invalid_count--;
		}
		goto out_unlock;
	} else if (dma) {
		/* New mappings may not overlap an existing one */
		ret = -EEXIST;
		goto out_unlock;
	}

	if (!iommu->dma_avail) {
		ret = -ENOSPC;
		goto out_unlock;
	}

	if (!vfio_iommu_iova_dma_valid(iommu, iova, iova + size - 1)) {
		ret = -EINVAL;
		goto out_unlock;
	}

	dma = kzalloc(sizeof(*dma), GFP_KERNEL);
	if (!dma) {
		ret = -ENOMEM;
		goto out_unlock;
	}

	iommu->dma_avail--;
	dma->iova = iova;
	dma->vaddr = vaddr;
	dma->prot = prot;

	/*
	 * We need to be able to both add to a task's locked memory and test
	 * against the locked memory limit and we need to be able to do both
	 * outside of this call path as pinning can be asynchronous via the
	 * external interfaces for mdev devices. RLIMIT_MEMLOCK requires a
	 * task_struct. Save the group_leader so that all DMA tracking uses
	 * the same task, to make debugging easier. VM locked pages requires
	 * an mm_struct, so grab the mm in case the task dies.
	 */
	get_task_struct(current->group_leader);
	dma->task = current->group_leader;
	dma->lock_cap = capable(CAP_IPC_LOCK);
	dma->mm = current->mm;
	mmgrab(dma->mm);

	dma->pfn_list = RB_ROOT;

	/* Insert zero-sized and grow as we map chunks of it */
	vfio_link_dma(iommu, dma);

	/* Don't pin and map if container doesn't contain IOMMU capable domain*/
	if (list_empty(&iommu->domain_list))
		dma->size = size;
	else
		ret = vfio_pin_map_dma(iommu, dma, size);

	if (!ret && iommu->dirty_page_tracking) {
		ret = vfio_dma_bitmap_alloc(dma, pgsize);
		if (ret)
			vfio_remove_dma(iommu, dma);
	}

out_unlock:
	mutex_unlock(&iommu->lock);
	return ret;
}
/*
 * Replay every existing dma mapping into the newly attached @domain.
 * For dmas that are already iommu-mapped, physical addresses are read back
 * from an existing domain and physically contiguous runs are coalesced into
 * a single iommu_map() call; otherwise the backing pages are pinned here.
 * On failure, everything mapped into the new domain is unwound.
 * Caller holds iommu->lock.
 *
 * Fix: the unwind loop failed to advance @iova past the chunk it just
 * unmapped/unpinned (upstream has "iova += size;" there); restored so each
 * chunk is processed exactly once instead of re-probing it page by page.
 */
static int vfio_iommu_replay(struct vfio_iommu *iommu,
			     struct vfio_domain *domain)
{
	struct vfio_batch batch;
	struct vfio_domain *d = NULL;
	struct rb_node *n;
	unsigned long limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
	int ret;

	/* Arbitrarily pick the first domain in the list for lookups */
	if (!list_empty(&iommu->domain_list))
		d = list_first_entry(&iommu->domain_list,
				     struct vfio_domain, next);

	vfio_batch_init(&batch);

	n = rb_first(&iommu->dma_list);

	for (; n; n = rb_next(n)) {
		struct vfio_dma *dma;
		dma_addr_t iova;

		dma = rb_entry(n, struct vfio_dma, node);
		iova = dma->iova;

		while (iova < dma->iova + dma->size) {
			phys_addr_t phys;
			size_t size;

			if (dma->iommu_mapped) {
				phys_addr_t p;
				dma_addr_t i;

				if (WARN_ON(!d)) { /* mapped w/o a domain?! */
					ret = -EINVAL;
					goto unwind;
				}

				phys = iommu_iova_to_phys(d->domain, iova);

				if (WARN_ON(!phys)) {
					iova += PAGE_SIZE;
					continue;
				}

				/* Coalesce physically contiguous pages */
				size = PAGE_SIZE;
				p = phys + size;
				i = iova + size;
				while (i < dma->iova + dma->size &&
				       p == iommu_iova_to_phys(d->domain, i)) {
					size += PAGE_SIZE;
					p += PAGE_SIZE;
					i += PAGE_SIZE;
				}
			} else {
				unsigned long pfn;
				unsigned long vaddr = dma->vaddr +
						      (iova - dma->iova);
				size_t n = dma->iova + dma->size - iova;
				long npage;

				npage = vfio_pin_pages_remote(dma, vaddr,
							      n >> PAGE_SHIFT,
							      &pfn, limit,
							      &batch);
				if (npage <= 0) {
					WARN_ON(!npage);
					ret = (int)npage;
					goto unwind;
				}

				phys = pfn << PAGE_SHIFT;
				size = npage << PAGE_SHIFT;
			}

			ret = iommu_map(domain->domain, iova, phys, size,
					dma->prot | IOMMU_CACHE, GFP_KERNEL);
			if (ret) {
				if (!dma->iommu_mapped) {
					vfio_unpin_pages_remote(dma, iova,
							phys >> PAGE_SHIFT,
							size >> PAGE_SHIFT,
							true);
					vfio_batch_unpin(&batch, dma);
				}
				goto unwind;
			}

			iova += size;
		}
	}

	/* All dmas are now mapped, defer to second tree walk for unwind */
	for (n = rb_first(&iommu->dma_list); n; n = rb_next(n)) {
		struct vfio_dma *dma = rb_entry(n, struct vfio_dma, node);

		dma->iommu_mapped = true;
	}

	vfio_batch_fini(&batch);
	return 0;

unwind:
	/* Walk back from the failing dma, unmapping whatever got into @domain */
	for (; n; n = rb_prev(n)) {
		struct vfio_dma *dma = rb_entry(n, struct vfio_dma, node);
		dma_addr_t iova;

		if (dma->iommu_mapped) {
			iommu_unmap(domain->domain, dma->iova, dma->size);
			continue;
		}

		iova = dma->iova;
		while (iova < dma->iova + dma->size) {
			phys_addr_t phys, p;
			size_t size;
			dma_addr_t i;

			phys = iommu_iova_to_phys(domain->domain, iova);
			if (!phys) {
				iova += PAGE_SIZE;
				continue;
			}

			/* Coalesce physically contiguous pages, as above */
			size = PAGE_SIZE;
			p = phys + size;
			i = iova + size;
			while (i < dma->iova + dma->size &&
			       p == iommu_iova_to_phys(domain->domain, i)) {
				size += PAGE_SIZE;
				p += PAGE_SIZE;
				i += PAGE_SIZE;
			}

			iommu_unmap(domain->domain, iova, size);
			vfio_unpin_pages_remote(dma, iova, phys >> PAGE_SHIFT,
						size >> PAGE_SHIFT, true);
			iova += size;	/* advance past the chunk just undone */
		}
	}

	vfio_batch_fini(&batch);
	return ret;
}
/*
 * We change our unmap behavior slightly depending on whether the IOMMU
 * supports fine-grained superpages. IOMMUs like AMD-Vi will use a superpage
 * for practically any contiguous power-of-two mapping we give it. This means
 * we don't need to look for contiguous chunks ourselves to make unmapping
 * more efficient. On IOMMUs with coarse-grained super pages, like Intel VT-d
 * with discrete 2M/1G/512G/1T superpages, identifying contiguous chunks
 * significantly boosts non-hugetlbfs mappings and doesn't seem to hurt when
 * hugetlbfs is in use.
 */
static void vfio_test_domain_fgsp(struct vfio_domain *domain, struct list_head *regions)
{
	int ret, order = get_order(PAGE_SIZE * 2);
	struct vfio_iova *region;
	struct page *pages;
	dma_addr_t start;

	pages = alloc_pages(GFP_KERNEL | __GFP_ZERO, order);
	if (!pages)
		return;

	/* Find a valid iova region with room for a 2-page aligned probe */
	list_for_each_entry(region, regions, list) {
		start = ALIGN(region->start, PAGE_SIZE * 2);
		if (start >= region->end || (region->end - start < PAGE_SIZE * 2))
			continue;

		/* Map two pages, then ask to unmap only the first one */
		ret = iommu_map(domain->domain, start, page_to_phys(pages), PAGE_SIZE * 2,
				IOMMU_READ | IOMMU_WRITE | IOMMU_CACHE, GFP_KERNEL);
		if (!ret) {
			size_t unmapped = iommu_unmap(domain->domain, start, PAGE_SIZE);

			if (unmapped == PAGE_SIZE)
				iommu_unmap(domain->domain, start + PAGE_SIZE, PAGE_SIZE);
			else
				/*
				 * More than one page came out: the IOMMU
				 * merged the pair into a superpage, i.e.
				 * fine-grained superpages are supported.
				 */
				domain->fgsp = true;
		}
		break;
	}

	__free_pages(pages, order);
}
static struct vfio_iommu_group *find_iommu_group(struct vfio_domain *domain,
struct iommu_group *iommu_group)
{
struct vfio_iommu_group *g;
list_for_each_entry(g, &domain->group_list, next) {
if (g->iommu_group == iommu_group)
return g;
}
return NULL;
}
/* Find @iommu_group anywhere in the container: domains first, then mdevs. */
static struct vfio_iommu_group*
vfio_iommu_find_iommu_group(struct vfio_iommu *iommu,
			    struct iommu_group *iommu_group)
{
	struct vfio_domain *d;
	struct vfio_iommu_group *g;

	/* Search every IOMMU-backed domain... */
	list_for_each_entry(d, &iommu->domain_list, next) {
		g = find_iommu_group(d, iommu_group);
		if (g)
			return g;
	}

	/* ...then fall back to the emulated (mdev) groups. */
	list_for_each_entry(g, &iommu->emulated_iommu_groups, next) {
		if (g->iommu_group == iommu_group)
			return g;
	}

	return NULL;
}
/*
 * Scan @group_resv_regions for a software-managed MSI region, storing its
 * base in @base.  A hardware MSI region anywhere in the list wins and
 * disables software MSI handling.
 */
static bool vfio_iommu_has_sw_msi(struct list_head *group_resv_regions,
				  phys_addr_t *base)
{
	struct iommu_resv_region *r;
	bool found = false;

	list_for_each_entry(r, group_resv_regions, list) {
		/*
		 * A 'real' MSI region takes precedence over the
		 * software-managed kind if the IOMMU driver happens to
		 * advertise both.
		 */
		if (r->type == IOMMU_RESV_MSI)
			return false;

		if (r->type == IOMMU_RESV_SW_MSI) {
			*base = r->start;
			found = true;
		}
	}

	return found;
}
/*
 * Append the address range [start, end] to an iova list.  The list starts
 * out as a single entry matching the attached domain's geometry; it is
 * narrowed as further domains attach and as reserved regions are excluded,
 * and must never conflict with existing dma mappings.
 * Returns 0 or -ENOMEM.
 */
static int vfio_iommu_iova_insert(struct list_head *head,
				  dma_addr_t start, dma_addr_t end)
{
	struct vfio_iova *range = kmalloc(sizeof(*range), GFP_KERNEL);

	if (!range)
		return -ENOMEM;

	INIT_LIST_HEAD(&range->list);
	range->start = start;
	range->end = end;

	/* Appending at the tail keeps the list sorted by address */
	list_add_tail(&range->list, head);
	return 0;
}
/*
 * Decide whether a new iommu aperture [start, end] conflicts with the
 * container's current aperture or with any existing dma mapping.
 */
static bool vfio_iommu_aper_conflict(struct vfio_iommu *iommu,
				     dma_addr_t start, dma_addr_t end)
{
	struct vfio_iova *lo, *hi;
	struct list_head *ranges = &iommu->iova_list;

	if (list_empty(ranges))
		return false;

	lo = list_first_entry(ranges, struct vfio_iova, list);
	hi = list_last_entry(ranges, struct vfio_iova, list);

	/* Completely disjoint apertures cannot be reconciled */
	if (start > hi->end || end < lo->start)
		return true;

	/* A dma mapping below the new start would be orphaned */
	if (start > lo->start &&
	    vfio_find_dma(iommu, lo->start, start - lo->start))
		return true;

	/* Likewise for mappings above the new end */
	if (end < hi->end &&
	    vfio_find_dma(iommu, end + 1, hi->end - end))
		return true;

	return false;
}
/*
 * Resize iommu iova aperture window. This is called only if the new
 * aperture has no conflict with existing aperture and dma mappings.
 * Nodes entirely outside [start, end] are freed; boundary nodes are
 * trimmed in place.  An empty list is (re)seeded with the full window.
 */
static int vfio_iommu_aper_resize(struct list_head *iova,
				  dma_addr_t start, dma_addr_t end)
{
	struct vfio_iova *node, *next;

	if (list_empty(iova))
		return vfio_iommu_iova_insert(iova, start, end);

	/* Adjust iova list start */
	list_for_each_entry_safe(node, next, iova, list) {
		if (start < node->start)
			break;
		if (start >= node->start && start < node->end) {
			/* Trim the node that straddles the new start */
			node->start = start;
			break;
		}
		/* Delete nodes before new start */
		list_del(&node->list);
		kfree(node);
	}

	/* Adjust iova list end */
	list_for_each_entry_safe(node, next, iova, list) {
		if (end > node->end)
			continue;
		if (end > node->start && end <= node->end) {
			/* Trim the node that straddles the new end */
			node->end = end;
			continue;
		}
		/* Delete nodes after new end */
		list_del(&node->list);
		kfree(node);
	}

	return 0;
}
/*
 * Report whether any non-relaxable reserved region overlaps an existing
 * dma mapping in the container.
 */
static bool vfio_iommu_resv_conflict(struct vfio_iommu *iommu,
				     struct list_head *resv_regions)
{
	struct iommu_resv_region *r;

	list_for_each_entry(r, resv_regions, list) {
		/* Relaxable direct regions may coexist with mappings */
		if (r->type == IOMMU_RESV_DIRECT_RELAXABLE)
			continue;
		if (vfio_find_dma(iommu, r->start, r->length))
			return true;
	}

	return false;
}
/*
 * Check iova region overlap with reserved regions and
 * exclude them from the iommu iova range.  Returns 0, -ENOMEM on
 * allocation failure, or -EINVAL if the reserved regions consume the
 * entire valid iova space.
 */
static int vfio_iommu_resv_exclude(struct list_head *iova,
				   struct list_head *resv_regions)
{
	struct iommu_resv_region *resv;
	struct vfio_iova *n, *next;

	list_for_each_entry(resv, resv_regions, list) {
		phys_addr_t start, end;

		/* Relaxable direct regions need not be excluded */
		if (resv->type == IOMMU_RESV_DIRECT_RELAXABLE)
			continue;

		start = resv->start;
		end = resv->start + resv->length - 1;

		list_for_each_entry_safe(n, next, iova, list) {
			int ret = 0;

			/* No overlap */
			if (start > n->end || end < n->start)
				continue;
			/*
			 * Insert a new node if current node overlaps with the
			 * reserve region to exclude that from valid iova range.
			 * Note that, new node is inserted before the current
			 * node and finally the current node is deleted keeping
			 * the list updated and sorted.
			 */
			if (start > n->start)
				ret = vfio_iommu_iova_insert(&n->list, n->start,
							     start - 1);
			if (!ret && end < n->end)
				ret = vfio_iommu_iova_insert(&n->list, end + 1,
							     n->end);
			if (ret)
				return ret;

			list_del(&n->list);
			kfree(n);
		}
	}

	/* No usable iova space remaining is an error */
	if (list_empty(iova))
		return -EINVAL;

	return 0;
}
/* Free every iommu_resv_region node on @resv_regions. */
static void vfio_iommu_resv_free(struct list_head *resv_regions)
{
	struct iommu_resv_region *region, *tmp;

	list_for_each_entry_safe(region, tmp, resv_regions, list) {
		list_del(&region->list);
		kfree(region);
	}
}
/* Free every vfio_iova range node on @iova. */
static void vfio_iommu_iova_free(struct list_head *iova)
{
	struct vfio_iova *range, *tmp;

	list_for_each_entry_safe(range, tmp, iova, list) {
		list_del(&range->list);
		kfree(range);
	}
}
/*
 * Duplicate the container's iova list into @iova_copy so that updates can
 * be staged and either committed or thrown away.  Returns 0 or -ENOMEM;
 * on failure nothing is left on @iova_copy.
 */
static int vfio_iommu_iova_get_copy(struct vfio_iommu *iommu,
				    struct list_head *iova_copy)
{
	struct vfio_iova *range;
	int ret;

	list_for_each_entry(range, &iommu->iova_list, list) {
		ret = vfio_iommu_iova_insert(iova_copy, range->start,
					     range->end);
		if (ret) {
			/* Drop the partial copy on allocation failure */
			vfio_iommu_iova_free(iova_copy);
			return ret;
		}
	}

	return 0;
}
/* Commit a staged iova list: drop the live one and splice in the copy. */
static void vfio_iommu_iova_insert_copy(struct vfio_iommu *iommu,
					struct list_head *iova_copy)
{
	vfio_iommu_iova_free(&iommu->iova_list);
	list_splice_tail(iova_copy, &iommu->iova_list);
}
/*
 * iommu_group_for_each_dev() callback: allocate an IOMMU domain for the
 * bus of the first device in the group and stop iterating.  @data points
 * at the struct iommu_domain * to fill in.
 */
static int vfio_iommu_domain_alloc(struct device *dev, void *data)
{
	struct iommu_domain **domain = data;

	*domain = iommu_domain_alloc(dev->bus);
	return 1; /* Don't iterate */
}
/*
 * Attach @iommu_group to the container.  Emulated (mdev) groups are only
 * tracked; physical groups get an IOMMU domain allocated and attached,
 * the container's valid iova list is narrowed by the new domain's aperture
 * and reserved regions, existing mappings are replayed into the new domain,
 * and the domain is merged with a compatible existing one when possible.
 * Returns 0 or a negative errno with all intermediate state unwound.
 */
static int vfio_iommu_type1_attach_group(void *iommu_data,
		struct iommu_group *iommu_group, enum vfio_group_type type)
{
	struct vfio_iommu *iommu = iommu_data;
	struct vfio_iommu_group *group;
	struct vfio_domain *domain, *d;
	bool resv_msi;
	phys_addr_t resv_msi_base = 0;
	struct iommu_domain_geometry *geo;
	LIST_HEAD(iova_copy);
	LIST_HEAD(group_resv_regions);
	int ret = -EBUSY;

	mutex_lock(&iommu->lock);

	/* Attach could require pinning, so disallow while vaddr is invalid. */
	if (iommu->vaddr_invalid_count)
		goto out_unlock;

	/* Check for duplicates */
	ret = -EINVAL;
	if (vfio_iommu_find_iommu_group(iommu, iommu_group))
		goto out_unlock;

	ret = -ENOMEM;
	group = kzalloc(sizeof(*group), GFP_KERNEL);
	if (!group)
		goto out_unlock;
	group->iommu_group = iommu_group;

	if (type == VFIO_EMULATED_IOMMU) {
		list_add(&group->next, &iommu->emulated_iommu_groups);
		/*
		 * An emulated IOMMU group cannot dirty memory directly, it can
		 * only use interfaces that provide dirty tracking.
		 * The iommu scope can only be promoted with the addition of a
		 * dirty tracking group.
		 */
		group->pinned_page_dirty_scope = true;
		ret = 0;
		goto out_unlock;
	}

	ret = -ENOMEM;
	domain = kzalloc(sizeof(*domain), GFP_KERNEL);
	if (!domain)
		goto out_free_group;

	/*
	 * Going via the iommu_group iterator avoids races, and trivially gives
	 * us a representative device for the IOMMU API call. We don't actually
	 * want to iterate beyond the first device (if any).
	 */
	ret = -EIO;
	iommu_group_for_each_dev(iommu_group, &domain->domain,
				 vfio_iommu_domain_alloc);
	if (!domain->domain)
		goto out_free_domain;

	if (iommu->nesting) {
		ret = iommu_enable_nesting(domain->domain);
		if (ret)
			goto out_domain;
	}

	ret = iommu_attach_group(domain->domain, group->iommu_group);
	if (ret)
		goto out_domain;

	/* Get aperture info */
	geo = &domain->domain->geometry;
	if (vfio_iommu_aper_conflict(iommu, geo->aperture_start,
				     geo->aperture_end)) {
		ret = -EINVAL;
		goto out_detach;
	}

	ret = iommu_get_group_resv_regions(iommu_group, &group_resv_regions);
	if (ret)
		goto out_detach;

	if (vfio_iommu_resv_conflict(iommu, &group_resv_regions)) {
		ret = -EINVAL;
		goto out_detach;
	}

	/*
	 * We don't want to work on the original iova list as the list
	 * gets modified and in case of failure we have to retain the
	 * original list. Get a copy here.
	 */
	ret = vfio_iommu_iova_get_copy(iommu, &iova_copy);
	if (ret)
		goto out_detach;

	/* Narrow the staged iova list by the new domain's aperture */
	ret = vfio_iommu_aper_resize(&iova_copy, geo->aperture_start,
				     geo->aperture_end);
	if (ret)
		goto out_detach;

	/* ...and by the new group's reserved regions */
	ret = vfio_iommu_resv_exclude(&iova_copy, &group_resv_regions);
	if (ret)
		goto out_detach;

	resv_msi = vfio_iommu_has_sw_msi(&group_resv_regions, &resv_msi_base);

	INIT_LIST_HEAD(&domain->group_list);
	list_add(&group->next, &domain->group_list);

	if (!allow_unsafe_interrupts &&
	    !iommu_group_has_isolated_msi(iommu_group)) {
		pr_warn("%s: No interrupt remapping support. Use the module param \"allow_unsafe_interrupts\" to enable VFIO IOMMU support on this platform\n",
		       __func__);
		ret = -EPERM;
		goto out_detach;
	}

	/*
	 * If the IOMMU can block non-coherent operations (ie PCIe TLPs with
	 * no-snoop set) then VFIO always turns this feature on because on Intel
	 * platforms it optimizes KVM to disable wbinvd emulation.
	 */
	if (domain->domain->ops->enforce_cache_coherency)
		domain->enforce_cache_coherency =
			domain->domain->ops->enforce_cache_coherency(
				domain->domain);

	/*
	 * Try to match an existing compatible domain. We don't want to
	 * preclude an IOMMU driver supporting multiple bus_types and being
	 * able to include different bus_types in the same IOMMU domain, so
	 * we test whether the domains use the same iommu_ops rather than
	 * testing if they're on the same bus_type.
	 */
	list_for_each_entry(d, &iommu->domain_list, next) {
		if (d->domain->ops == domain->domain->ops &&
		    d->enforce_cache_coherency ==
			    domain->enforce_cache_coherency) {
			/* Try moving the group onto the compatible domain */
			iommu_detach_group(domain->domain, group->iommu_group);
			if (!iommu_attach_group(d->domain,
						group->iommu_group)) {
				list_add(&group->next, &d->group_list);
				iommu_domain_free(domain->domain);
				kfree(domain);
				goto done;
			}

			/* Re-attach to the new domain if the move failed */
			ret = iommu_attach_group(domain->domain,
						 group->iommu_group);
			if (ret)
				goto out_domain;
		}
	}

	vfio_test_domain_fgsp(domain, &iova_copy);

	/* replay mappings on new domains */
	ret = vfio_iommu_replay(iommu, domain);
	if (ret)
		goto out_detach;

	if (resv_msi) {
		ret = iommu_get_msi_cookie(domain->domain, resv_msi_base);
		if (ret && ret != -ENODEV)
			goto out_detach;
	}

	list_add(&domain->next, &iommu->domain_list);
	vfio_update_pgsize_bitmap(iommu);
done:
	/* Delete the old one and insert new iova list */
	vfio_iommu_iova_insert_copy(iommu, &iova_copy);

	/*
	 * An iommu backed group can dirty memory directly and therefore
	 * demotes the iommu scope until it declares itself dirty tracking
	 * capable via the page pinning interface.
	 */
	iommu->num_non_pinned_groups++;
	mutex_unlock(&iommu->lock);
	vfio_iommu_resv_free(&group_resv_regions);

	return 0;

out_detach:
	iommu_detach_group(domain->domain, group->iommu_group);
out_domain:
	iommu_domain_free(domain->domain);
	vfio_iommu_iova_free(&iova_copy);
	vfio_iommu_resv_free(&group_resv_regions);
out_free_domain:
	kfree(domain);
out_free_group:
	kfree(group);
out_unlock:
	mutex_unlock(&iommu->lock);
	return ret;
}
static void vfio_iommu_unmap_unpin_all(struct vfio_iommu *iommu)
{
struct rb_node *node;
while ((node = rb_first(&iommu->dma_list)))
vfio_remove_dma(iommu, rb_entry(node, struct vfio_dma, node));
}
/*
 * Unmap and unpin each dma's iommu-mapped pages, then re-account locked
 * memory for pages that remain pinned via the pfn_list (external pins),
 * so the owner's locked_vm reflects only what is still pinned.
 */
static void vfio_iommu_unmap_unpin_reaccount(struct vfio_iommu *iommu)
{
	struct rb_node *n, *p;

	n = rb_first(&iommu->dma_list);
	for (; n; n = rb_next(n)) {
		struct vfio_dma *dma;
		long locked = 0, unlocked = 0;

		dma = rb_entry(n, struct vfio_dma, node);
		unlocked += vfio_unmap_unpin(iommu, dma, false);
		p = rb_first(&dma->pfn_list);
		for (; p; p = rb_next(p)) {
			struct vfio_pfn *vpfn = rb_entry(p, struct vfio_pfn,
							 node);

			/* Invalid/reserved pfns don't count as locked memory */
			if (!is_invalid_reserved_pfn(vpfn->pfn))
				locked++;
		}
		vfio_lock_acct(dma, locked - unlocked, true);
	}
}
/*
 * Called when a domain is removed in detach. It is possible that
 * the removed domain decided the iova aperture window. Modify the
 * iova aperture with the smallest window among existing domains.
 */
static void vfio_iommu_aper_expand(struct vfio_iommu *iommu,
				   struct list_head *iova_copy)
{
	struct vfio_domain *domain;
	struct vfio_iova *node;
	dma_addr_t start = 0;
	dma_addr_t end = (dma_addr_t)~0;

	if (list_empty(iova_copy))
		return;

	/* Intersect the apertures of all remaining domains */
	list_for_each_entry(domain, &iommu->domain_list, next) {
		struct iommu_domain_geometry *geo = &domain->domain->geometry;

		if (geo->aperture_start > start)
			start = geo->aperture_start;
		if (geo->aperture_end < end)
			end = geo->aperture_end;
	}

	/* Modify aperture limits. The new aper is either same or bigger */
	node = list_first_entry(iova_copy, struct vfio_iova, list);
	node->start = start;
	node = list_last_entry(iova_copy, struct vfio_iova, list);
	node->end = end;
}
/*
 * Called when a group is detached. The reserved regions for that
 * group can be part of valid iova now. But since reserved regions
 * may be duplicated among groups, populate the iova valid regions
 * list again.  Returns 0 or a negative errno; on failure @iova_copy
 * may be left partially rebuilt and should be discarded by the caller.
 */
static int vfio_iommu_resv_refresh(struct vfio_iommu *iommu,
				   struct list_head *iova_copy)
{
	struct vfio_domain *d;
	struct vfio_iommu_group *g;
	struct vfio_iova *node;
	dma_addr_t start, end;
	LIST_HEAD(resv_regions);
	int ret;

	if (list_empty(iova_copy))
		return -EINVAL;

	/* Re-collect reserved regions from all remaining groups */
	list_for_each_entry(d, &iommu->domain_list, next) {
		list_for_each_entry(g, &d->group_list, next) {
			ret = iommu_get_group_resv_regions(g->iommu_group,
							   &resv_regions);
			if (ret)
				goto done;
		}
	}

	/* Remember the overall aperture before purging the list */
	node = list_first_entry(iova_copy, struct vfio_iova, list);
	start = node->start;
	node = list_last_entry(iova_copy, struct vfio_iova, list);
	end = node->end;

	/* purge the iova list and create new one */
	vfio_iommu_iova_free(iova_copy);

	ret = vfio_iommu_aper_resize(iova_copy, start, end);
	if (ret)
		goto done;

	/* Exclude current reserved regions from iova ranges */
	ret = vfio_iommu_resv_exclude(iova_copy, &resv_regions);
done:
	vfio_iommu_resv_free(&resv_regions);
	return ret;
}
/*
 * Detach @iommu_group from the container.  Emulated groups only need
 * their bookkeeping dropped; physical groups are detached from their
 * vfio_domain, and when the last group/domain disappears the mappings
 * are torn down (or re-accounted if emulated groups remain).  The valid
 * iova list, page size bitmap, and dirty scope are updated as needed.
 */
static void vfio_iommu_type1_detach_group(void *iommu_data,
					  struct iommu_group *iommu_group)
{
	struct vfio_iommu *iommu = iommu_data;
	struct vfio_domain *domain;
	struct vfio_iommu_group *group;
	bool update_dirty_scope = false;
	LIST_HEAD(iova_copy);

	mutex_lock(&iommu->lock);
	/* Emulated (mdev) groups first; they own no IOMMU domain */
	list_for_each_entry(group, &iommu->emulated_iommu_groups, next) {
		if (group->iommu_group != iommu_group)
			continue;
		update_dirty_scope = !group->pinned_page_dirty_scope;
		list_del(&group->next);
		kfree(group);

		if (list_empty(&iommu->emulated_iommu_groups) &&
		    list_empty(&iommu->domain_list)) {
			WARN_ON(!list_empty(&iommu->device_list));
			vfio_iommu_unmap_unpin_all(iommu);
		}
		goto detach_group_done;
	}

	/*
	 * Get a copy of iova list. This will be used to update
	 * and to replace the current one later. Please note that
	 * we will leave the original list as it is if update fails.
	 */
	vfio_iommu_iova_get_copy(iommu, &iova_copy);

	list_for_each_entry(domain, &iommu->domain_list, next) {
		group = find_iommu_group(domain, iommu_group);
		if (!group)
			continue;

		iommu_detach_group(domain->domain, group->iommu_group);
		update_dirty_scope = !group->pinned_page_dirty_scope;
		list_del(&group->next);
		kfree(group);
		/*
		 * Group ownership provides privilege, if the group list is
		 * empty, the domain goes away. If it's the last domain with
		 * iommu and external domain doesn't exist, then all the
		 * mappings go away too. If it's the last domain with iommu and
		 * external domain exist, update accounting
		 */
		if (list_empty(&domain->group_list)) {
			if (list_is_singular(&iommu->domain_list)) {
				if (list_empty(&iommu->emulated_iommu_groups)) {
					WARN_ON(!list_empty(
						&iommu->device_list));
					vfio_iommu_unmap_unpin_all(iommu);
				} else {
					vfio_iommu_unmap_unpin_reaccount(iommu);
				}
			}
			iommu_domain_free(domain->domain);
			list_del(&domain->next);
			kfree(domain);
			/* Remaining domains may allow a wider aperture */
			vfio_iommu_aper_expand(iommu, &iova_copy);
			vfio_update_pgsize_bitmap(iommu);
		}
		break;
	}

	/* Commit the refreshed iova list only if rebuilding it succeeded */
	if (!vfio_iommu_resv_refresh(iommu, &iova_copy))
		vfio_iommu_iova_insert_copy(iommu, &iova_copy);
	else
		vfio_iommu_iova_free(&iova_copy);

detach_group_done:
	/*
	 * Removal of a group without dirty tracking may allow the iommu scope
	 * to be promoted.
	 */
	if (update_dirty_scope) {
		iommu->num_non_pinned_groups--;
		if (iommu->dirty_page_tracking)
			vfio_iommu_populate_bitmap_full(iommu);
	}
	mutex_unlock(&iommu->lock);
}
/*
 * Container open callback: allocate and initialize a vfio_iommu for the
 * requested type1 flavor (@arg).  Returns the new iommu or an ERR_PTR.
 */
static void *vfio_iommu_type1_open(unsigned long arg)
{
	struct vfio_iommu *iommu;

	iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
	if (!iommu)
		return ERR_PTR(-ENOMEM);

	switch (arg) {
	case VFIO_TYPE1_IOMMU:
		break;
	case VFIO_TYPE1_NESTING_IOMMU:
		/* Nesting implies v2 semantics as well */
		iommu->nesting = true;
		fallthrough;
	case VFIO_TYPE1v2_IOMMU:
		iommu->v2 = true;
		break;
	default:
		kfree(iommu);
		return ERR_PTR(-EINVAL);
	}

	INIT_LIST_HEAD(&iommu->domain_list);
	INIT_LIST_HEAD(&iommu->iova_list);
	iommu->dma_list = RB_ROOT;
	iommu->dma_avail = dma_entry_limit;
	mutex_init(&iommu->lock);
	mutex_init(&iommu->device_list_lock);
	INIT_LIST_HEAD(&iommu->device_list);
	/* Narrowed to the attached domains' common sizes as groups attach */
	iommu->pgsize_bitmap = PAGE_MASK;
	INIT_LIST_HEAD(&iommu->emulated_iommu_groups);

	return iommu;
}
/* Detach and free every group on @domain, then free its IOMMU domain. */
static void vfio_release_domain(struct vfio_domain *domain)
{
	struct vfio_iommu_group *g, *tmp;

	list_for_each_entry_safe(g, tmp, &domain->group_list, next) {
		iommu_detach_group(domain->domain, g->iommu_group);
		list_del(&g->next);
		kfree(g);
	}

	iommu_domain_free(domain->domain);
}
/* Container release: free groups, tear down all mappings and domains. */
static void vfio_iommu_type1_release(void *iommu_data)
{
	struct vfio_iommu *iommu = iommu_data;
	struct vfio_domain *d, *d_tmp;
	struct vfio_iommu_group *g, *g_tmp;

	/* Emulated groups hold no domain; just drop their bookkeeping */
	list_for_each_entry_safe(g, g_tmp, &iommu->emulated_iommu_groups,
				 next) {
		list_del(&g->next);
		kfree(g);
	}

	vfio_iommu_unmap_unpin_all(iommu);

	/* Tear down each remaining IOMMU-backed domain */
	list_for_each_entry_safe(d, d_tmp, &iommu->domain_list, next) {
		vfio_release_domain(d);
		list_del(&d->next);
		kfree(d);
	}

	vfio_iommu_iova_free(&iommu->iova_list);

	kfree(iommu);
}
/* Return 1 only if every domain in the container enforces cache coherency. */
static int vfio_domains_have_enforce_cache_coherency(struct vfio_iommu *iommu)
{
	struct vfio_domain *d;
	int all_enforced = 1;

	mutex_lock(&iommu->lock);
	list_for_each_entry(d, &iommu->domain_list, next) {
		if (!(d->enforce_cache_coherency)) {
			all_enforced = 0;
			break;
		}
	}
	mutex_unlock(&iommu->lock);

	return all_enforced;
}
/* Report whether the container holds any emulated (mdev) groups. */
static bool vfio_iommu_has_emulated(struct vfio_iommu *iommu)
{
	bool has_emulated;

	mutex_lock(&iommu->lock);
	has_emulated = !list_empty(&iommu->emulated_iommu_groups);
	mutex_unlock(&iommu->lock);

	return has_emulated;
}
/*
 * Handle VFIO_CHECK_EXTENSION: report which type1 capabilities this
 * container supports.  @iommu may be NULL when queried before SET_IOMMU,
 * in which case the iommu-dependent extensions are reported unsupported.
 */
static int vfio_iommu_type1_check_extension(struct vfio_iommu *iommu,
					    unsigned long arg)
{
	switch (arg) {
	case VFIO_TYPE1_IOMMU:
	case VFIO_TYPE1v2_IOMMU:
	case VFIO_TYPE1_NESTING_IOMMU:
	case VFIO_UNMAP_ALL:
		return 1;
	case VFIO_UPDATE_VADDR:
		/*
		 * Disable this feature if mdevs are present. They cannot
		 * safely pin/unpin/rw while vaddrs are being updated.
		 */
		return iommu && !vfio_iommu_has_emulated(iommu);
	case VFIO_DMA_CC_IOMMU:
		if (!iommu)
			return 0;
		return vfio_domains_have_enforce_cache_coherency(iommu);
	default:
		return 0;
	}
}
/*
 * Append a VFIO_IOMMU_TYPE1_INFO_CAP_IOVA_RANGE capability to @caps,
 * copying the prepared @cap_iovas payload of @size bytes into the newly
 * reserved header.  Returns 0 or the vfio_info_cap_add() error.
 */
static int vfio_iommu_iova_add_cap(struct vfio_info_cap *caps,
		struct vfio_iommu_type1_info_cap_iova_range *cap_iovas,
		size_t size)
{
	struct vfio_info_cap_header *header;
	struct vfio_iommu_type1_info_cap_iova_range *iova_cap;

	header = vfio_info_cap_add(caps, size,
				   VFIO_IOMMU_TYPE1_INFO_CAP_IOVA_RANGE, 1);
	if (IS_ERR(header))
		return PTR_ERR(header);

	iova_cap = container_of(header,
				struct vfio_iommu_type1_info_cap_iova_range,
				header);
	iova_cap->nr_iovas = cap_iovas->nr_iovas;
	memcpy(iova_cap->iova_ranges, cap_iovas->iova_ranges,
	       cap_iovas->nr_iovas * sizeof(*cap_iovas->iova_ranges));
	return 0;
}
static int vfio_iommu_iova_build_caps(struct vfio_iommu *iommu,
struct vfio_info_cap *caps)
{
struct vfio_iommu_type1_info_cap_iova_range *cap_iovas;
struct vfio_iova *iova;
size_t size;
int iovas = 0, i = 0, ret;
list_for_each_entry(iova, &iommu->iova_list, list)
iovas++;
if (!iovas) {
/*
* Return 0 as a container with a single mdev device
* will have an empty list
*/
return 0;
}
size = struct_size(cap_iovas, iova_ranges, iovas);
cap_iovas = kzalloc(size, GFP_KERNEL);
if (!cap_iovas)
return -ENOMEM;
cap_iovas->nr_iovas = iovas;
list_for_each_entry(iova, &iommu->iova_list, list) {
cap_iovas->iova_ranges[i].start = iova->start;
cap_iovas->iova_ranges[i].end = iova->end;
i++;
}
ret = vfio_iommu_iova_add_cap(caps, cap_iovas, size);
kfree(cap_iovas);
return ret;
}
static int vfio_iommu_migration_build_caps(struct vfio_iommu *iommu,
struct vfio_info_cap *caps)
{
struct vfio_iommu_type1_info_cap_migration cap_mig = {};
cap_mig.header.id = VFIO_IOMMU_TYPE1_INFO_CAP_MIGRATION;
cap_mig.header.version = 1;
cap_mig.flags = 0;
/* support minimum pgsize */
cap_mig.pgsize_bitmap = (size_t)1 << __ffs(iommu->pgsize_bitmap);
cap_mig.max_dirty_bitmap_size = DIRTY_BITMAP_SIZE_MAX;
return vfio_info_add_capability(caps, &cap_mig.header, sizeof(cap_mig));
}
static int vfio_iommu_dma_avail_build_caps(struct vfio_iommu *iommu,
struct vfio_info_cap *caps)
{
struct vfio_iommu_type1_info_dma_avail cap_dma_avail;
cap_dma_avail.header.id = VFIO_IOMMU_TYPE1_INFO_DMA_AVAIL;
cap_dma_avail.header.version = 1;
cap_dma_avail.avail = iommu->dma_avail;
return vfio_info_add_capability(caps, &cap_dma_avail.header,
sizeof(cap_dma_avail));
}
/*
 * Handle VFIO_IOMMU_GET_INFO: report the supported page size bitmap and,
 * when the user buffer is large enough, the migration / dma-avail /
 * iova-range capability chain appended after the fixed info struct.
 */
static int vfio_iommu_type1_get_info(struct vfio_iommu *iommu,
				     unsigned long arg)
{
	struct vfio_iommu_type1_info info = {};
	unsigned long minsz;
	struct vfio_info_cap caps = { .buf = NULL, .size = 0 };
	int ret;

	minsz = offsetofend(struct vfio_iommu_type1_info, iova_pgsizes);

	if (copy_from_user(&info, (void __user *)arg, minsz))
		return -EFAULT;

	if (info.argsz < minsz)
		return -EINVAL;

	/* Copy back no more than the user provided room for */
	minsz = min_t(size_t, info.argsz, sizeof(info));

	mutex_lock(&iommu->lock);
	info.flags = VFIO_IOMMU_INFO_PGSIZES;

	info.iova_pgsizes = iommu->pgsize_bitmap;

	ret = vfio_iommu_migration_build_caps(iommu, &caps);

	if (!ret)
		ret = vfio_iommu_dma_avail_build_caps(iommu, &caps);

	if (!ret)
		ret = vfio_iommu_iova_build_caps(iommu, &caps);

	mutex_unlock(&iommu->lock);

	if (ret)
		return ret;

	if (caps.size) {
		info.flags |= VFIO_IOMMU_INFO_CAPS;

		/*
		 * Too small for the cap chain: report the required argsz
		 * but still succeed with the fixed portion of the struct.
		 */
		if (info.argsz < sizeof(info) + caps.size) {
			info.argsz = sizeof(info) + caps.size;
		} else {
			vfio_info_cap_shift(&caps, sizeof(info));
			if (copy_to_user((void __user *)arg +
					sizeof(info), caps.buf,
					caps.size)) {
				kfree(caps.buf);
				return -EFAULT;
			}
			info.cap_offset = sizeof(info);
		}

		kfree(caps.buf);
	}

	return copy_to_user((void __user *)arg, &info, minsz) ?
			-EFAULT : 0;
}
/*
 * Handle VFIO_IOMMU_MAP_DMA: copy in and sanity-check the user request,
 * then hand it to vfio_dma_do_map().
 */
static int vfio_iommu_type1_map_dma(struct vfio_iommu *iommu,
				    unsigned long arg)
{
	struct vfio_iommu_type1_dma_map map;
	unsigned long minsz = offsetofend(struct vfio_iommu_type1_dma_map,
					  size);
	uint32_t valid_flags = VFIO_DMA_MAP_FLAG_READ |
			       VFIO_DMA_MAP_FLAG_WRITE |
			       VFIO_DMA_MAP_FLAG_VADDR;

	if (copy_from_user(&map, (void __user *)arg, minsz))
		return -EFAULT;

	/* Reject short buffers and unknown flags */
	if (map.argsz < minsz || (map.flags & ~valid_flags))
		return -EINVAL;

	return vfio_dma_do_map(iommu, &map);
}
/*
 * Handle VFIO_IOMMU_UNMAP_DMA: copy in the unmap request (plus optional
 * dirty-bitmap descriptor), validate flag combinations and bitmap sizing,
 * perform the unmap, and copy the resulting unmapped size back to the user.
 */
static int vfio_iommu_type1_unmap_dma(struct vfio_iommu *iommu,
				      unsigned long arg)
{
	struct vfio_iommu_type1_dma_unmap unmap;
	struct vfio_bitmap bitmap = { 0 };
	uint32_t mask = VFIO_DMA_UNMAP_FLAG_GET_DIRTY_BITMAP |
			VFIO_DMA_UNMAP_FLAG_VADDR |
			VFIO_DMA_UNMAP_FLAG_ALL;
	unsigned long minsz;
	int ret;

	minsz = offsetofend(struct vfio_iommu_type1_dma_unmap, size);

	if (copy_from_user(&unmap, (void __user *)arg, minsz))
		return -EFAULT;

	if (unmap.argsz < minsz || unmap.flags & ~mask)
		return -EINVAL;

	/* Dirty bitmap retrieval is incompatible with ALL and VADDR */
	if ((unmap.flags & VFIO_DMA_UNMAP_FLAG_GET_DIRTY_BITMAP) &&
	    (unmap.flags & (VFIO_DMA_UNMAP_FLAG_ALL |
			    VFIO_DMA_UNMAP_FLAG_VADDR)))
		return -EINVAL;

	if (unmap.flags & VFIO_DMA_UNMAP_FLAG_GET_DIRTY_BITMAP) {
		unsigned long pgshift;

		if (unmap.argsz < (minsz + sizeof(bitmap)))
			return -EINVAL;

		if (copy_from_user(&bitmap,
				   (void __user *)(arg + minsz),
				   sizeof(bitmap)))
			return -EFAULT;

		if (!access_ok((void __user *)bitmap.data, bitmap.size))
			return -EINVAL;

		/*
		 * NOTE(review): bitmap.pgsize is presumably validated against
		 * iommu->pgsize_bitmap inside vfio_dma_do_unmap();
		 * __ffs(0) here would be undefined — confirm unreachable.
		 */
		pgshift = __ffs(bitmap.pgsize);
		ret = verify_bitmap_size(unmap.size >> pgshift,
					 bitmap.size);
		if (ret)
			return ret;
	}

	ret = vfio_dma_do_unmap(iommu, &unmap, &bitmap);
	if (ret)
		return ret;

	/* Report the unmapped size back via unmap.size */
	return copy_to_user((void __user *)arg, &unmap, minsz) ?
			-EFAULT : 0;
}
/*
 * VFIO_IOMMU_DIRTY_PAGES handler: start/stop dirty-page tracking or read
 * back the dirty bitmap for an IOVA range.  Exactly one flag may be set
 * per call.
 */
static int vfio_iommu_type1_dirty_pages(struct vfio_iommu *iommu,
					unsigned long arg)
{
	struct vfio_iommu_type1_dirty_bitmap dirty;
	uint32_t mask = VFIO_IOMMU_DIRTY_PAGES_FLAG_START |
			VFIO_IOMMU_DIRTY_PAGES_FLAG_STOP |
			VFIO_IOMMU_DIRTY_PAGES_FLAG_GET_BITMAP;
	unsigned long minsz;
	int ret = 0;

	if (!iommu->v2)
		return -EACCES;

	minsz = offsetofend(struct vfio_iommu_type1_dirty_bitmap, flags);

	if (copy_from_user(&dirty, (void __user *)arg, minsz))
		return -EFAULT;

	if (dirty.argsz < minsz || dirty.flags & ~mask)
		return -EINVAL;

	/* only one flag should be set at a time */
	if (__ffs(dirty.flags) != __fls(dirty.flags))
		return -EINVAL;

	if (dirty.flags & VFIO_IOMMU_DIRTY_PAGES_FLAG_START) {
		size_t pgsize;

		mutex_lock(&iommu->lock);
		/* Track at the smallest supported page granularity. */
		pgsize = 1 << __ffs(iommu->pgsize_bitmap);
		if (!iommu->dirty_page_tracking) {
			ret = vfio_dma_bitmap_alloc_all(iommu, pgsize);
			if (!ret)
				iommu->dirty_page_tracking = true;
		}
		mutex_unlock(&iommu->lock);
		return ret;
	} else if (dirty.flags & VFIO_IOMMU_DIRTY_PAGES_FLAG_STOP) {
		mutex_lock(&iommu->lock);
		if (iommu->dirty_page_tracking) {
			iommu->dirty_page_tracking = false;
			vfio_dma_bitmap_free_all(iommu);
		}
		mutex_unlock(&iommu->lock);
		return 0;
	} else if (dirty.flags & VFIO_IOMMU_DIRTY_PAGES_FLAG_GET_BITMAP) {
		struct vfio_iommu_type1_dirty_bitmap_get range;
		unsigned long pgshift;
		size_t data_size = dirty.argsz - minsz;
		size_t iommu_pgsize;

		if (!data_size || data_size < sizeof(range))
			return -EINVAL;

		if (copy_from_user(&range, (void __user *)(arg + minsz),
				   sizeof(range)))
			return -EFAULT;

		/* Reject ranges that wrap around the IOVA space. */
		if (range.iova + range.size < range.iova)
			return -EINVAL;

		if (!access_ok((void __user *)range.bitmap.data,
			       range.bitmap.size))
			return -EINVAL;

		pgshift = __ffs(range.bitmap.pgsize);
		ret = verify_bitmap_size(range.size >> pgshift,
					 range.bitmap.size);
		if (ret)
			return ret;

		mutex_lock(&iommu->lock);

		iommu_pgsize = (size_t)1 << __ffs(iommu->pgsize_bitmap);

		/* allow only smallest supported pgsize */
		if (range.bitmap.pgsize != iommu_pgsize) {
			ret = -EINVAL;
			goto out_unlock;
		}
		/* iova and size must be aligned to the tracking pgsize */
		if (range.iova & (iommu_pgsize - 1)) {
			ret = -EINVAL;
			goto out_unlock;
		}
		if (!range.size || range.size & (iommu_pgsize - 1)) {
			ret = -EINVAL;
			goto out_unlock;
		}

		if (iommu->dirty_page_tracking)
			ret = vfio_iova_dirty_bitmap(range.bitmap.data,
						     iommu, range.iova,
						     range.size,
						     range.bitmap.pgsize);
		else
			ret = -EINVAL;
out_unlock:
		mutex_unlock(&iommu->lock);

		return ret;
	}

	return -EINVAL;
}
/*
 * Top-level ioctl dispatcher for the type1 IOMMU backend.  Each supported
 * command is forwarded to its dedicated handler; anything else is -ENOTTY.
 */
static long vfio_iommu_type1_ioctl(void *iommu_data,
				   unsigned int cmd, unsigned long arg)
{
	struct vfio_iommu *iommu = iommu_data;

	if (cmd == VFIO_CHECK_EXTENSION)
		return vfio_iommu_type1_check_extension(iommu, arg);
	if (cmd == VFIO_IOMMU_GET_INFO)
		return vfio_iommu_type1_get_info(iommu, arg);
	if (cmd == VFIO_IOMMU_MAP_DMA)
		return vfio_iommu_type1_map_dma(iommu, arg);
	if (cmd == VFIO_IOMMU_UNMAP_DMA)
		return vfio_iommu_type1_unmap_dma(iommu, arg);
	if (cmd == VFIO_IOMMU_DIRTY_PAGES)
		return vfio_iommu_type1_dirty_pages(iommu, arg);

	return -ENOTTY;
}
/*
 * Add a device that implements ->dma_unmap() to the iommu's device list so
 * it can be notified of unmaps.  Devices without the callback need no entry.
 */
static void vfio_iommu_type1_register_device(void *iommu_data,
					     struct vfio_device *vdev)
{
	struct vfio_iommu *iommu = iommu_data;

	if (!vdev->ops->dma_unmap)
		return;

	/*
	 * list_empty(&iommu->device_list) is tested under the iommu->lock while
	 * iteration for dma_unmap must be done under the device_list_lock.
	 * Holding both locks here allows avoiding the device_list_lock in
	 * several fast paths. See vfio_notify_dma_unmap()
	 */
	mutex_lock(&iommu->lock);
	mutex_lock(&iommu->device_list_lock);
	list_add(&vdev->iommu_entry, &iommu->device_list);
	mutex_unlock(&iommu->device_list_lock);
	mutex_unlock(&iommu->lock);
}
/*
 * Remove @vdev from the iommu's dma_unmap notification list.  The same
 * iommu->lock + device_list_lock nesting as registration is required so
 * the list_empty() fast-path test stays coherent.
 */
static void vfio_iommu_type1_unregister_device(void *iommu_data,
					       struct vfio_device *vdev)
{
	struct vfio_iommu *iommu = iommu_data;

	if (!vdev->ops->dma_unmap)
		return;

	mutex_lock(&iommu->lock);
	mutex_lock(&iommu->device_list_lock);
	list_del(&vdev->iommu_entry);
	mutex_unlock(&iommu->device_list_lock);
	mutex_unlock(&iommu->lock);
}
/*
 * Copy up to @count bytes between @data and the user mapping backing
 * @user_iova, within a single vfio_dma.  On success *copied holds the
 * number of bytes transferred (the copy is all-or-nothing per chunk).
 * Requires the caller to hold iommu->lock (vfio_find_dma is used).
 */
static int vfio_iommu_type1_dma_rw_chunk(struct vfio_iommu *iommu,
					 dma_addr_t user_iova, void *data,
					 size_t count, bool write,
					 size_t *copied)
{
	struct mm_struct *mm;
	unsigned long vaddr;
	struct vfio_dma *dma;
	bool kthread = current->mm == NULL;	/* no mm -> kernel thread */
	size_t offset;

	*copied = 0;

	dma = vfio_find_dma(iommu, user_iova, 1);
	if (!dma)
		return -EINVAL;

	/* Reads always require IOMMU_READ; writes also need IOMMU_WRITE. */
	if ((write && !(dma->prot & IOMMU_WRITE)) ||
	    !(dma->prot & IOMMU_READ))
		return -EPERM;

	/* Pin the owning mm so the mapping can't disappear under us. */
	mm = dma->mm;
	if (!mmget_not_zero(mm))
		return -EPERM;

	if (kthread)
		kthread_use_mm(mm);
	else if (current->mm != mm)
		/* Foreign mm from a user task: fail with *copied == 0. */
		goto out;

	offset = user_iova - dma->iova;

	/* Clamp the chunk to the end of this vfio_dma. */
	if (count > dma->size - offset)
		count = dma->size - offset;

	vaddr = dma->vaddr + offset;

	if (write) {
		*copied = copy_to_user((void __user *)vaddr, data,
				       count) ? 0 : count;
		if (*copied && iommu->dirty_page_tracking) {
			unsigned long pgshift = __ffs(iommu->pgsize_bitmap);
			/*
			 * Bitmap populated with the smallest supported page
			 * size
			 */
			bitmap_set(dma->bitmap, offset >> pgshift,
				   ((offset + *copied - 1) >> pgshift) -
				   (offset >> pgshift) + 1);
		}
	} else
		*copied = copy_from_user(data, (void __user *)vaddr,
					 count) ? 0 : count;
	if (kthread)
		kthread_unuse_mm(mm);
out:
	mmput(mm);

	return *copied ? 0 : -EFAULT;
}
/*
 * dma_rw backend callback: iterate chunk-by-chunk across vfio_dma entries
 * until @count bytes have been transferred or an error occurs.  Rejected
 * while any vaddr is invalidated (VFIO_UPDATE_VADDR in progress).
 */
static int vfio_iommu_type1_dma_rw(void *iommu_data, dma_addr_t user_iova,
				   void *data, size_t count, bool write)
{
	struct vfio_iommu *iommu = iommu_data;
	int ret = 0;
	size_t done;

	mutex_lock(&iommu->lock);

	if (WARN_ONCE(iommu->vaddr_invalid_count,
		      "vfio_dma_rw not allowed with VFIO_UPDATE_VADDR\n")) {
		ret = -EBUSY;
		goto out;
	}

	while (count > 0) {
		ret = vfio_iommu_type1_dma_rw_chunk(iommu, user_iova, data,
						    count, write, &done);
		if (ret)
			break;

		count -= done;
		data += done;
		user_iova += done;
	}

out:
	mutex_unlock(&iommu->lock);
	return ret;
}
/*
 * Return the iommu_domain that @iommu_group is attached to within this
 * container, or ERR_PTR(-ENODEV) if the group is not found.
 */
static struct iommu_domain *
vfio_iommu_type1_group_iommu_domain(void *iommu_data,
				    struct iommu_group *iommu_group)
{
	struct vfio_iommu *iommu = iommu_data;
	struct iommu_domain *found = ERR_PTR(-ENODEV);
	struct vfio_domain *iter;

	if (!iommu || !iommu_group)
		return ERR_PTR(-EINVAL);

	mutex_lock(&iommu->lock);
	list_for_each_entry(iter, &iommu->domain_list, next) {
		if (!find_iommu_group(iter, iommu_group))
			continue;
		found = iter->domain;
		break;
	}
	mutex_unlock(&iommu->lock);

	return found;
}
/* Callback table registered with the VFIO core for the type1 backend. */
static const struct vfio_iommu_driver_ops vfio_iommu_driver_ops_type1 = {
	.name			= "vfio-iommu-type1",
	.owner			= THIS_MODULE,
	.open			= vfio_iommu_type1_open,
	.release		= vfio_iommu_type1_release,
	.ioctl			= vfio_iommu_type1_ioctl,
	.attach_group		= vfio_iommu_type1_attach_group,
	.detach_group		= vfio_iommu_type1_detach_group,
	.pin_pages		= vfio_iommu_type1_pin_pages,
	.unpin_pages		= vfio_iommu_type1_unpin_pages,
	.register_device	= vfio_iommu_type1_register_device,
	.unregister_device	= vfio_iommu_type1_unregister_device,
	.dma_rw			= vfio_iommu_type1_dma_rw,
	.group_iommu_domain	= vfio_iommu_type1_group_iommu_domain,
};
/* Module init: register the type1 backend with the VFIO core. */
static int __init vfio_iommu_type1_init(void)
{
	return vfio_register_iommu_driver(&vfio_iommu_driver_ops_type1);
}
/* Module exit: unregister the type1 backend. */
static void __exit vfio_iommu_type1_cleanup(void)
{
	vfio_unregister_iommu_driver(&vfio_iommu_driver_ops_type1);
}
module_init(vfio_iommu_type1_init);
module_exit(vfio_iommu_type1_cleanup);
MODULE_VERSION(DRIVER_VERSION);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
| linux-master | drivers/vfio/vfio_iommu_type1.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* VFIO core
*
* Copyright (C) 2012 Red Hat, Inc. All rights reserved.
* Author: Alex Williamson <[email protected]>
*
* Derived from original vfio:
* Copyright 2010 Cisco Systems, Inc. All rights reserved.
* Author: Tom Lyon, [email protected]
*/
#include <linux/cdev.h>
#include <linux/compat.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/idr.h>
#include <linux/iommu.h>
#ifdef CONFIG_HAVE_KVM
#include <linux/kvm_host.h>
#endif
#include <linux/list.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/rwsem.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/stat.h>
#include <linux/string.h>
#include <linux/uaccess.h>
#include <linux/vfio.h>
#include <linux/wait.h>
#include <linux/sched/signal.h>
#include <linux/pm_runtime.h>
#include <linux/interval_tree.h>
#include <linux/iova_bitmap.h>
#include <linux/iommufd.h>
#include "vfio.h"
#define DRIVER_VERSION "0.3"
#define DRIVER_AUTHOR "Alex Williamson <[email protected]>"
#define DRIVER_DESC "VFIO - User Level meta-driver"
/* Global VFIO core state: the device class and the minor-number allocator. */
static struct vfio {
	struct class			*device_class;
	struct ida			device_ida;
} vfio;
#ifdef CONFIG_VFIO_NOIOMMU
/* Opt-in module parameter enabling the unsafe no-IOMMU mode. */
bool vfio_noiommu __read_mostly;
module_param_named(enable_unsafe_noiommu_mode,
		   vfio_noiommu, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(enable_unsafe_noiommu_mode, "Enable UNSAFE, no-IOMMU mode.  This mode provides no device isolation, no DMA translation, no host kernel protection, cannot be used for device assignment to virtual machines, requires RAWIO permissions, and will taint the kernel.  If you do not know what this is for, step away. (default: false)");
#endif
/* Maps set_id cookies to their shared vfio_device_set. */
static DEFINE_XARRAY(vfio_device_set_xa);

/*
 * Join @device to the vfio_device_set identified by @set_id, creating the
 * set on first use.  Returns 0 on success or -errno.
 */
int vfio_assign_device_set(struct vfio_device *device, void *set_id)
{
	unsigned long idx = (unsigned long)set_id;
	struct vfio_device_set *new_dev_set;
	struct vfio_device_set *dev_set;

	if (WARN_ON(!set_id))
		return -EINVAL;

	/*
	 * Atomically acquire a singleton object in the xarray for this set_id
	 */
	xa_lock(&vfio_device_set_xa);
	dev_set = xa_load(&vfio_device_set_xa, idx);
	if (dev_set)
		goto found_get_ref;
	xa_unlock(&vfio_device_set_xa);

	/* Allocate outside the lock, then race to install the new set. */
	new_dev_set = kzalloc(sizeof(*new_dev_set), GFP_KERNEL);
	if (!new_dev_set)
		return -ENOMEM;
	mutex_init(&new_dev_set->lock);
	INIT_LIST_HEAD(&new_dev_set->device_list);
	new_dev_set->set_id = set_id;

	xa_lock(&vfio_device_set_xa);
	dev_set = __xa_cmpxchg(&vfio_device_set_xa, idx, NULL, new_dev_set,
			       GFP_KERNEL);
	if (!dev_set) {
		dev_set = new_dev_set;
		goto found_get_ref;
	}

	/* Someone else installed a set first, or the store failed. */
	kfree(new_dev_set);
	if (xa_is_err(dev_set)) {
		xa_unlock(&vfio_device_set_xa);
		return xa_err(dev_set);
	}

found_get_ref:
	dev_set->device_count++;
	xa_unlock(&vfio_device_set_xa);
	mutex_lock(&dev_set->lock);
	device->dev_set = dev_set;
	list_add_tail(&device->dev_set_list, &dev_set->device_list);
	mutex_unlock(&dev_set->lock);
	return 0;
}
EXPORT_SYMBOL_GPL(vfio_assign_device_set);
/*
 * Drop @device's membership in its dev_set, freeing the set when the last
 * member leaves.  Safe to call when no set was ever assigned.
 */
static void vfio_release_device_set(struct vfio_device *device)
{
	struct vfio_device_set *dev_set = device->dev_set;

	if (!dev_set)
		return;

	mutex_lock(&dev_set->lock);
	list_del(&device->dev_set_list);
	mutex_unlock(&dev_set->lock);

	xa_lock(&vfio_device_set_xa);
	if (!--dev_set->device_count) {
		__xa_erase(&vfio_device_set_xa,
			   (unsigned long)dev_set->set_id);
		mutex_destroy(&dev_set->lock);
		kfree(dev_set);
	}
	xa_unlock(&vfio_device_set_xa);
}
/* Sum of open_count over all devices in @dev_set; dev_set->lock required. */
unsigned int vfio_device_set_open_count(struct vfio_device_set *dev_set)
{
	struct vfio_device *cur;
	unsigned int open_count = 0;

	lockdep_assert_held(&dev_set->lock);

	list_for_each_entry(cur, &dev_set->device_list, dev_set_list)
		open_count += cur->open_count;
	return open_count;
}
EXPORT_SYMBOL_GPL(vfio_device_set_open_count);
/*
 * Find the vfio_device in @dev_set backed by @dev, or NULL.
 * Caller must hold dev_set->lock.
 */
struct vfio_device *
vfio_find_device_in_devset(struct vfio_device_set *dev_set,
			   struct device *dev)
{
	struct vfio_device *cur;

	lockdep_assert_held(&dev_set->lock);

	list_for_each_entry(cur, &dev_set->device_list, dev_set_list)
		if (cur->dev == dev)
			return cur;
	return NULL;
}
EXPORT_SYMBOL_GPL(vfio_find_device_in_devset);
/*
* Device objects - create, release, get, put, search
*/
/* Device reference always implies a group reference */
/*
 * Drop a registration reference; the final put wakes the unregistering
 * thread waiting on device->comp.
 */
void vfio_device_put_registration(struct vfio_device *device)
{
	if (refcount_dec_and_test(&device->refcount))
		complete(&device->comp);
}
/* Take a registration reference unless unregistration already completed. */
bool vfio_device_try_get_registration(struct vfio_device *device)
{
	return refcount_inc_not_zero(&device->refcount);
}
/*
* VFIO driver API
*/
/* Release helper called by vfio_put_device() */
/*
 * struct device release callback: tears down the dev_set membership and
 * index, gives the driver its ->release() hook, then frees the structure.
 */
static void vfio_device_release(struct device *dev)
{
	struct vfio_device *device =
			container_of(dev, struct vfio_device, device);

	vfio_release_device_set(device);
	ida_free(&vfio.device_ida, device->index);

	if (device->ops->release)
		device->ops->release(device);

	kvfree(device);
}
static int vfio_init_device(struct vfio_device *device, struct device *dev,
const struct vfio_device_ops *ops);
/*
* Allocate and initialize vfio_device so it can be registered to vfio
* core.
*
* Drivers should use the wrapper vfio_alloc_device() for allocation.
* @size is the size of the structure to be allocated, including any
* private data used by the driver.
*
* Driver may provide an @init callback to cover device private data.
*
* Use vfio_put_device() to release the structure after success return.
*/
/*
 * Backend for vfio_alloc_device(): allocate @size bytes (vfio_device plus
 * driver-private data) and initialize the embedded vfio_device.  Returns
 * the device or ERR_PTR(-errno).
 */
struct vfio_device *_vfio_alloc_device(size_t size, struct device *dev,
				       const struct vfio_device_ops *ops)
{
	struct vfio_device *device;
	int ret;

	/* The driver structure must embed a vfio_device at offset 0. */
	if (WARN_ON(size < sizeof(struct vfio_device)))
		return ERR_PTR(-EINVAL);

	device = kvzalloc(size, GFP_KERNEL);
	if (!device)
		return ERR_PTR(-ENOMEM);

	ret = vfio_init_device(device, dev, ops);
	if (ret)
		goto out_free;
	return device;

out_free:
	kvfree(device);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(_vfio_alloc_device);
/*
* Initialize a vfio_device so it can be registered to vfio core.
*/
/*
 * Initialize a vfio_device so it can be registered to vfio core.
 * Allocates the minor index, runs the driver's ->init() hook, and sets up
 * the embedded struct device (released via vfio_device_release()).
 */
static int vfio_init_device(struct vfio_device *device, struct device *dev,
			    const struct vfio_device_ops *ops)
{
	int ret;

	ret = ida_alloc_max(&vfio.device_ida, MINORMASK, GFP_KERNEL);
	if (ret < 0) {
		dev_dbg(dev, "Error to alloc index\n");
		return ret;
	}

	device->index = ret;
	init_completion(&device->comp);
	device->dev = dev;
	device->ops = ops;

	if (ops->init) {
		ret = ops->init(device);
		if (ret)
			goto out_uninit;
	}

	device_initialize(&device->device);
	device->device.release = vfio_device_release;
	device->device.class = vfio.device_class;
	device->device.parent = device->dev;
	return 0;

out_uninit:
	/* No dev_set is assigned yet; vfio_release_device_set handles NULL. */
	vfio_release_device_set(device);
	ida_free(&vfio.device_ida, device->index);
	return ret;
}
/*
 * Common registration path for group-backed and emulated devices: validate
 * the iommufd callback set, assign a (possibly singleton) dev_set, name the
 * device, join its group, and publish it.
 */
static int __vfio_register_dev(struct vfio_device *device,
			       enum vfio_group_type type)
{
	int ret;

	/* With iommufd enabled, all four bind/attach callbacks are required. */
	if (WARN_ON(IS_ENABLED(CONFIG_IOMMUFD) &&
		    (!device->ops->bind_iommufd ||
		     !device->ops->unbind_iommufd ||
		     !device->ops->attach_ioas ||
		     !device->ops->detach_ioas)))
		return -EINVAL;

	/*
	 * If the driver doesn't specify a set then the device is added to a
	 * singleton set just for itself.
	 */
	if (!device->dev_set)
		vfio_assign_device_set(device, device);

	ret = dev_set_name(&device->device, "vfio%d", device->index);
	if (ret)
		return ret;

	ret = vfio_device_set_group(device, type);
	if (ret)
		return ret;

	/*
	 * VFIO always sets IOMMU_CACHE because we offer no way for userspace to
	 * restore cache coherency. It has to be checked here because it is only
	 * valid for cases where we are using iommu groups.
	 */
	if (type == VFIO_IOMMU && !vfio_device_is_noiommu(device) &&
	    !device_iommu_capable(device->dev, IOMMU_CAP_CACHE_COHERENCY)) {
		ret = -EINVAL;
		goto err_out;
	}

	ret = vfio_device_add(device);
	if (ret)
		goto err_out;

	/* Refcounting can't start until the driver calls register */
	refcount_set(&device->refcount, 1);

	vfio_device_group_register(device);

	return 0;
err_out:
	vfio_device_remove_group(device);
	return ret;
}
/* Register a device backed by a real IOMMU group. */
int vfio_register_group_dev(struct vfio_device *device)
{
	return __vfio_register_dev(device, VFIO_IOMMU);
}
EXPORT_SYMBOL_GPL(vfio_register_group_dev);
/*
* Register a virtual device without IOMMU backing. The user of this
* device must not be able to directly trigger unmediated DMA.
*/
/* Register a mediated device whose DMA is emulated rather than IOMMU-backed. */
int vfio_register_emulated_iommu_dev(struct vfio_device *device)
{
	return __vfio_register_dev(device, VFIO_EMULATED_IOMMU);
}
EXPORT_SYMBOL_GPL(vfio_register_emulated_iommu_dev);
/*
 * Decrement the device reference count and wait for the device to be
 * removed.  Open file descriptors for the device keep their references;
 * the driver's ->request() callback is invoked periodically below to ask
 * users to release the device so unregistration can complete.
 */
void vfio_unregister_group_dev(struct vfio_device *device)
{
	unsigned int i = 0;
	bool interrupted = false;
	long rc;

	/*
	 * Prevent new device opened by userspace via the
	 * VFIO_GROUP_GET_DEVICE_FD in the group path.
	 */
	vfio_device_group_unregister(device);

	/*
	 * Balances vfio_device_add() in register path, also prevents
	 * new device opened by userspace in the cdev path.
	 */
	vfio_device_del(device);

	vfio_device_put_registration(device);
	rc = try_wait_for_completion(&device->comp);
	while (rc <= 0) {
		if (device->ops->request)
			device->ops->request(device, i++);

		if (interrupted) {
			rc = wait_for_completion_timeout(&device->comp,
							 HZ * 10);
		} else {
			rc = wait_for_completion_interruptible_timeout(
				&device->comp, HZ * 10);
			if (rc < 0) {
				/* Signal received: fall back to uninterruptible waits. */
				interrupted = true;
				dev_warn(device->dev,
					 "Device is currently in use, task"
					 " \"%s\" (%d) "
					 "blocked until device is released",
					 current->comm, task_pid_nr(current));
			}
		}
	}

	/* Balances vfio_device_set_group in register path */
	vfio_device_remove_group(device);
}
EXPORT_SYMBOL_GPL(vfio_unregister_group_dev);
#ifdef CONFIG_HAVE_KVM
/*
 * Take a reference on @kvm and record it in @device, resolving the kvm
 * module symbols dynamically so vfio has no hard dependency on kvm.  On
 * success device->kvm/device->put_kvm are set and a kvm_put_kvm symbol
 * reference is retained for the later put.
 */
void vfio_device_get_kvm_safe(struct vfio_device *device, struct kvm *kvm)
{
	void (*pfn)(struct kvm *kvm);
	bool (*fn)(struct kvm *kvm);
	bool ret;

	lockdep_assert_held(&device->dev_set->lock);

	if (!kvm)
		return;

	pfn = symbol_get(kvm_put_kvm);
	if (WARN_ON(!pfn))
		return;

	fn = symbol_get(kvm_get_kvm_safe);
	if (WARN_ON(!fn)) {
		symbol_put(kvm_put_kvm);
		return;
	}

	/* May fail if the kvm is already being destroyed. */
	ret = fn(kvm);
	symbol_put(kvm_get_kvm_safe);
	if (!ret) {
		symbol_put(kvm_put_kvm);
		return;
	}

	device->put_kvm = pfn;
	device->kvm = kvm;
}
/*
 * Release the kvm reference taken by vfio_device_get_kvm_safe() and drop
 * the retained kvm_put_kvm symbol reference.
 */
void vfio_device_put_kvm(struct vfio_device *device)
{
	lockdep_assert_held(&device->dev_set->lock);

	if (!device->kvm)
		return;

	if (WARN_ON(!device->put_kvm))
		goto clear;

	device->put_kvm(device->kvm);
	device->put_kvm = NULL;
	symbol_put(kvm_put_kvm);

clear:
	device->kvm = NULL;
}
#endif
/* true if the vfio_device has open_device() called but not close_device() */
static bool vfio_assert_device_open(struct vfio_device *device)
{
	/* WARNs (once) and returns false if the device is not open. */
	return !WARN_ON_ONCE(!READ_ONCE(device->open_count));
}
/* Allocate the per-open file state bound to @device. */
struct vfio_device_file *
vfio_allocate_device_file(struct vfio_device *device)
{
	struct vfio_device_file *file;

	file = kzalloc(sizeof(*file), GFP_KERNEL_ACCOUNT);
	if (!file)
		return ERR_PTR(-ENOMEM);

	file->device = device;
	spin_lock_init(&file->kvm_ref_lock);
	return file;
}
/*
 * First open of a device: pin the driver module, claim the IOMMU (iommufd
 * bind or legacy group use), then run the driver's ->open_device() hook.
 * Unwinds in reverse order on failure.  dev_set->lock must be held.
 */
static int vfio_df_device_first_open(struct vfio_device_file *df)
{
	struct vfio_device *device = df->device;
	struct iommufd_ctx *iommufd = df->iommufd;
	int ret;

	lockdep_assert_held(&device->dev_set->lock);

	if (!try_module_get(device->dev->driver->owner))
		return -ENODEV;

	if (iommufd)
		ret = vfio_df_iommufd_bind(df);
	else
		ret = vfio_device_group_use_iommu(device);
	if (ret)
		goto err_module_put;

	if (device->ops->open_device) {
		ret = device->ops->open_device(device);
		if (ret)
			goto err_unuse_iommu;
	}
	return 0;

err_unuse_iommu:
	if (iommufd)
		vfio_df_iommufd_unbind(df);
	else
		vfio_device_group_unuse_iommu(device);
err_module_put:
	module_put(device->dev->driver->owner);
	return ret;
}
/*
 * Last close of a device: mirror of vfio_df_device_first_open() — run the
 * driver's ->close_device(), release the IOMMU, drop the module reference.
 * dev_set->lock must be held.
 */
static void vfio_df_device_last_close(struct vfio_device_file *df)
{
	struct vfio_device *device = df->device;
	struct iommufd_ctx *iommufd = df->iommufd;

	lockdep_assert_held(&device->dev_set->lock);

	if (device->ops->close_device)
		device->ops->close_device(device);
	if (iommufd)
		vfio_df_iommufd_unbind(df);
	else
		vfio_device_group_unuse_iommu(device);
	module_put(device->dev->driver->owner);
}
/*
 * Open a device through a device file, performing the first-open setup when
 * the open count transitions 0 -> 1.  dev_set->lock must be held.
 */
int vfio_df_open(struct vfio_device_file *df)
{
	struct vfio_device *device = df->device;
	int ret = 0;

	lockdep_assert_held(&device->dev_set->lock);

	/*
	 * Only the group path allows the device to be opened multiple
	 * times.  The device cdev path doesn't have a secure way for it.
	 */
	if (device->open_count != 0 && !df->group)
		return -EINVAL;

	device->open_count++;
	if (device->open_count == 1) {
		ret = vfio_df_device_first_open(df);
		if (ret)
			device->open_count--;
	}

	return ret;
}
/*
 * Close a device file, running the last-close teardown when the open count
 * drops from 1.  dev_set->lock must be held.
 */
void vfio_df_close(struct vfio_device_file *df)
{
	struct vfio_device *device = df->device;

	lockdep_assert_held(&device->dev_set->lock);

	vfio_assert_device_open(device);
	if (device->open_count == 1)
		vfio_df_device_last_close(df);
	device->open_count--;
}
/*
* Wrapper around pm_runtime_resume_and_get().
* Return error code on failure or 0 on success.
*/
static inline int vfio_device_pm_runtime_get(struct vfio_device *device)
{
	struct device *dev = device->dev;
	int ret;

	/* Only drivers that implement PM callbacks participate. */
	if (!dev->driver || !dev->driver->pm)
		return 0;

	ret = pm_runtime_resume_and_get(dev);
	if (!ret)
		return 0;

	dev_info_ratelimited(dev,
			     "vfio: runtime resume failed %d\n", ret);
	return -EIO;
}
/*
* Wrapper around pm_runtime_put().
*/
static inline void vfio_device_pm_runtime_put(struct vfio_device *device)
{
	struct device *dev = device->dev;

	/* Balance vfio_device_pm_runtime_get(): same driver->pm condition. */
	if (dev->driver && dev->driver->pm)
		pm_runtime_put(dev);
}
/*
* VFIO Device fd
*/
/*
 * Device fd ->release(): tear down the group or iommufd (cdev) binding,
 * drop the registration reference taken at open, and free the file state.
 */
static int vfio_device_fops_release(struct inode *inode, struct file *filep)
{
	struct vfio_device_file *df = filep->private_data;
	struct vfio_device *device = df->device;

	if (df->group)
		vfio_df_group_close(df);
	else
		vfio_df_unbind_iommufd(df);

	vfio_device_put_registration(device);

	kfree(df);

	return 0;
}
/*
* vfio_mig_get_next_state - Compute the next step in the FSM
* @cur_fsm - The current state the device is in
* @new_fsm - The target state to reach
* @next_fsm - Pointer to the next step to get to new_fsm
*
* Return 0 upon success, otherwise -errno
* Upon success the next step in the state progression between cur_fsm and
* new_fsm will be set in next_fsm.
*
* This breaks down requests for combination transitions into smaller steps and
* returns the next step to get to new_fsm. The function may need to be called
* multiple times before reaching new_fsm.
*
*/
int vfio_mig_get_next_state(struct vfio_device *device,
			    enum vfio_device_mig_state cur_fsm,
			    enum vfio_device_mig_state new_fsm,
			    enum vfio_device_mig_state *next_fsm)
{
	enum { VFIO_DEVICE_NUM_STATES = VFIO_DEVICE_STATE_PRE_COPY_P2P + 1 };
	/*
	 * The coding in this table requires the driver to implement the
	 * following FSM arcs:
	 *         RESUMING -> STOP
	 *         STOP -> RESUMING
	 *         STOP -> STOP_COPY
	 *         STOP_COPY -> STOP
	 *
	 * If P2P is supported then the driver must also implement these FSM
	 * arcs:
	 *         RUNNING -> RUNNING_P2P
	 *         RUNNING_P2P -> RUNNING
	 *         RUNNING_P2P -> STOP
	 *         STOP -> RUNNING_P2P
	 *
	 * If precopy is supported then the driver must support these additional
	 * FSM arcs:
	 *         RUNNING -> PRE_COPY
	 *         PRE_COPY -> RUNNING
	 *         PRE_COPY -> STOP_COPY
	 * However, if precopy and P2P are supported together then the driver
	 * must support these additional arcs beyond the P2P arcs above:
	 *         PRE_COPY -> RUNNING
	 *         PRE_COPY -> PRE_COPY_P2P
	 *         PRE_COPY_P2P -> PRE_COPY
	 *         PRE_COPY_P2P -> RUNNING_P2P
	 *         PRE_COPY_P2P -> STOP_COPY
	 *         RUNNING -> PRE_COPY
	 *         RUNNING_P2P -> PRE_COPY_P2P
	 *
	 * Without P2P and precopy the driver must implement:
	 *         RUNNING -> STOP
	 *         STOP -> RUNNING
	 *
	 * The coding will step through multiple states for some combination
	 * transitions; if all optional features are supported, this means the
	 * following ones:
	 *         PRE_COPY -> PRE_COPY_P2P -> STOP_COPY
	 *         PRE_COPY -> RUNNING -> RUNNING_P2P
	 *         PRE_COPY -> RUNNING -> RUNNING_P2P -> STOP
	 *         PRE_COPY -> RUNNING -> RUNNING_P2P -> STOP -> RESUMING
	 *         PRE_COPY_P2P -> RUNNING_P2P -> RUNNING
	 *         PRE_COPY_P2P -> RUNNING_P2P -> STOP
	 *         PRE_COPY_P2P -> RUNNING_P2P -> STOP -> RESUMING
	 *         RESUMING -> STOP -> RUNNING_P2P
	 *         RESUMING -> STOP -> RUNNING_P2P -> PRE_COPY_P2P
	 *         RESUMING -> STOP -> RUNNING_P2P -> RUNNING
	 *         RESUMING -> STOP -> RUNNING_P2P -> RUNNING -> PRE_COPY
	 *         RESUMING -> STOP -> STOP_COPY
	 *         RUNNING -> RUNNING_P2P -> PRE_COPY_P2P
	 *         RUNNING -> RUNNING_P2P -> STOP
	 *         RUNNING -> RUNNING_P2P -> STOP -> RESUMING
	 *         RUNNING -> RUNNING_P2P -> STOP -> STOP_COPY
	 *         RUNNING_P2P -> RUNNING -> PRE_COPY
	 *         RUNNING_P2P -> STOP -> RESUMING
	 *         RUNNING_P2P -> STOP -> STOP_COPY
	 *         STOP -> RUNNING_P2P -> PRE_COPY_P2P
	 *         STOP -> RUNNING_P2P -> RUNNING
	 *         STOP -> RUNNING_P2P -> RUNNING -> PRE_COPY
	 *         STOP_COPY -> STOP -> RESUMING
	 *         STOP_COPY -> STOP -> RUNNING_P2P
	 *         STOP_COPY -> STOP -> RUNNING_P2P -> RUNNING
	 *
	 * The following transitions are blocked:
	 *         STOP_COPY -> PRE_COPY
	 *         STOP_COPY -> PRE_COPY_P2P
	 */
	/* vfio_from_fsm_table[cur][target] == the next hop toward target. */
	static const u8 vfio_from_fsm_table[VFIO_DEVICE_NUM_STATES][VFIO_DEVICE_NUM_STATES] = {
		[VFIO_DEVICE_STATE_STOP] = {
			[VFIO_DEVICE_STATE_STOP] = VFIO_DEVICE_STATE_STOP,
			[VFIO_DEVICE_STATE_RUNNING] = VFIO_DEVICE_STATE_RUNNING_P2P,
			[VFIO_DEVICE_STATE_PRE_COPY] = VFIO_DEVICE_STATE_RUNNING_P2P,
			[VFIO_DEVICE_STATE_PRE_COPY_P2P] = VFIO_DEVICE_STATE_RUNNING_P2P,
			[VFIO_DEVICE_STATE_STOP_COPY] = VFIO_DEVICE_STATE_STOP_COPY,
			[VFIO_DEVICE_STATE_RESUMING] = VFIO_DEVICE_STATE_RESUMING,
			[VFIO_DEVICE_STATE_RUNNING_P2P] = VFIO_DEVICE_STATE_RUNNING_P2P,
			[VFIO_DEVICE_STATE_ERROR] = VFIO_DEVICE_STATE_ERROR,
		},
		[VFIO_DEVICE_STATE_RUNNING] = {
			[VFIO_DEVICE_STATE_STOP] = VFIO_DEVICE_STATE_RUNNING_P2P,
			[VFIO_DEVICE_STATE_RUNNING] = VFIO_DEVICE_STATE_RUNNING,
			[VFIO_DEVICE_STATE_PRE_COPY] = VFIO_DEVICE_STATE_PRE_COPY,
			[VFIO_DEVICE_STATE_PRE_COPY_P2P] = VFIO_DEVICE_STATE_RUNNING_P2P,
			[VFIO_DEVICE_STATE_STOP_COPY] = VFIO_DEVICE_STATE_RUNNING_P2P,
			[VFIO_DEVICE_STATE_RESUMING] = VFIO_DEVICE_STATE_RUNNING_P2P,
			[VFIO_DEVICE_STATE_RUNNING_P2P] = VFIO_DEVICE_STATE_RUNNING_P2P,
			[VFIO_DEVICE_STATE_ERROR] = VFIO_DEVICE_STATE_ERROR,
		},
		[VFIO_DEVICE_STATE_PRE_COPY] = {
			[VFIO_DEVICE_STATE_STOP] = VFIO_DEVICE_STATE_RUNNING,
			[VFIO_DEVICE_STATE_RUNNING] = VFIO_DEVICE_STATE_RUNNING,
			[VFIO_DEVICE_STATE_PRE_COPY] = VFIO_DEVICE_STATE_PRE_COPY,
			[VFIO_DEVICE_STATE_PRE_COPY_P2P] = VFIO_DEVICE_STATE_PRE_COPY_P2P,
			[VFIO_DEVICE_STATE_STOP_COPY] = VFIO_DEVICE_STATE_PRE_COPY_P2P,
			[VFIO_DEVICE_STATE_RESUMING] = VFIO_DEVICE_STATE_RUNNING,
			[VFIO_DEVICE_STATE_RUNNING_P2P] = VFIO_DEVICE_STATE_RUNNING,
			[VFIO_DEVICE_STATE_ERROR] = VFIO_DEVICE_STATE_ERROR,
		},
		[VFIO_DEVICE_STATE_PRE_COPY_P2P] = {
			[VFIO_DEVICE_STATE_STOP] = VFIO_DEVICE_STATE_RUNNING_P2P,
			[VFIO_DEVICE_STATE_RUNNING] = VFIO_DEVICE_STATE_RUNNING_P2P,
			[VFIO_DEVICE_STATE_PRE_COPY] = VFIO_DEVICE_STATE_PRE_COPY,
			[VFIO_DEVICE_STATE_PRE_COPY_P2P] = VFIO_DEVICE_STATE_PRE_COPY_P2P,
			[VFIO_DEVICE_STATE_STOP_COPY] = VFIO_DEVICE_STATE_STOP_COPY,
			[VFIO_DEVICE_STATE_RESUMING] = VFIO_DEVICE_STATE_RUNNING_P2P,
			[VFIO_DEVICE_STATE_RUNNING_P2P] = VFIO_DEVICE_STATE_RUNNING_P2P,
			[VFIO_DEVICE_STATE_ERROR] = VFIO_DEVICE_STATE_ERROR,
		},
		[VFIO_DEVICE_STATE_STOP_COPY] = {
			[VFIO_DEVICE_STATE_STOP] = VFIO_DEVICE_STATE_STOP,
			[VFIO_DEVICE_STATE_RUNNING] = VFIO_DEVICE_STATE_STOP,
			[VFIO_DEVICE_STATE_PRE_COPY] = VFIO_DEVICE_STATE_ERROR,
			[VFIO_DEVICE_STATE_PRE_COPY_P2P] = VFIO_DEVICE_STATE_ERROR,
			[VFIO_DEVICE_STATE_STOP_COPY] = VFIO_DEVICE_STATE_STOP_COPY,
			[VFIO_DEVICE_STATE_RESUMING] = VFIO_DEVICE_STATE_STOP,
			[VFIO_DEVICE_STATE_RUNNING_P2P] = VFIO_DEVICE_STATE_STOP,
			[VFIO_DEVICE_STATE_ERROR] = VFIO_DEVICE_STATE_ERROR,
		},
		[VFIO_DEVICE_STATE_RESUMING] = {
			[VFIO_DEVICE_STATE_STOP] = VFIO_DEVICE_STATE_STOP,
			[VFIO_DEVICE_STATE_RUNNING] = VFIO_DEVICE_STATE_STOP,
			[VFIO_DEVICE_STATE_PRE_COPY] = VFIO_DEVICE_STATE_STOP,
			[VFIO_DEVICE_STATE_PRE_COPY_P2P] = VFIO_DEVICE_STATE_STOP,
			[VFIO_DEVICE_STATE_STOP_COPY] = VFIO_DEVICE_STATE_STOP,
			[VFIO_DEVICE_STATE_RESUMING] = VFIO_DEVICE_STATE_RESUMING,
			[VFIO_DEVICE_STATE_RUNNING_P2P] = VFIO_DEVICE_STATE_STOP,
			[VFIO_DEVICE_STATE_ERROR] = VFIO_DEVICE_STATE_ERROR,
		},
		[VFIO_DEVICE_STATE_RUNNING_P2P] = {
			[VFIO_DEVICE_STATE_STOP] = VFIO_DEVICE_STATE_STOP,
			[VFIO_DEVICE_STATE_RUNNING] = VFIO_DEVICE_STATE_RUNNING,
			[VFIO_DEVICE_STATE_PRE_COPY] = VFIO_DEVICE_STATE_RUNNING,
			[VFIO_DEVICE_STATE_PRE_COPY_P2P] = VFIO_DEVICE_STATE_PRE_COPY_P2P,
			[VFIO_DEVICE_STATE_STOP_COPY] = VFIO_DEVICE_STATE_STOP,
			[VFIO_DEVICE_STATE_RESUMING] = VFIO_DEVICE_STATE_STOP,
			[VFIO_DEVICE_STATE_RUNNING_P2P] = VFIO_DEVICE_STATE_RUNNING_P2P,
			[VFIO_DEVICE_STATE_ERROR] = VFIO_DEVICE_STATE_ERROR,
		},
		[VFIO_DEVICE_STATE_ERROR] = {
			[VFIO_DEVICE_STATE_STOP] = VFIO_DEVICE_STATE_ERROR,
			[VFIO_DEVICE_STATE_RUNNING] = VFIO_DEVICE_STATE_ERROR,
			[VFIO_DEVICE_STATE_PRE_COPY] = VFIO_DEVICE_STATE_ERROR,
			[VFIO_DEVICE_STATE_PRE_COPY_P2P] = VFIO_DEVICE_STATE_ERROR,
			[VFIO_DEVICE_STATE_STOP_COPY] = VFIO_DEVICE_STATE_ERROR,
			[VFIO_DEVICE_STATE_RESUMING] = VFIO_DEVICE_STATE_ERROR,
			[VFIO_DEVICE_STATE_RUNNING_P2P] = VFIO_DEVICE_STATE_ERROR,
			[VFIO_DEVICE_STATE_ERROR] = VFIO_DEVICE_STATE_ERROR,
		},
	};

	/* Feature flags a device must advertise for each state to be usable. */
	static const unsigned int state_flags_table[VFIO_DEVICE_NUM_STATES] = {
		[VFIO_DEVICE_STATE_STOP] = VFIO_MIGRATION_STOP_COPY,
		[VFIO_DEVICE_STATE_RUNNING] = VFIO_MIGRATION_STOP_COPY,
		[VFIO_DEVICE_STATE_PRE_COPY] =
			VFIO_MIGRATION_STOP_COPY | VFIO_MIGRATION_PRE_COPY,
		[VFIO_DEVICE_STATE_PRE_COPY_P2P] = VFIO_MIGRATION_STOP_COPY |
						   VFIO_MIGRATION_P2P |
						   VFIO_MIGRATION_PRE_COPY,
		[VFIO_DEVICE_STATE_STOP_COPY] = VFIO_MIGRATION_STOP_COPY,
		[VFIO_DEVICE_STATE_RESUMING] = VFIO_MIGRATION_STOP_COPY,
		[VFIO_DEVICE_STATE_RUNNING_P2P] =
			VFIO_MIGRATION_STOP_COPY | VFIO_MIGRATION_P2P,
		[VFIO_DEVICE_STATE_ERROR] = ~0U,
	};

	/* A cur_fsm the device doesn't support indicates a driver bug. */
	if (WARN_ON(cur_fsm >= ARRAY_SIZE(vfio_from_fsm_table) ||
		    (state_flags_table[cur_fsm] & device->migration_flags) !=
			state_flags_table[cur_fsm]))
		return -EINVAL;

	/* An unsupported new_fsm is a userspace error, not a WARN. */
	if (new_fsm >= ARRAY_SIZE(vfio_from_fsm_table) ||
	   (state_flags_table[new_fsm] & device->migration_flags) !=
			state_flags_table[new_fsm])
		return -EINVAL;

	/*
	 * Arcs touching optional and unsupported states are skipped over. The
	 * driver will instead see an arc from the original state to the next
	 * logical state, as per the above comment.
	 */
	*next_fsm = vfio_from_fsm_table[cur_fsm][new_fsm];
	while ((state_flags_table[*next_fsm] & device->migration_flags) !=
			state_flags_table[*next_fsm])
		*next_fsm = vfio_from_fsm_table[*next_fsm][new_fsm];

	return (*next_fsm != VFIO_DEVICE_STATE_ERROR) ? 0 : -EINVAL;
}
EXPORT_SYMBOL_GPL(vfio_mig_get_next_state);
/*
* Convert the drivers's struct file into a FD number and return it to userspace
*/
/*
 * Install @filp as a new fd and copy the fd number back to userspace.
 * Consumes the reference on @filp in all cases: fd_install() on success,
 * fput() on failure.
 */
static int vfio_ioct_mig_return_fd(struct file *filp, void __user *arg,
				   struct vfio_device_feature_mig_state *mig)
{
	int ret;
	int fd;

	fd = get_unused_fd_flags(O_CLOEXEC);
	if (fd < 0) {
		ret = fd;
		goto out_fput;
	}

	mig->data_fd = fd;
	if (copy_to_user(arg, mig, sizeof(*mig))) {
		ret = -EFAULT;
		goto out_put_unused;
	}
	/* Publish the fd only after userspace knows its number. */
	fd_install(fd, filp);
	return 0;

out_put_unused:
	put_unused_fd(fd);
out_fput:
	fput(filp);
	return ret;
}
/*
 * VFIO_DEVICE_FEATURE_MIG_DEVICE_STATE: GET reports the current migration
 * state; SET transitions the device and may return a data-transfer fd
 * (data_fd is -1 when the transition produces no file).
 */
static int
vfio_ioctl_device_feature_mig_device_state(struct vfio_device *device,
					   u32 flags, void __user *arg,
					   size_t argsz)
{
	size_t minsz =
		offsetofend(struct vfio_device_feature_mig_state, data_fd);
	struct vfio_device_feature_mig_state mig;
	struct file *filp = NULL;
	int ret;

	if (!device->mig_ops)
		return -ENOTTY;

	ret = vfio_check_feature(flags, argsz,
				 VFIO_DEVICE_FEATURE_SET |
				 VFIO_DEVICE_FEATURE_GET,
				 sizeof(mig));
	if (ret != 1)
		return ret;

	if (copy_from_user(&mig, arg, minsz))
		return -EFAULT;

	if (flags & VFIO_DEVICE_FEATURE_GET) {
		enum vfio_device_mig_state curr_state;

		ret = device->mig_ops->migration_get_state(device,
							   &curr_state);
		if (ret)
			return ret;
		mig.device_state = curr_state;
		goto out_copy;
	}

	/* Handle the VFIO_DEVICE_FEATURE_SET */
	filp = device->mig_ops->migration_set_state(device, mig.device_state);
	if (IS_ERR(filp) || !filp)
		goto out_copy;

	return vfio_ioct_mig_return_fd(filp, arg, &mig);
out_copy:
	mig.data_fd = -1;
	if (copy_to_user(arg, &mig, sizeof(mig)))
		return -EFAULT;
	/* Report the driver's error after the state was copied back. */
	if (IS_ERR(filp))
		return PTR_ERR(filp);
	return 0;
}
/*
 * VFIO_DEVICE_FEATURE_MIG_DATA_SIZE (GET only): report the driver's
 * estimated stop-copy data length.
 */
static int
vfio_ioctl_device_feature_migration_data_size(struct vfio_device *device,
					      u32 flags, void __user *arg,
					      size_t argsz)
{
	struct vfio_device_feature_mig_data_size data_size = {};
	unsigned long stop_copy_length;
	int ret;

	if (!device->mig_ops)
		return -ENOTTY;

	ret = vfio_check_feature(flags, argsz, VFIO_DEVICE_FEATURE_GET,
				 sizeof(data_size));
	if (ret != 1)
		return ret;

	ret = device->mig_ops->migration_get_data_size(device, &stop_copy_length);
	if (ret)
		return ret;

	data_size.stop_copy_length = stop_copy_length;
	if (copy_to_user(arg, &data_size, sizeof(data_size)))
		return -EFAULT;

	return 0;
}
/*
 * VFIO_DEVICE_FEATURE_MIGRATION handler: report the device's migration
 * capability flags to userspace.
 */
static int vfio_ioctl_device_feature_migration(struct vfio_device *device,
					       u32 flags, void __user *arg,
					       size_t argsz)
{
	struct vfio_device_feature_migration info = {
		.flags = device->migration_flags,
	};
	int rc;

	if (!device->mig_ops)
		return -ENOTTY;

	rc = vfio_check_feature(flags, argsz, VFIO_DEVICE_FEATURE_GET,
				sizeof(info));
	if (rc != 1)
		return rc;

	return copy_to_user(arg, &info, sizeof(info)) ? -EFAULT : 0;
}
/*
 * vfio_combine_iova_ranges() - reduce an interval tree to at most
 * @req_nodes entries by merging the ranges separated by the smallest gaps.
 * @root:      interval tree of IOVA ranges, modified in place
 * @cur_nodes: current number of nodes in the tree
 * @req_nodes: maximum number of nodes the caller can accept
 *
 * Merging only ever widens ranges, so the result always covers all of the
 * original IOVAs (plus the gaps that were swallowed).
 *
 * Fix vs. original: an empty tree previously dereferenced a NULL
 * comb_start (and used an uninitialized 'last'), and the combine loop
 * could use uninitialized comb_start/comb_end if no gap was recorded.
 * Both cases are now guarded with WARN_ON_ONCE and an early return.
 */
void vfio_combine_iova_ranges(struct rb_root_cached *root, u32 cur_nodes,
			      u32 req_nodes)
{
	struct interval_tree_node *prev, *curr, *comb_start, *comb_end;
	unsigned long min_gap, curr_gap;

	/* Special shortcut when a single range is required */
	if (req_nodes == 1) {
		unsigned long last;

		comb_start = interval_tree_iter_first(root, 0, ULONG_MAX);

		/* Empty tree: nothing to combine, avoid NULL dereference. */
		if (WARN_ON_ONCE(!comb_start))
			return;

		curr = comb_start;
		while (curr) {
			last = curr->last;
			prev = curr;
			curr = interval_tree_iter_next(curr, 0, ULONG_MAX);
			if (prev != comb_start)
				interval_tree_remove(prev, root);
		}
		comb_start->last = last;
		return;
	}

	/* Combine ranges which have the smallest gap */
	while (cur_nodes > req_nodes) {
		prev = NULL;
		min_gap = ULONG_MAX;
		curr = interval_tree_iter_first(root, 0, ULONG_MAX);

		/* Tree emptier than @cur_nodes claims: bail out. */
		if (WARN_ON_ONCE(!curr))
			return;

		while (curr) {
			if (prev) {
				curr_gap = curr->start - prev->last;
				if (curr_gap < min_gap) {
					min_gap = curr_gap;
					comb_start = prev;
					comb_end = curr;
				}
			}
			prev = curr;
			curr = interval_tree_iter_next(curr, 0, ULONG_MAX);
		}

		/* No candidate pair found; comb_start/comb_end are unset. */
		if (WARN_ON_ONCE(min_gap == ULONG_MAX))
			return;

		comb_start->last = comb_end->last;
		interval_tree_remove(comb_end, root);
		cur_nodes--;
	}
}
EXPORT_SYMBOL_GPL(vfio_combine_iova_ranges);
/* Ranges should fit into a single kernel page */
#define LOG_MAX_RANGES \
(PAGE_SIZE / sizeof(struct vfio_device_feature_dma_logging_range))
/*
 * VFIO_DEVICE_FEATURE_DMA_LOGGING_START handler: validate the user's
 * range list, build an interval tree of the ranges and hand it to the
 * driver's log_start op.  The tree nodes are only needed for the call
 * itself and are freed before returning.
 */
static int
vfio_ioctl_device_feature_logging_start(struct vfio_device *device,
					u32 flags, void __user *arg,
					size_t argsz)
{
	size_t minsz =
		offsetofend(struct vfio_device_feature_dma_logging_control,
			    ranges);
	struct vfio_device_feature_dma_logging_range __user *ranges;
	struct vfio_device_feature_dma_logging_control control;
	struct vfio_device_feature_dma_logging_range range;
	struct rb_root_cached root = RB_ROOT_CACHED;
	struct interval_tree_node *nodes;
	u64 iova_end;
	u32 nnodes;
	int i, ret;

	if (!device->log_ops)
		return -ENOTTY;

	ret = vfio_check_feature(flags, argsz,
				 VFIO_DEVICE_FEATURE_SET,
				 sizeof(control));
	if (ret != 1)
		return ret;

	if (copy_from_user(&control, arg, minsz))
		return -EFAULT;

	/* Bound the range count: at least one, at most one page's worth. */
	nnodes = control.num_ranges;
	if (!nnodes)
		return -EINVAL;

	if (nnodes > LOG_MAX_RANGES)
		return -E2BIG;

	ranges = u64_to_user_ptr(control.ranges);
	nodes = kmalloc_array(nnodes, sizeof(struct interval_tree_node),
			      GFP_KERNEL);
	if (!nodes)
		return -ENOMEM;

	for (i = 0; i < nnodes; i++) {
		if (copy_from_user(&range, &ranges[i], sizeof(range))) {
			ret = -EFAULT;
			goto end;
		}
		/* Each range must be page_size aligned at both ends. */
		if (!IS_ALIGNED(range.iova, control.page_size) ||
		    !IS_ALIGNED(range.length, control.page_size)) {
			ret = -EINVAL;
			goto end;
		}

		if (check_add_overflow(range.iova, range.length, &iova_end) ||
		    iova_end > ULONG_MAX) {
			ret = -EOVERFLOW;
			goto end;
		}

		nodes[i].start = range.iova;
		nodes[i].last = range.iova + range.length - 1;
		if (interval_tree_iter_first(&root, nodes[i].start,
					     nodes[i].last)) {
			/* Range overlapping */
			ret = -EINVAL;
			goto end;
		}
		interval_tree_insert(nodes + i, &root);
	}

	/* The driver may round page_size; the result is copied back below. */
	ret = device->log_ops->log_start(device, &root, nnodes,
					 &control.page_size);
	if (ret)
		goto end;

	if (copy_to_user(arg, &control, sizeof(control))) {
		ret = -EFAULT;
		/* Undo the successful log_start if we can't tell the user. */
		device->log_ops->log_stop(device);
	}

end:
	kfree(nodes);
	return ret;
}
/*
 * VFIO_DEVICE_FEATURE_DMA_LOGGING_STOP handler: tell the driver to stop
 * DMA dirty-page tracking.  No payload beyond the feature header.
 */
static int
vfio_ioctl_device_feature_logging_stop(struct vfio_device *device,
				       u32 flags, void __user *arg,
				       size_t argsz)
{
	int rc;

	if (!device->log_ops)
		return -ENOTTY;

	rc = vfio_check_feature(flags, argsz, VFIO_DEVICE_FEATURE_SET, 0);
	if (rc != 1)
		return rc;

	return device->log_ops->log_stop(device);
}
/*
 * iova_bitmap_for_each() callback: forward one IOVA chunk to the
 * driver's log_read_and_clear op; @opaque carries the vfio_device.
 */
static int vfio_device_log_read_and_clear(struct iova_bitmap *iter,
					  unsigned long iova, size_t length,
					  void *opaque)
{
	struct vfio_device *device = opaque;

	return device->log_ops->log_read_and_clear(device, iova, length, iter);
}
/*
 * VFIO_DEVICE_FEATURE_DMA_LOGGING_REPORT handler: validate the report
 * request and walk the user-supplied bitmap, letting the driver fill in
 * (and clear) the dirty bits chunk by chunk.
 */
static int
vfio_ioctl_device_feature_logging_report(struct vfio_device *device,
					 u32 flags, void __user *arg,
					 size_t argsz)
{
	size_t minsz =
		offsetofend(struct vfio_device_feature_dma_logging_report,
			    bitmap);
	struct vfio_device_feature_dma_logging_report report;
	struct iova_bitmap *iter;
	u64 iova_end;
	int ret;

	if (!device->log_ops)
		return -ENOTTY;

	ret = vfio_check_feature(flags, argsz,
				 VFIO_DEVICE_FEATURE_GET,
				 sizeof(report));
	if (ret != 1)
		return ret;

	if (copy_from_user(&report, arg, minsz))
		return -EFAULT;

	/* Bitmap granularity: at least 4K and a power of two. */
	if (report.page_size < SZ_4K || !is_power_of_2(report.page_size))
		return -EINVAL;

	if (check_add_overflow(report.iova, report.length, &iova_end) ||
	    iova_end > ULONG_MAX)
		return -EOVERFLOW;

	iter = iova_bitmap_alloc(report.iova, report.length,
				 report.page_size,
				 u64_to_user_ptr(report.bitmap));
	if (IS_ERR(iter))
		return PTR_ERR(iter);

	ret = iova_bitmap_for_each(iter, device,
				   vfio_device_log_read_and_clear);

	iova_bitmap_free(iter);
	return ret;
}
/*
 * VFIO_DEVICE_FEATURE ioctl entry point: validate the common feature
 * header, then dispatch to the per-feature handler (core features here,
 * anything else to the driver's device_feature op).
 */
static int vfio_ioctl_device_feature(struct vfio_device *device,
				     struct vfio_device_feature __user *arg)
{
	size_t minsz = offsetofend(struct vfio_device_feature, flags);
	struct vfio_device_feature feature;

	if (copy_from_user(&feature, arg, minsz))
		return -EFAULT;

	if (feature.argsz < minsz)
		return -EINVAL;

	/* Check unknown flags */
	if (feature.flags &
	    ~(VFIO_DEVICE_FEATURE_MASK | VFIO_DEVICE_FEATURE_SET |
	      VFIO_DEVICE_FEATURE_GET | VFIO_DEVICE_FEATURE_PROBE))
		return -EINVAL;

	/* GET & SET are mutually exclusive except with PROBE */
	if (!(feature.flags & VFIO_DEVICE_FEATURE_PROBE) &&
	    (feature.flags & VFIO_DEVICE_FEATURE_SET) &&
	    (feature.flags & VFIO_DEVICE_FEATURE_GET))
		return -EINVAL;

	/* Handlers receive only the payload past the common header. */
	switch (feature.flags & VFIO_DEVICE_FEATURE_MASK) {
	case VFIO_DEVICE_FEATURE_MIGRATION:
		return vfio_ioctl_device_feature_migration(
			device, feature.flags, arg->data,
			feature.argsz - minsz);
	case VFIO_DEVICE_FEATURE_MIG_DEVICE_STATE:
		return vfio_ioctl_device_feature_mig_device_state(
			device, feature.flags, arg->data,
			feature.argsz - minsz);
	case VFIO_DEVICE_FEATURE_DMA_LOGGING_START:
		return vfio_ioctl_device_feature_logging_start(
			device, feature.flags, arg->data,
			feature.argsz - minsz);
	case VFIO_DEVICE_FEATURE_DMA_LOGGING_STOP:
		return vfio_ioctl_device_feature_logging_stop(
			device, feature.flags, arg->data,
			feature.argsz - minsz);
	case VFIO_DEVICE_FEATURE_DMA_LOGGING_REPORT:
		return vfio_ioctl_device_feature_logging_report(
			device, feature.flags, arg->data,
			feature.argsz - minsz);
	case VFIO_DEVICE_FEATURE_MIG_DATA_SIZE:
		return vfio_ioctl_device_feature_migration_data_size(
			device, feature.flags, arg->data,
			feature.argsz - minsz);
	default:
		if (unlikely(!device->ops->device_feature))
			return -EINVAL;
		return device->ops->device_feature(device, feature.flags,
						   arg->data,
						   feature.argsz - minsz);
	}
}
/*
 * Device file ioctl dispatcher.  BIND_IOMMUFD is allowed before access
 * is granted; everything else requires access and runs with a runtime
 * PM reference held on the device.
 */
static long vfio_device_fops_unl_ioctl(struct file *filep,
				       unsigned int cmd, unsigned long arg)
{
	struct vfio_device_file *df = filep->private_data;
	struct vfio_device *device = df->device;
	void __user *uptr = (void __user *)arg;
	int ret;

	if (cmd == VFIO_DEVICE_BIND_IOMMUFD)
		return vfio_df_ioctl_bind_iommufd(df, uptr);

	/* Paired with smp_store_release() following vfio_df_open() */
	if (!smp_load_acquire(&df->access_granted))
		return -EINVAL;

	ret = vfio_device_pm_runtime_get(device);
	if (ret)
		return ret;

	/* cdev only ioctls */
	if (IS_ENABLED(CONFIG_VFIO_DEVICE_CDEV) && !df->group) {
		switch (cmd) {
		case VFIO_DEVICE_ATTACH_IOMMUFD_PT:
			ret = vfio_df_ioctl_attach_pt(df, uptr);
			goto out;

		case VFIO_DEVICE_DETACH_IOMMUFD_PT:
			ret = vfio_df_ioctl_detach_pt(df, uptr);
			goto out;
		}
	}

	switch (cmd) {
	case VFIO_DEVICE_FEATURE:
		ret = vfio_ioctl_device_feature(device, uptr);
		break;

	default:
		if (unlikely(!device->ops->ioctl))
			ret = -EINVAL;
		else
			ret = device->ops->ioctl(device, cmd, arg);
		break;
	}
out:
	/* Balances the runtime_get above on every exit path past it. */
	vfio_device_pm_runtime_put(device);
	return ret;
}
/* Device file read: forward to the driver's read op once access is granted. */
static ssize_t vfio_device_fops_read(struct file *filep, char __user *buf,
				     size_t count, loff_t *ppos)
{
	struct vfio_device_file *vdf = filep->private_data;
	struct vfio_device *vdev = vdf->device;

	/* Paired with smp_store_release() following vfio_df_open() */
	if (!smp_load_acquire(&vdf->access_granted))
		return -EINVAL;

	return likely(vdev->ops->read) ?
		vdev->ops->read(vdev, buf, count, ppos) : -EINVAL;
}
/* Device file write: forward to the driver's write op once access is granted. */
static ssize_t vfio_device_fops_write(struct file *filep,
				      const char __user *buf,
				      size_t count, loff_t *ppos)
{
	struct vfio_device_file *vdf = filep->private_data;
	struct vfio_device *vdev = vdf->device;

	/* Paired with smp_store_release() following vfio_df_open() */
	if (!smp_load_acquire(&vdf->access_granted))
		return -EINVAL;

	return likely(vdev->ops->write) ?
		vdev->ops->write(vdev, buf, count, ppos) : -EINVAL;
}
/* Device file mmap: forward to the driver's mmap op once access is granted. */
static int vfio_device_fops_mmap(struct file *filep, struct vm_area_struct *vma)
{
	struct vfio_device_file *vdf = filep->private_data;
	struct vfio_device *vdev = vdf->device;

	/* Paired with smp_store_release() following vfio_df_open() */
	if (!smp_load_acquire(&vdf->access_granted))
		return -EINVAL;

	return likely(vdev->ops->mmap) ? vdev->ops->mmap(vdev, vma) : -EINVAL;
}
/* File operations shared by all vfio device files (group and cdev paths). */
const struct file_operations vfio_device_fops = {
	.owner = THIS_MODULE,
	.open = vfio_device_fops_cdev_open,
	.release = vfio_device_fops_release,
	.read = vfio_device_fops_read,
	.write = vfio_device_fops_write,
	.unlocked_ioctl = vfio_device_fops_unl_ioctl,
	.compat_ioctl = compat_ptr_ioctl,
	.mmap = vfio_device_fops_mmap,
};
/*
 * Resolve a struct file to its vfio_device, or NULL if the file is not
 * a vfio device file (checked via its f_op table).
 */
static struct vfio_device *vfio_device_from_file(struct file *file)
{
	struct vfio_device_file *vdf;

	if (file->f_op != &vfio_device_fops)
		return NULL;

	vdf = file->private_data;
	return vdf->device;
}
/**
* vfio_file_is_valid - True if the file is valid vfio file
* @file: VFIO group file or VFIO device file
*/
bool vfio_file_is_valid(struct file *file)
{
return vfio_group_from_file(file) ||
vfio_device_from_file(file);
}
EXPORT_SYMBOL_GPL(vfio_file_is_valid);
/**
 * vfio_file_enforced_coherent - True if the DMA associated with the VFIO file
 *                               is always CPU cache coherent
 * @file: VFIO group file or VFIO device file
 *
 * Enforced coherency means that the IOMMU ignores things like the PCIe no-snoop
 * bit in DMA transactions. A return of false indicates that the user has
 * rights to access additional instructions such as wbinvd on x86.
 */
bool vfio_file_enforced_coherent(struct file *file)
{
	struct vfio_group *grp = vfio_group_from_file(file);
	struct vfio_device *vdev;

	if (grp)
		return vfio_group_enforced_coherent(grp);

	vdev = vfio_device_from_file(file);
	if (vdev)
		return device_iommu_capable(vdev->dev,
					    IOMMU_CAP_ENFORCE_CACHE_COHERENCY);

	/* Not a vfio file at all: report the conservative default. */
	return true;
}
EXPORT_SYMBOL_GPL(vfio_file_enforced_coherent);
/* Record @kvm on the device file under its reference lock. */
static void vfio_device_file_set_kvm(struct file *file, struct kvm *kvm)
{
	struct vfio_device_file *vdf = file->private_data;

	/*
	 * The kvm is first recorded in the vfio_device_file, and will
	 * be propagated to vfio_device::kvm when the file is bound to
	 * iommufd successfully in the vfio device cdev path.
	 */
	spin_lock(&vdf->kvm_ref_lock);
	vdf->kvm = kvm;
	spin_unlock(&vdf->kvm_ref_lock);
}
/**
 * vfio_file_set_kvm - Link a kvm with VFIO drivers
 * @file: VFIO group file or VFIO device file
 * @kvm: KVM to link
 *
 * When a VFIO device is first opened the KVM will be available in
 * device->kvm if one was associated with the file.
 */
void vfio_file_set_kvm(struct file *file, struct kvm *kvm)
{
	struct vfio_group *grp = vfio_group_from_file(file);

	if (grp)
		vfio_group_set_kvm(grp, kvm);

	if (vfio_device_from_file(file))
		vfio_device_file_set_kvm(file, kvm);
}
EXPORT_SYMBOL_GPL(vfio_file_set_kvm);
/*
* Sub-module support
*/
/*
* Helper for managing a buffer of info chain capabilities, allocate or
* reallocate a buffer with additional @size, filling in @id and @version
* of the capability. A pointer to the new capability is returned.
*
* NB. The chain is based at the head of the buffer, so new entries are
* added to the tail, vfio_info_cap_shift() should be called to fixup the
* next offsets prior to copying to the user buffer.
*/
/*
 * Grow the capability buffer by @size bytes and append a new capability
 * header with @id/@version, linking it at the tail of the chain.  On
 * allocation failure the whole buffer is freed and ERR_PTR is returned.
 */
struct vfio_info_cap_header *vfio_info_cap_add(struct vfio_info_cap *caps,
					       size_t size, u16 id, u16 version)
{
	void *buf;
	struct vfio_info_cap_header *header, *tmp;

	/* Ensure that the next capability struct will be aligned */
	size = ALIGN(size, sizeof(u64));

	buf = krealloc(caps->buf, caps->size + size, GFP_KERNEL);
	if (!buf) {
		/* krealloc failure leaves the old buffer; drop it entirely. */
		kfree(caps->buf);
		caps->buf = NULL;
		caps->size = 0;
		return ERR_PTR(-ENOMEM);
	}

	caps->buf = buf;
	header = buf + caps->size;

	/* Eventually copied to user buffer, zero */
	memset(header, 0, size);

	header->id = id;
	header->version = version;

	/* Add to the end of the capability chain */
	for (tmp = buf; tmp->next; tmp = buf + tmp->next)
		; /* nothing */

	/* 'next' holds the offset of the new entry within the buffer. */
	tmp->next = caps->size;
	caps->size += size;

	return header;
}
EXPORT_SYMBOL_GPL(vfio_info_cap_add);
/*
 * Rebase all 'next' offsets in the capability chain by @offset, used
 * just before the chain is copied after a fixed-size header in the
 * user buffer.  Offsets in the chain are buffer-relative until now.
 */
void vfio_info_cap_shift(struct vfio_info_cap *caps, size_t offset)
{
	struct vfio_info_cap_header *tmp;
	void *buf = (void *)caps->buf;

	/* Capability structs should start with proper alignment */
	WARN_ON(!IS_ALIGNED(offset, sizeof(u64)));

	/* Subtract @offset while walking since 'next' was already shifted. */
	for (tmp = buf; tmp->next; tmp = buf + tmp->next - offset)
		tmp->next += offset;
}
EXPORT_SYMBOL(vfio_info_cap_shift);
/*
 * Append a caller-built capability of @size bytes to the chain,
 * copying its payload (everything after the header) verbatim.
 */
int vfio_info_add_capability(struct vfio_info_cap *caps,
			     struct vfio_info_cap_header *cap, size_t size)
{
	struct vfio_info_cap_header *hdr =
		vfio_info_cap_add(caps, size, cap->id, cap->version);

	if (IS_ERR(hdr))
		return PTR_ERR(hdr);

	/* The header fields were set by vfio_info_cap_add(); copy the rest. */
	memcpy(hdr + 1, cap + 1, size - sizeof(*hdr));
	return 0;
}
EXPORT_SYMBOL(vfio_info_add_capability);
/*
 * Validate a user-provided vfio_irq_set header for a device with
 * @num_irqs interrupts across @max_irq_type index types, and compute
 * the expected payload size in *@data_size (0 for DATA_NONE).
 * Returns 0 when the header is consistent, -EINVAL otherwise.
 */
int vfio_set_irqs_validate_and_prepare(struct vfio_irq_set *hdr, int num_irqs,
				       int max_irq_type, size_t *data_size)
{
	unsigned long minsz;
	size_t size;

	minsz = offsetofend(struct vfio_irq_set, count);

	/* count >= U32_MAX - start would overflow start + count below. */
	if ((hdr->argsz < minsz) || (hdr->index >= max_irq_type) ||
	    (hdr->count >= (U32_MAX - hdr->start)) ||
	    (hdr->flags & ~(VFIO_IRQ_SET_DATA_TYPE_MASK |
			    VFIO_IRQ_SET_ACTION_TYPE_MASK)))
		return -EINVAL;

	if (data_size)
		*data_size = 0;

	if (hdr->start >= num_irqs || hdr->start + hdr->count > num_irqs)
		return -EINVAL;

	/* Per-IRQ payload element size depends on the data type flag. */
	switch (hdr->flags & VFIO_IRQ_SET_DATA_TYPE_MASK) {
	case VFIO_IRQ_SET_DATA_NONE:
		size = 0;
		break;
	case VFIO_IRQ_SET_DATA_BOOL:
		size = sizeof(uint8_t);
		break;
	case VFIO_IRQ_SET_DATA_EVENTFD:
		size = sizeof(int32_t);
		break;
	default:
		return -EINVAL;
	}

	if (size) {
		if (hdr->argsz - minsz < hdr->count * size)
			return -EINVAL;

		if (!data_size)
			return -EINVAL;

		*data_size = hdr->count * size;
	}

	return 0;
}
EXPORT_SYMBOL(vfio_set_irqs_validate_and_prepare);
/*
 * Pin contiguous user pages and return their associated host pages for local
 * domain only.
 * @device [in]  : device
 * @iova [in]    : starting IOVA of user pages to be pinned.
 * @npage [in]   : count of pages to be pinned.  This count should not
 *		   be greater than VFIO_PIN_PAGES_MAX_ENTRIES.
 * @prot [in]    : protection flags
 * @pages[out]   : array of host pages
 * Return error or number of pages pinned.
 *
 * A driver may only call this function if the vfio_device was created
 * by vfio_register_emulated_iommu_dev() due to vfio_device_container_pin_pages().
 */
int vfio_pin_pages(struct vfio_device *device, dma_addr_t iova,
		   int npage, int prot, struct page **pages)
{
	/* group->container cannot change while a vfio device is open */
	if (!pages || !npage || WARN_ON(!vfio_assert_device_open(device)))
		return -EINVAL;
	/* A dma_unmap op is required so pins can be invalidated later. */
	if (!device->ops->dma_unmap)
		return -EINVAL;
	if (vfio_device_has_container(device))
		return vfio_device_container_pin_pages(device, iova,
						       npage, prot, pages);
	if (device->iommufd_access) {
		int ret;

		/* iommufd uses unsigned long IOVAs; reject wider values. */
		if (iova > ULONG_MAX)
			return -EINVAL;
		/*
		 * VFIO ignores the sub page offset, npages is from the start of
		 * a PAGE_SIZE chunk of IOVA. The caller is expected to recover
		 * the sub page offset by doing:
		 *     pages[0] + (iova % PAGE_SIZE)
		 */
		ret = iommufd_access_pin_pages(
			device->iommufd_access, ALIGN_DOWN(iova, PAGE_SIZE),
			npage * PAGE_SIZE, pages,
			(prot & IOMMU_WRITE) ? IOMMUFD_ACCESS_RW_WRITE : 0);
		if (ret)
			return ret;
		return npage;
	}
	/* Neither container nor iommufd backing: cannot pin. */
	return -EINVAL;
}
EXPORT_SYMBOL(vfio_pin_pages);
/*
 * Unpin contiguous host pages for local domain only.
 * @device [in]  : device
 * @iova [in]    : starting address of user pages to be unpinned.
 * @npage [in]   : count of pages to be unpinned.  This count should not
 *                 be greater than VFIO_PIN_PAGES_MAX_ENTRIES.
 */
void vfio_unpin_pages(struct vfio_device *device, dma_addr_t iova, int npage)
{
	if (WARN_ON(!vfio_assert_device_open(device)))
		return;
	if (WARN_ON(!device->ops->dma_unmap))
		return;

	if (vfio_device_has_container(device)) {
		vfio_device_container_unpin_pages(device, iova, npage);
	} else if (device->iommufd_access) {
		if (WARN_ON(iova > ULONG_MAX))
			return;
		iommufd_access_unpin_pages(device->iommufd_access,
					   ALIGN_DOWN(iova, PAGE_SIZE),
					   npage * PAGE_SIZE);
	}
}
EXPORT_SYMBOL(vfio_unpin_pages);
/*
 * This interface allows the CPUs to perform some sort of virtual DMA on
 * behalf of the device.
 *
 * CPUs read/write from/into a range of IOVAs pointing to user space memory
 * into/from a kernel buffer.
 *
 * As the read/write of user space memory is conducted via the CPUs and is
 * not a real device DMA, it is not necessary to pin the user space memory.
 *
 * @device [in]  : VFIO device
 * @iova [in]    : base IOVA of a user space buffer
 * @data [in]    : pointer to kernel buffer
 * @len [in]     : kernel buffer length
 * @write        : indicate read or write
 * Return error code on failure or 0 on success.
 */
int vfio_dma_rw(struct vfio_device *device, dma_addr_t iova, void *data,
		size_t len, bool write)
{
	if (!data || len <= 0 || !vfio_assert_device_open(device))
		return -EINVAL;

	if (vfio_device_has_container(device))
		return vfio_device_container_dma_rw(device, iova,
						    data, len, write);

	if (device->iommufd_access) {
		unsigned int rw_flags = 0;

		if (iova > ULONG_MAX)
			return -EINVAL;

		/* VFIO historically tries to auto-detect a kthread */
		if (!current->mm)
			rw_flags |= IOMMUFD_ACCESS_RW_KTHREAD;
		if (write)
			rw_flags |= IOMMUFD_ACCESS_RW_WRITE;

		return iommufd_access_rw(device->iommufd_access, iova, data,
					 len, rw_flags);
	}

	/* No container and no iommufd access: nothing to do the I/O through. */
	return -EINVAL;
}
EXPORT_SYMBOL(vfio_dma_rw);
/*
* Module/class support
*/
/*
 * Module init: bring up the group core, virqfd support, the vfio-dev
 * class and the cdev region, unwinding in reverse order on failure.
 */
static int __init vfio_init(void)
{
	int ret;

	ida_init(&vfio.device_ida);

	ret = vfio_group_init();
	if (ret)
		return ret;

	ret = vfio_virqfd_init();
	if (ret)
		goto err_virqfd;

	/* /sys/class/vfio-dev/vfioX */
	vfio.device_class = class_create("vfio-dev");
	if (IS_ERR(vfio.device_class)) {
		ret = PTR_ERR(vfio.device_class);
		goto err_dev_class;
	}

	ret = vfio_cdev_init(vfio.device_class);
	if (ret)
		goto err_alloc_dev_chrdev;

	pr_info(DRIVER_DESC " version: " DRIVER_VERSION "\n");
	return 0;

err_alloc_dev_chrdev:
	class_destroy(vfio.device_class);
	vfio.device_class = NULL;
err_dev_class:
	vfio_virqfd_exit();
err_virqfd:
	vfio_group_cleanup();
	return ret;
}
/* Module exit: tear everything down in reverse order of vfio_init(). */
static void __exit vfio_cleanup(void)
{
	ida_destroy(&vfio.device_ida);
	vfio_cdev_cleanup();
	class_destroy(vfio.device_class);
	vfio.device_class = NULL;
	vfio_virqfd_exit();
	vfio_group_cleanup();
	xa_destroy(&vfio_device_set_xa);
}
module_init(vfio_init);
module_exit(vfio_cleanup);
MODULE_VERSION(DRIVER_VERSION);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_SOFTDEP("post: vfio_iommu_type1 vfio_iommu_spapr_tce");
| linux-master | drivers/vfio/vfio_main.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* VFIO PCI interrupt handling
*
* Copyright (C) 2012 Red Hat, Inc. All rights reserved.
* Author: Alex Williamson <[email protected]>
*
* Derived from original vfio:
* Copyright 2010 Cisco Systems, Inc. All rights reserved.
* Author: Tom Lyon, [email protected]
*/
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/eventfd.h>
#include <linux/msi.h>
#include <linux/pci.h>
#include <linux/file.h>
#include <linux/vfio.h>
#include <linux/wait.h>
#include <linux/slab.h>
#include "vfio_pci_priv.h"
/* Per-vector interrupt state, stored in vfio_pci_core_device::ctx xarray. */
struct vfio_pci_irq_ctx {
	struct eventfd_ctx	*trigger;	/* signalled to userspace on IRQ */
	struct virqfd		*unmask;	/* eventfd-driven unmask (INTx) */
	struct virqfd		*mask;		/* eventfd-driven mask (unused yet) */
	char			*name;		/* irq name shown in /proc/interrupts */
	bool			masked;		/* INTx currently masked */
	struct irq_bypass_producer	producer; /* posted-interrupt bypass hookup */
};
/* True if @type is the currently configured interrupt index. */
static bool irq_is(struct vfio_pci_core_device *vdev, int type)
{
	return type == vdev->irq_type;
}
/* True if the device is currently configured for INTx. */
static bool is_intx(struct vfio_pci_core_device *vdev)
{
	return irq_is(vdev, VFIO_PCI_INTX_IRQ_INDEX);
}
static bool is_irq_none(struct vfio_pci_core_device *vdev)
{
return !(vdev->irq_type == VFIO_PCI_INTX_IRQ_INDEX ||
vdev->irq_type == VFIO_PCI_MSI_IRQ_INDEX ||
vdev->irq_type == VFIO_PCI_MSIX_IRQ_INDEX);
}
/* Look up the per-vector context; NULL if the vector was never set up. */
static
struct vfio_pci_irq_ctx *vfio_irq_ctx_get(struct vfio_pci_core_device *vdev,
					  unsigned long index)
{
	return xa_load(&vdev->ctx, index);
}
/* Unpublish the context from the xarray, then free it. */
static void vfio_irq_ctx_free(struct vfio_pci_core_device *vdev,
			      struct vfio_pci_irq_ctx *ctx, unsigned long index)
{
	xa_erase(&vdev->ctx, index);
	kfree(ctx);
}
/* Allocate a zeroed per-vector context and publish it at @index. */
static struct vfio_pci_irq_ctx *
vfio_irq_ctx_alloc(struct vfio_pci_core_device *vdev, unsigned long index)
{
	struct vfio_pci_irq_ctx *new_ctx;

	new_ctx = kzalloc(sizeof(*new_ctx), GFP_KERNEL_ACCOUNT);
	if (!new_ctx)
		return NULL;

	/* Insertion fails if @index is already occupied or on OOM. */
	if (xa_insert(&vdev->ctx, index, new_ctx, GFP_KERNEL_ACCOUNT)) {
		kfree(new_ctx);
		return NULL;
	}

	return new_ctx;
}
/*
* INTx
*/
/*
 * Signal the INTx trigger eventfd to userspace.  No-op unless INTx is
 * the active interrupt type and not virtually disabled.  Also used as
 * a virqfd thread callback, hence the (opaque, unused) signature.
 */
static void vfio_send_intx_eventfd(void *opaque, void *unused)
{
	struct vfio_pci_core_device *vdev = opaque;

	if (likely(is_intx(vdev) && !vdev->virq_disabled)) {
		struct vfio_pci_irq_ctx *ctx;

		ctx = vfio_irq_ctx_get(vdev, 0);
		if (WARN_ON_ONCE(!ctx))
			return;
		eventfd_signal(ctx->trigger, 1);
	}
}
/* Returns true if the INTx vfio_pci_irq_ctx.masked value is changed. */
bool vfio_pci_intx_mask(struct vfio_pci_core_device *vdev)
{
	struct pci_dev *pdev = vdev->pdev;
	struct vfio_pci_irq_ctx *ctx;
	unsigned long flags;
	bool masked_changed = false;

	/* irqlock serializes mask state against the hard IRQ handler. */
	spin_lock_irqsave(&vdev->irqlock, flags);

	/*
	 * Masking can come from interrupt, ioctl, or config space
	 * via INTx disable.  The latter means this can get called
	 * even when not using intx delivery.  In this case, just
	 * try to have the physical bit follow the virtual bit.
	 */
	if (unlikely(!is_intx(vdev))) {
		if (vdev->pci_2_3)
			pci_intx(pdev, 0);
		goto out_unlock;
	}

	ctx = vfio_irq_ctx_get(vdev, 0);
	if (WARN_ON_ONCE(!ctx))
		goto out_unlock;

	if (!ctx->masked) {
		/*
		 * Can't use check_and_mask here because we always want to
		 * mask, not just when something is pending.
		 */
		if (vdev->pci_2_3)
			pci_intx(pdev, 0);
		else
			disable_irq_nosync(pdev->irq);

		ctx->masked = true;
		masked_changed = true;
	}

out_unlock:
	spin_unlock_irqrestore(&vdev->irqlock, flags);
	return masked_changed;
}
/*
 * If this is triggered by an eventfd, we can't call eventfd_signal
 * or else we'll deadlock on the eventfd wait queue.  Return >0 when
 * a signal is necessary, which can then be handled via a work queue
 * or directly depending on the caller.
 */
static int vfio_pci_intx_unmask_handler(void *opaque, void *unused)
{
	struct vfio_pci_core_device *vdev = opaque;
	struct pci_dev *pdev = vdev->pdev;
	struct vfio_pci_irq_ctx *ctx;
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&vdev->irqlock, flags);

	/*
	 * Unmasking comes from ioctl or config, so again, have the
	 * physical bit follow the virtual even when not using INTx.
	 */
	if (unlikely(!is_intx(vdev))) {
		if (vdev->pci_2_3)
			pci_intx(pdev, 1);
		goto out_unlock;
	}

	ctx = vfio_irq_ctx_get(vdev, 0);
	if (WARN_ON_ONCE(!ctx))
		goto out_unlock;

	if (ctx->masked && !vdev->virq_disabled) {
		/*
		 * A pending interrupt here would immediately trigger,
		 * but we can avoid that overhead by just re-sending
		 * the interrupt to the user.
		 */
		if (vdev->pci_2_3) {
			if (!pci_check_and_unmask_intx(pdev))
				ret = 1;
		} else
			enable_irq(pdev->irq);

		/* Stay masked when a re-send (ret > 0) is required. */
		ctx->masked = (ret > 0);
	}

out_unlock:
	spin_unlock_irqrestore(&vdev->irqlock, flags);

	return ret;
}
/* Unmask INTx and, if an interrupt was pending, re-signal the user. */
void vfio_pci_intx_unmask(struct vfio_pci_core_device *vdev)
{
	int pending = vfio_pci_intx_unmask_handler(vdev, NULL);

	if (pending > 0)
		vfio_send_intx_eventfd(vdev, NULL);
}
/*
 * Hard IRQ handler for INTx.  Masks the (level-triggered) interrupt so
 * it doesn't re-fire, then forwards it to the user's eventfd.  Returns
 * IRQ_NONE for a shared line that wasn't ours.
 */
static irqreturn_t vfio_intx_handler(int irq, void *dev_id)
{
	struct vfio_pci_core_device *vdev = dev_id;
	struct vfio_pci_irq_ctx *ctx;
	unsigned long flags;
	int ret = IRQ_NONE;

	ctx = vfio_irq_ctx_get(vdev, 0);
	if (WARN_ON_ONCE(!ctx))
		return ret;

	spin_lock_irqsave(&vdev->irqlock, flags);

	if (!vdev->pci_2_3) {
		/* Non-DisINTx device: the line is exclusive, always ours. */
		disable_irq_nosync(vdev->pdev->irq);
		ctx->masked = true;
		ret = IRQ_HANDLED;
	} else if (!ctx->masked &&  /* may be shared */
		   pci_check_and_mask_intx(vdev->pdev)) {
		ctx->masked = true;
		ret = IRQ_HANDLED;
	}

	spin_unlock_irqrestore(&vdev->irqlock, flags);

	if (ret == IRQ_HANDLED)
		vfio_send_intx_eventfd(vdev, NULL);

	return ret;
}
/*
 * Switch the device into INTx mode: allocate the vector-0 context and
 * set the initial mask state.  The actual IRQ is requested later by
 * vfio_intx_set_signal().
 */
static int vfio_intx_enable(struct vfio_pci_core_device *vdev)
{
	struct vfio_pci_irq_ctx *ctx;

	if (!is_irq_none(vdev))
		return -EINVAL;

	if (!vdev->pdev->irq)
		return -ENODEV;

	ctx = vfio_irq_ctx_alloc(vdev, 0);
	if (!ctx)
		return -ENOMEM;

	/*
	 * If the virtual interrupt is masked, restore it.  Devices
	 * supporting DisINTx can be masked at the hardware level
	 * here, non-PCI-2.3 devices will have to wait until the
	 * interrupt is enabled.
	 */
	ctx->masked = vdev->virq_disabled;
	if (vdev->pci_2_3)
		pci_intx(vdev->pdev, !ctx->masked);

	vdev->irq_type = VFIO_PCI_INTX_IRQ_INDEX;

	return 0;
}
/*
 * (Re)wire the INTx trigger eventfd: tear down any existing handler,
 * then, unless @fd is negative (disable only), fetch the new eventfd
 * and request the IRQ.  IRQF_SHARED is only used for DisINTx-capable
 * devices, which can safely share the line.
 */
static int vfio_intx_set_signal(struct vfio_pci_core_device *vdev, int fd)
{
	struct pci_dev *pdev = vdev->pdev;
	unsigned long irqflags = IRQF_SHARED;
	struct vfio_pci_irq_ctx *ctx;
	struct eventfd_ctx *trigger;
	unsigned long flags;
	int ret;

	ctx = vfio_irq_ctx_get(vdev, 0);
	if (WARN_ON_ONCE(!ctx))
		return -EINVAL;

	if (ctx->trigger) {
		/* Free the IRQ before dropping the eventfd it signals. */
		free_irq(pdev->irq, vdev);
		kfree(ctx->name);
		eventfd_ctx_put(ctx->trigger);
		ctx->trigger = NULL;
	}

	if (fd < 0) /* Disable only */
		return 0;

	ctx->name = kasprintf(GFP_KERNEL_ACCOUNT, "vfio-intx(%s)",
			      pci_name(pdev));
	if (!ctx->name)
		return -ENOMEM;

	trigger = eventfd_ctx_fdget(fd);
	if (IS_ERR(trigger)) {
		kfree(ctx->name);
		return PTR_ERR(trigger);
	}

	ctx->trigger = trigger;

	if (!vdev->pci_2_3)
		irqflags = 0;

	ret = request_irq(pdev->irq, vfio_intx_handler,
			  irqflags, ctx->name, vdev);
	if (ret) {
		ctx->trigger = NULL;
		kfree(ctx->name);
		eventfd_ctx_put(trigger);
		return ret;
	}

	/*
	 * INTx disable will stick across the new irq setup,
	 * disable_irq won't.
	 */
	spin_lock_irqsave(&vdev->irqlock, flags);
	if (!vdev->pci_2_3 && ctx->masked)
		disable_irq_nosync(pdev->irq);
	spin_unlock_irqrestore(&vdev->irqlock, flags);

	return 0;
}
/*
 * Tear down INTx entirely: stop the virqfd mask/unmask helpers, free
 * the IRQ and eventfd via set_signal(-1), reset the irq type and drop
 * the vector-0 context.
 */
static void vfio_intx_disable(struct vfio_pci_core_device *vdev)
{
	struct vfio_pci_irq_ctx *ctx;

	ctx = vfio_irq_ctx_get(vdev, 0);
	WARN_ON_ONCE(!ctx);
	if (ctx) {
		vfio_virqfd_disable(&ctx->unmask);
		vfio_virqfd_disable(&ctx->mask);
	}
	vfio_intx_set_signal(vdev, -1);
	vdev->irq_type = VFIO_PCI_NUM_IRQS;
	/* vfio_irq_ctx_free(NULL) is safe: xa_erase + kfree both take NULL. */
	vfio_irq_ctx_free(vdev, ctx, 0);
}
/*
* MSI/MSI-X
*/
/* MSI/MSI-X hard IRQ handler: relay straight to the user's eventfd. */
static irqreturn_t vfio_msihandler(int irq, void *arg)
{
	struct eventfd_ctx *trigger = arg;

	eventfd_signal(trigger, 1);
	return IRQ_HANDLED;
}
/*
 * Switch the device into MSI or MSI-X mode with @nvec vectors.  If
 * fewer than @nvec vectors are available, everything is freed and the
 * supported count is returned so userspace can retry.
 */
static int vfio_msi_enable(struct vfio_pci_core_device *vdev, int nvec, bool msix)
{
	struct pci_dev *pdev = vdev->pdev;
	unsigned int flag = msix ? PCI_IRQ_MSIX : PCI_IRQ_MSI;
	int ret;
	u16 cmd;

	if (!is_irq_none(vdev))
		return -EINVAL;

	/* return the number of supported vectors if we can't get all: */
	cmd = vfio_pci_memory_lock_and_enable(vdev);
	ret = pci_alloc_irq_vectors(pdev, 1, nvec, flag);
	if (ret < nvec) {
		if (ret > 0)
			pci_free_irq_vectors(pdev);
		vfio_pci_memory_unlock_and_restore(vdev, cmd);
		return ret;
	}
	vfio_pci_memory_unlock_and_restore(vdev, cmd);

	vdev->irq_type = msix ? VFIO_PCI_MSIX_IRQ_INDEX :
				VFIO_PCI_MSI_IRQ_INDEX;

	if (!msix) {
		/*
		 * Compute the virtual hardware field for max msi vectors -
		 * it is the log base 2 of the number of vectors.
		 */
		vdev->msi_qmax = fls(nvec * 2 - 1) - 1;
	}

	return 0;
}
/*
 * vfio_msi_alloc_irq() returns the Linux IRQ number of an MSI or MSI-X device
 * interrupt vector. If a Linux IRQ number is not available then a new
 * interrupt is allocated if dynamic MSI-X is supported.
 *
 * Where is vfio_msi_free_irq()? Allocated interrupts are maintained,
 * essentially forming a cache that subsequent allocations can draw from.
 * Interrupts are freed using pci_free_irq_vectors() when MSI/MSI-X is
 * disabled.
 */
static int vfio_msi_alloc_irq(struct vfio_pci_core_device *vdev,
			      unsigned int vector, bool msix)
{
	struct pci_dev *pdev = vdev->pdev;
	struct msi_map map;
	int irq;
	u16 cmd;

	/* irq == 0 would mean "no IRQ" to request_irq(); treat as invalid. */
	irq = pci_irq_vector(pdev, vector);
	if (WARN_ON_ONCE(irq == 0))
		return -EINVAL;
	if (irq > 0 || !msix || !vdev->has_dyn_msix)
		return irq;

	/* Dynamic MSI-X: allocate the missing vector on demand. */
	cmd = vfio_pci_memory_lock_and_enable(vdev);
	map = pci_msix_alloc_irq_at(pdev, vector, NULL);
	vfio_pci_memory_unlock_and_restore(vdev, cmd);

	return map.index < 0 ? map.index : map.virq;
}
/*
 * (Re)wire one MSI/MSI-X vector to a user eventfd.  Any existing
 * binding for @vector is torn down first; @fd < 0 means disable only.
 * Device memory is enabled around free_irq/request_irq because the
 * device may DMA/MMIO during those operations.
 */
static int vfio_msi_set_vector_signal(struct vfio_pci_core_device *vdev,
				      unsigned int vector, int fd, bool msix)
{
	struct pci_dev *pdev = vdev->pdev;
	struct vfio_pci_irq_ctx *ctx;
	struct eventfd_ctx *trigger;
	int irq = -EINVAL, ret;
	u16 cmd;

	ctx = vfio_irq_ctx_get(vdev, vector);

	if (ctx) {
		irq_bypass_unregister_producer(&ctx->producer);
		irq = pci_irq_vector(pdev, vector);
		cmd = vfio_pci_memory_lock_and_enable(vdev);
		free_irq(irq, ctx->trigger);
		vfio_pci_memory_unlock_and_restore(vdev, cmd);
		/* Interrupt stays allocated, will be freed at MSI-X disable. */
		kfree(ctx->name);
		eventfd_ctx_put(ctx->trigger);
		vfio_irq_ctx_free(vdev, ctx, vector);
	}

	if (fd < 0)
		return 0;

	if (irq == -EINVAL) {
		/* Interrupt stays allocated, will be freed at MSI-X disable. */
		irq = vfio_msi_alloc_irq(vdev, vector, msix);
		if (irq < 0)
			return irq;
	}

	ctx = vfio_irq_ctx_alloc(vdev, vector);
	if (!ctx)
		return -ENOMEM;

	ctx->name = kasprintf(GFP_KERNEL_ACCOUNT, "vfio-msi%s[%d](%s)",
			      msix ? "x" : "", vector, pci_name(pdev));
	if (!ctx->name) {
		ret = -ENOMEM;
		goto out_free_ctx;
	}

	trigger = eventfd_ctx_fdget(fd);
	if (IS_ERR(trigger)) {
		ret = PTR_ERR(trigger);
		goto out_free_name;
	}

	/*
	 * If the vector was previously allocated, refresh the on-device
	 * message data before enabling in case it had been cleared or
	 * corrupted (e.g. due to backdoor resets) since writing.
	 */
	cmd = vfio_pci_memory_lock_and_enable(vdev);
	if (msix) {
		struct msi_msg msg;

		get_cached_msi_msg(irq, &msg);
		pci_write_msi_msg(irq, &msg);
	}

	ret = request_irq(irq, vfio_msihandler, 0, ctx->name, trigger);
	vfio_pci_memory_unlock_and_restore(vdev, cmd);
	if (ret)
		goto out_put_eventfd_ctx;

	/* irqbypass failure is non-fatal: fall back to normal delivery. */
	ctx->producer.token = trigger;
	ctx->producer.irq = irq;
	ret = irq_bypass_register_producer(&ctx->producer);
	if (unlikely(ret)) {
		dev_info(&pdev->dev,
		"irq bypass producer (token %p) registration fails: %d\n",
		ctx->producer.token, ret);

		ctx->producer.token = NULL;
	}
	ctx->trigger = trigger;

	return 0;

out_put_eventfd_ctx:
	eventfd_ctx_put(trigger);
out_free_name:
	kfree(ctx->name);
out_free_ctx:
	vfio_irq_ctx_free(vdev, ctx, vector);
	return ret;
}
/*
 * Wire up eventfd triggers for vectors [start, start + count).  On
 * failure, every vector touched in this call is disabled again.
 */
static int vfio_msi_set_block(struct vfio_pci_core_device *vdev, unsigned start,
			      unsigned count, int32_t *fds, bool msix)
{
	unsigned int i, j;
	int ret = 0;

	for (i = 0, j = start; i < count && !ret; i++, j++) {
		int fd = fds ? fds[i] : -1;

		ret = vfio_msi_set_vector_signal(vdev, j, fd, msix);
	}

	if (ret) {
		/* j is one past the failed vector; roll all of them back. */
		for (i = start; i < j; i++)
			vfio_msi_set_vector_signal(vdev, i, -1, msix);
	}

	return ret;
}
/*
 * Disable MSI/MSI-X: tear down every configured vector, then release
 * all allocated interrupt vectors.
 */
static void vfio_msi_disable(struct vfio_pci_core_device *vdev, bool msix)
{
	struct pci_dev *pdev = vdev->pdev;
	struct vfio_pci_irq_ctx *ctx;
	unsigned long i;
	u16 cmd;

	xa_for_each(&vdev->ctx, i, ctx) {
		vfio_virqfd_disable(&ctx->unmask);
		vfio_virqfd_disable(&ctx->mask);
		vfio_msi_set_vector_signal(vdev, i, -1, msix);
	}

	cmd = vfio_pci_memory_lock_and_enable(vdev);
	pci_free_irq_vectors(pdev);
	vfio_pci_memory_unlock_and_restore(vdev, cmd);

	/*
	 * Both disable paths above use pci_intx_for_msi() to clear DisINTx
	 * via their shutdown paths.  Restore for NoINTx devices.
	 */
	if (vdev->nointx)
		pci_intx(pdev, 0);

	vdev->irq_type = VFIO_PCI_NUM_IRQS;
}
/*
* IOCTL support
*/
/*
 * SET_IRQS ACTION_UNMASK on the INTx index: unmask immediately
 * (DATA_NONE / DATA_BOOL) or arm/disarm an eventfd-driven unmask
 * (DATA_EVENTFD).
 */
static int vfio_pci_set_intx_unmask(struct vfio_pci_core_device *vdev,
				    unsigned index, unsigned start,
				    unsigned count, uint32_t flags, void *data)
{
	if (!is_intx(vdev) || start != 0 || count != 1)
		return -EINVAL;

	if (flags & VFIO_IRQ_SET_DATA_NONE) {
		vfio_pci_intx_unmask(vdev);
	} else if (flags & VFIO_IRQ_SET_DATA_BOOL) {
		uint8_t unmask = *(uint8_t *)data;
		if (unmask)
			vfio_pci_intx_unmask(vdev);
	} else if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
		struct vfio_pci_irq_ctx *ctx = vfio_irq_ctx_get(vdev, 0);
		int32_t fd = *(int32_t *)data;

		if (WARN_ON_ONCE(!ctx))
			return -EINVAL;
		if (fd >= 0)
			return vfio_virqfd_enable((void *) vdev,
						  vfio_pci_intx_unmask_handler,
						  vfio_send_intx_eventfd, NULL,
						  &ctx->unmask, fd);

		/* Negative fd tears down a previously armed unmask eventfd. */
		vfio_virqfd_disable(&ctx->unmask);
	}

	return 0;
}
/*
 * SET_IRQS ACTION_MASK on the INTx index: mask immediately for
 * DATA_NONE / DATA_BOOL; eventfd-driven masking is not implemented.
 */
static int vfio_pci_set_intx_mask(struct vfio_pci_core_device *vdev,
				  unsigned index, unsigned start,
				  unsigned count, uint32_t flags, void *data)
{
	if (!is_intx(vdev) || start != 0 || count != 1)
		return -EINVAL;

	if (flags & VFIO_IRQ_SET_DATA_NONE) {
		vfio_pci_intx_mask(vdev);
	} else if (flags & VFIO_IRQ_SET_DATA_BOOL) {
		uint8_t mask = *(uint8_t *)data;
		if (mask)
			vfio_pci_intx_mask(vdev);
	} else if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
		return -ENOTTY; /* XXX implement me */
	}

	return 0;
}
/*
 * VFIO_DEVICE_SET_IRQS TRIGGER action for INTx: enable/disable INTx,
 * attach a trigger eventfd, or fire a loopback test interrupt.
 */
static int vfio_pci_set_intx_trigger(struct vfio_pci_core_device *vdev,
				     unsigned index, unsigned start,
				     unsigned count, uint32_t flags, void *data)
{
	/* count == 0 with DATA_NONE means "disable INTx entirely". */
	if (is_intx(vdev) && !count && (flags & VFIO_IRQ_SET_DATA_NONE)) {
		vfio_intx_disable(vdev);
		return 0;
	}

	if (!(is_intx(vdev) || is_irq_none(vdev)) || start != 0 || count != 1)
		return -EINVAL;

	if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
		int32_t fd = *(int32_t *)data;
		int ret;

		/* Already enabled: just (re)wire the signaling eventfd. */
		if (is_intx(vdev))
			return vfio_intx_set_signal(vdev, fd);

		/* First user: bring up INTx, then attach the eventfd. */
		ret = vfio_intx_enable(vdev);
		if (ret)
			return ret;

		ret = vfio_intx_set_signal(vdev, fd);
		if (ret)
			vfio_intx_disable(vdev);

		return ret;
	}

	if (!is_intx(vdev))
		return -EINVAL;

	/* DATA_NONE/DATA_BOOL inject a test interrupt toward userspace. */
	if (flags & VFIO_IRQ_SET_DATA_NONE) {
		vfio_send_intx_eventfd(vdev, NULL);
	} else if (flags & VFIO_IRQ_SET_DATA_BOOL) {
		uint8_t trigger = *(uint8_t *)data;

		if (trigger)
			vfio_send_intx_eventfd(vdev, NULL);
	}

	return 0;
}
/*
 * VFIO_DEVICE_SET_IRQS TRIGGER action for MSI/MSI-X: enable the
 * interrupt mode, (dis)connect per-vector eventfds, or loopback-test
 * already-configured vectors.
 */
static int vfio_pci_set_msi_trigger(struct vfio_pci_core_device *vdev,
				    unsigned index, unsigned start,
				    unsigned count, uint32_t flags, void *data)
{
	struct vfio_pci_irq_ctx *ctx;
	unsigned int i;
	bool msix = (index == VFIO_PCI_MSIX_IRQ_INDEX) ? true : false;

	/* count == 0 with DATA_NONE means "disable this interrupt mode". */
	if (irq_is(vdev, index) && !count && (flags & VFIO_IRQ_SET_DATA_NONE)) {
		vfio_msi_disable(vdev, msix);
		return 0;
	}

	if (!(irq_is(vdev, index) || is_irq_none(vdev)))
		return -EINVAL;

	if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
		int32_t *fds = data;
		int ret;

		/* Already in this mode: just rewire the requested range. */
		if (vdev->irq_type == index)
			return vfio_msi_set_block(vdev, start, count,
						  fds, msix);

		ret = vfio_msi_enable(vdev, start + count, msix);
		if (ret)
			return ret;

		ret = vfio_msi_set_block(vdev, start, count, fds, msix);
		if (ret)
			vfio_msi_disable(vdev, msix);

		return ret;
	}

	if (!irq_is(vdev, index))
		return -EINVAL;

	/* DATA_NONE/DATA_BOOL manually signal the configured eventfds. */
	for (i = start; i < start + count; i++) {
		ctx = vfio_irq_ctx_get(vdev, i);
		if (!ctx)
			continue;
		if (flags & VFIO_IRQ_SET_DATA_NONE) {
			eventfd_signal(ctx->trigger, 1);
		} else if (flags & VFIO_IRQ_SET_DATA_BOOL) {
			uint8_t *bools = data;

			if (bools[i - start])
				eventfd_signal(ctx->trigger, 1);
		}
	}

	return 0;
}
/*
 * Manage a single optional eventfd trigger (used for err/req
 * notifications).  DATA_EVENTFD installs/replaces/clears the eventfd;
 * DATA_NONE and DATA_BOOL signal an existing one, with count == 0 plus
 * DATA_NONE meaning "remove the trigger".
 */
static int vfio_pci_set_ctx_trigger_single(struct eventfd_ctx **ctx,
					   unsigned int count, uint32_t flags,
					   void *data)
{
	/* DATA_NONE/DATA_BOOL enables loopback testing */
	if (flags & VFIO_IRQ_SET_DATA_NONE) {
		if (*ctx) {
			if (count) {
				eventfd_signal(*ctx, 1);
			} else {
				eventfd_ctx_put(*ctx);
				*ctx = NULL;
			}
			return 0;
		}
	} else if (flags & VFIO_IRQ_SET_DATA_BOOL) {
		uint8_t trigger;

		if (!count)
			return -EINVAL;

		trigger = *(uint8_t *)data;
		if (trigger && *ctx)
			eventfd_signal(*ctx, 1);

		return 0;
	} else if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
		int32_t fd;

		if (!count)
			return -EINVAL;

		fd = *(int32_t *)data;
		if (fd == -1) {
			/* fd == -1 tears down the existing trigger. */
			if (*ctx)
				eventfd_ctx_put(*ctx);
			*ctx = NULL;
		} else if (fd >= 0) {
			struct eventfd_ctx *efdctx;

			efdctx = eventfd_ctx_fdget(fd);
			if (IS_ERR(efdctx))
				return PTR_ERR(efdctx);

			/* Replace any previous trigger. */
			if (*ctx)
				eventfd_ctx_put(*ctx);

			*ctx = efdctx;
		}
		return 0;
	}

	/* DATA_NONE without an existing trigger, or unknown data type. */
	return -EINVAL;
}
/*
 * TRIGGER action for the error IRQ: exactly one optional trigger at
 * index ERR / start 0, managed by the common single-trigger helper.
 */
static int vfio_pci_set_err_trigger(struct vfio_pci_core_device *vdev,
				    unsigned index, unsigned start,
				    unsigned count, uint32_t flags, void *data)
{
	if (start != 0 || count > 1 || index != VFIO_PCI_ERR_IRQ_INDEX)
		return -EINVAL;

	return vfio_pci_set_ctx_trigger_single(&vdev->err_trigger, count,
					       flags, data);
}
/*
 * TRIGGER action for the request IRQ: exactly one optional trigger at
 * index REQ / start 0, managed by the common single-trigger helper.
 */
static int vfio_pci_set_req_trigger(struct vfio_pci_core_device *vdev,
				    unsigned index, unsigned start,
				    unsigned count, uint32_t flags, void *data)
{
	if (start != 0 || count > 1 || index != VFIO_PCI_REQ_IRQ_INDEX)
		return -EINVAL;

	return vfio_pci_set_ctx_trigger_single(&vdev->req_trigger, count,
					       flags, data);
}
/*
 * Dispatch a VFIO_DEVICE_SET_IRQS request to the handler matching the
 * (index, action) pair.  Unsupported combinations return -ENOTTY.
 */
int vfio_pci_set_irqs_ioctl(struct vfio_pci_core_device *vdev, uint32_t flags,
			    unsigned index, unsigned start, unsigned count,
			    void *data)
{
	int (*func)(struct vfio_pci_core_device *vdev, unsigned index,
		    unsigned start, unsigned count, uint32_t flags,
		    void *data) = NULL;

	switch (index) {
	case VFIO_PCI_INTX_IRQ_INDEX:
		switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) {
		case VFIO_IRQ_SET_ACTION_MASK:
			func = vfio_pci_set_intx_mask;
			break;
		case VFIO_IRQ_SET_ACTION_UNMASK:
			func = vfio_pci_set_intx_unmask;
			break;
		case VFIO_IRQ_SET_ACTION_TRIGGER:
			func = vfio_pci_set_intx_trigger;
			break;
		}
		break;
	case VFIO_PCI_MSI_IRQ_INDEX:
	case VFIO_PCI_MSIX_IRQ_INDEX:
		switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) {
		case VFIO_IRQ_SET_ACTION_MASK:
		case VFIO_IRQ_SET_ACTION_UNMASK:
			/* XXX Need masking support exported */
			break;
		case VFIO_IRQ_SET_ACTION_TRIGGER:
			func = vfio_pci_set_msi_trigger;
			break;
		}
		break;
	case VFIO_PCI_ERR_IRQ_INDEX:
		switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) {
		case VFIO_IRQ_SET_ACTION_TRIGGER:
			/* Error notification is only offered for PCIe. */
			if (pci_is_pcie(vdev->pdev))
				func = vfio_pci_set_err_trigger;
			break;
		}
		break;
	case VFIO_PCI_REQ_IRQ_INDEX:
		switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) {
		case VFIO_IRQ_SET_ACTION_TRIGGER:
			func = vfio_pci_set_req_trigger;
			break;
		}
		break;
	}

	if (!func)
		return -ENOTTY;

	return func(vdev, index, start, count, flags, data);
}
| linux-master | drivers/vfio/pci/vfio_pci_intrs.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2012 Red Hat, Inc. All rights reserved.
* Author: Alex Williamson <[email protected]>
*
* Derived from original vfio:
* Copyright 2010 Cisco Systems, Inc. All rights reserved.
* Author: Tom Lyon, [email protected]
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/aperture.h>
#include <linux/device.h>
#include <linux/eventfd.h>
#include <linux/file.h>
#include <linux/interrupt.h>
#include <linux/iommu.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/notifier.h>
#include <linux/pci.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#include <linux/vgaarb.h>
#include <linux/nospec.h>
#include <linux/sched/mm.h>
#include <linux/iommufd.h>
#if IS_ENABLED(CONFIG_EEH)
#include <asm/eeh.h>
#endif
#include "vfio_pci_priv.h"
#define DRIVER_AUTHOR "Alex Williamson <[email protected]>"
#define DRIVER_DESC "core driver for VFIO based PCI devices"
/*
 * Driver-wide tuning flags.  NOTE(review): presumably exposed as module
 * parameters elsewhere in this file -- confirm.
 */
static bool nointxmask;		/* skip INTx masking probe in core_enable() */
static bool disable_vga;	/* see vfio_vga_disabled() */
static bool disable_idle_d3;	/* hold a runtime-PM reference while enabled */

/* List of PF's that vfio_pci_core_sriov_configure() has been called on */
static DEFINE_MUTEX(vfio_pci_sriov_pfs_mutex);
static LIST_HEAD(vfio_pci_sriov_pfs);
/*
 * Placeholder resource reserving the remainder of the page containing a
 * sub-page BAR (see vfio_pci_probe_mmaps()).
 */
struct vfio_pci_dummy_resource {
	struct resource		resource;
	int			index;		/* BAR index being backed */
	struct list_head	res_next;	/* on vdev->dummy_resources_list */
};

/* Shared VF token; 'users' counts open VFs (see close/finish_enable). */
struct vfio_pci_vf_token {
	struct mutex		lock;
	uuid_t			uuid;
	int			users;
};

/* List node tracking a userspace vma.  NOTE(review): not referenced in
 * this chunk -- confirm usage against the mmap paths. */
struct vfio_pci_mmap_vma {
	struct vm_area_struct	*vma;
	struct list_head	vma_next;
};
/*
 * VGA support can be compiled out entirely; when it is built in, the
 * disable_vga knob still allows turning it off.
 */
static inline bool vfio_vga_disabled(void)
{
#ifndef CONFIG_VFIO_PCI_VGA
	return true;
#else
	return disable_vga;
#endif
}
/*
* Our VGA arbiter participation is limited since we don't know anything
* about the device itself. However, if the device is the only VGA device
* downstream of a bridge and VFIO VGA support is disabled, then we can
* safely return legacy VGA IO and memory as not decoded since the user
* has no way to get to it and routing can be disabled externally at the
* bridge.
*/
static unsigned int vfio_pci_set_decode(struct pci_dev *pdev, bool single_vga)
{
	struct pci_dev *tmp = NULL;
	unsigned char max_busnr;
	unsigned int decodes;

	/* Can't safely drop legacy decoding in these cases. */
	if (single_vga || !vfio_vga_disabled() || pci_is_root_bus(pdev->bus))
		return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM |
		       VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM;

	max_busnr = pci_bus_max_busnr(pdev->bus);
	decodes = VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;

	/*
	 * Keep legacy decoding if another VGA device exists in the same
	 * domain within this device's downstream bus number range.
	 */
	while ((tmp = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, tmp)) != NULL) {
		if (tmp == pdev ||
		    pci_domain_nr(tmp->bus) != pci_domain_nr(pdev->bus) ||
		    pci_is_root_bus(tmp->bus))
			continue;

		if (tmp->bus->number >= pdev->bus->number &&
		    tmp->bus->number <= max_busnr) {
			pci_dev_put(tmp);
			decodes |= VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM;
			break;
		}
	}

	return decodes;
}
/*
 * Decide, per standard BAR, whether userspace may mmap it.  Page-sized
 * (or larger) MEM BARs qualify directly; page-aligned sub-page BARs
 * qualify after reserving the rest of their page with a dummy resource.
 */
static void vfio_pci_probe_mmaps(struct vfio_pci_core_device *vdev)
{
	struct resource *res;
	int i;
	struct vfio_pci_dummy_resource *dummy_res;

	for (i = 0; i < PCI_STD_NUM_BARS; i++) {
		int bar = i + PCI_STD_RESOURCES;

		res = &vdev->pdev->resource[bar];

		if (!IS_ENABLED(CONFIG_VFIO_PCI_MMAP))
			goto no_mmap;

		if (!(res->flags & IORESOURCE_MEM))
			goto no_mmap;

		/*
		 * The PCI core shouldn't set up a resource with a
		 * type but zero size. But there may be bugs that
		 * cause us to do that.
		 */
		if (!resource_size(res))
			goto no_mmap;

		if (resource_size(res) >= PAGE_SIZE) {
			vdev->bar_mmap_supported[bar] = true;
			continue;
		}

		/* Sub-page BAR: only mmappable if page aligned. */
		if (!(res->start & ~PAGE_MASK)) {
			/*
			 * Add a dummy resource to reserve the remainder
			 * of the exclusive page in case that hot-add
			 * device's bar is assigned into it.
			 */
			dummy_res =
				kzalloc(sizeof(*dummy_res), GFP_KERNEL_ACCOUNT);
			if (dummy_res == NULL)
				goto no_mmap;

			dummy_res->resource.name = "vfio sub-page reserved";
			dummy_res->resource.start = res->end + 1;
			dummy_res->resource.end = res->start + PAGE_SIZE - 1;
			dummy_res->resource.flags = res->flags;
			if (request_resource(res->parent,
						&dummy_res->resource)) {
				kfree(dummy_res);
				goto no_mmap;
			}
			dummy_res->index = bar;
			list_add(&dummy_res->res_next,
					&vdev->dummy_resources_list);
			vdev->bar_mmap_supported[bar] = true;
			continue;
		}
		/*
		 * Here we don't handle the case when the BAR is not page
		 * aligned because we can't expect the BAR will be
		 * assigned into the same location in a page in guest
		 * when we passthrough the BAR. And it's hard to access
		 * this BAR in userspace because we have no way to get
		 * the BAR's location in a page.
		 */
no_mmap:
		vdev->bar_mmap_supported[bar] = false;
	}
}
/* Forward declarations for the hot-reset helpers defined later. */
struct vfio_pci_group_info;
static void vfio_pci_dev_set_try_reset(struct vfio_device_set *dev_set);
static int vfio_pci_dev_set_hot_reset(struct vfio_device_set *dev_set,
				      struct vfio_pci_group_info *groups,
				      struct iommufd_ctx *iommufd_ctx);
/*
* INTx masking requires the ability to disable INTx signaling via PCI_COMMAND
* _and_ the ability detect when the device is asserting INTx via PCI_STATUS.
* If a device implements the former but not the latter we would typically
* expect broken_intx_masking be set and require an exclusive interrupt.
* However since we do have control of the device's ability to assert INTx,
* we can instead pretend that the device does not implement INTx, virtualizing
* the pin register to report zero and maintaining DisINTx set on the host.
*/
/*
 * Identify devices whose INTx behavior warrants hiding INTx entirely
 * (see the comment above this function).  Currently a fixed list of
 * Intel NICs.
 */
static bool vfio_pci_nointx(struct pci_dev *pdev)
{
	if (pdev->vendor != PCI_VENDOR_ID_INTEL)
		return false;

	switch (pdev->device) {
	/* All i40e (XL710/X710/XXV710) 10/20/25/40GbE NICs */
	case 0x1572:
	case 0x1574:
	case 0x1580 ... 0x1581:
	case 0x1583 ... 0x158b:
	case 0x37d0 ... 0x37d2:
	/* X550 */
	case 0x1563:
		return true;
	default:
		return false;
	}
}
/*
 * Record whether the device loses state across a D3hot->D0 transition
 * (PCI_PM_CTRL_NO_SOFT_RESET clear); vfio_pci_set_power_state() uses
 * needs_pm_restore to decide whether to save/restore config state.
 */
static void vfio_pci_probe_power_state(struct vfio_pci_core_device *vdev)
{
	struct pci_dev *pdev = vdev->pdev;
	u16 pmcsr;

	/* No PM capability, nothing to probe. */
	if (!pdev->pm_cap)
		return;

	pci_read_config_word(pdev, pdev->pm_cap + PCI_PM_CTRL, &pmcsr);

	vdev->needs_pm_restore = !(pmcsr & PCI_PM_CTRL_NO_SOFT_RESET);
}
/*
* pci_set_power_state() wrapper handling devices which perform a soft reset on
* D3->D0 transition. Save state prior to D0/1/2->D3, stash it on the vdev,
* restore when returned to D0. Saved separately from pci_saved_state for use
* by PM capability emulation and separately from pci_dev internal saved state
* to avoid it being overwritten and consumed around other resets.
*/
int vfio_pci_set_power_state(struct vfio_pci_core_device *vdev, pci_power_t state)
{
	struct pci_dev *pdev = vdev->pdev;
	bool needs_restore = false, needs_save = false;
	int ret;

	/* Prevent changing power state for PFs with VFs enabled */
	if (pci_num_vf(pdev) && state > PCI_D0)
		return -EBUSY;

	/* Only NoSoftRst- devices need the save/restore dance. */
	if (vdev->needs_pm_restore) {
		if (pdev->current_state < PCI_D3hot && state >= PCI_D3hot) {
			pci_save_state(pdev);
			needs_save = true;
		}

		if (pdev->current_state >= PCI_D3hot && state <= PCI_D0)
			needs_restore = true;
	}

	ret = pci_set_power_state(pdev, state);

	if (!ret) {
		/* D3 might be unsupported via quirk, skip unless in D3 */
		if (needs_save && pdev->current_state >= PCI_D3hot) {
			/*
			 * The current PCI state will be saved locally in
			 * 'pm_save' during the D3hot transition. When the
			 * device state is changed to D0 again with the current
			 * function, then pci_store_saved_state() will restore
			 * the state and will free the memory pointed by
			 * 'pm_save'. There are few cases where the PCI power
			 * state can be changed to D0 without the involvement
			 * of the driver. For these cases, free the earlier
			 * allocated memory first before overwriting 'pm_save'
			 * to prevent the memory leak.
			 */
			kfree(vdev->pm_save);
			vdev->pm_save = pci_store_saved_state(pdev);
		} else if (needs_restore) {
			pci_load_and_free_saved_state(pdev, &vdev->pm_save);
			pci_restore_state(pdev);
		}
	}

	return ret;
}
/*
 * Enter the low-power feature: drop a runtime PM usage count so the
 * device may suspend, optionally stashing a wakeup eventfd.  Fails with
 * -EINVAL if already engaged.
 */
static int vfio_pci_runtime_pm_entry(struct vfio_pci_core_device *vdev,
				     struct eventfd_ctx *efdctx)
{
	/*
	 * The vdev power related flags are protected with 'memory_lock'
	 * semaphore.
	 */
	vfio_pci_zap_and_down_write_memory_lock(vdev);
	if (vdev->pm_runtime_engaged) {
		up_write(&vdev->memory_lock);
		return -EINVAL;
	}

	vdev->pm_runtime_engaged = true;
	vdev->pm_wake_eventfd_ctx = efdctx;
	pm_runtime_put_noidle(&vdev->pdev->dev);
	up_write(&vdev->memory_lock);

	return 0;
}
/* VFIO_DEVICE_FEATURE handler: low-power entry with no wakeup eventfd. */
static int vfio_pci_core_pm_entry(struct vfio_device *device, u32 flags,
				  void __user *arg, size_t argsz)
{
	struct vfio_pci_core_device *vdev =
		container_of(device, struct vfio_pci_core_device, vdev);
	int ret;

	/* SET-only feature with no payload. */
	ret = vfio_check_feature(flags, argsz, VFIO_DEVICE_FEATURE_SET, 0);
	if (ret != 1)
		return ret;

	/*
	 * Inside vfio_pci_runtime_pm_entry(), only the runtime PM usage count
	 * will be decremented. The pm_runtime_put() will be invoked again
	 * while returning from the ioctl and then the device can go into
	 * runtime suspended state.
	 */
	return vfio_pci_runtime_pm_entry(vdev, NULL);
}
/*
 * VFIO_DEVICE_FEATURE handler: low-power entry with a wakeup eventfd
 * that is signaled if the device resumes while in low power.
 */
static int vfio_pci_core_pm_entry_with_wakeup(
	struct vfio_device *device, u32 flags,
	struct vfio_device_low_power_entry_with_wakeup __user *arg,
	size_t argsz)
{
	struct vfio_pci_core_device *vdev =
		container_of(device, struct vfio_pci_core_device, vdev);
	struct vfio_device_low_power_entry_with_wakeup entry;
	struct eventfd_ctx *efdctx;
	int ret;

	ret = vfio_check_feature(flags, argsz, VFIO_DEVICE_FEATURE_SET,
				 sizeof(entry));
	if (ret != 1)
		return ret;

	if (copy_from_user(&entry, arg, sizeof(entry)))
		return -EFAULT;

	if (entry.wakeup_eventfd < 0)
		return -EINVAL;

	efdctx = eventfd_ctx_fdget(entry.wakeup_eventfd);
	if (IS_ERR(efdctx))
		return PTR_ERR(efdctx);

	ret = vfio_pci_runtime_pm_entry(vdev, efdctx);
	if (ret)
		/* Entry failed; drop the reference we just took. */
		eventfd_ctx_put(efdctx);

	return ret;
}
/*
 * Undo vfio_pci_runtime_pm_entry(): retake the runtime PM usage count
 * and release the wakeup eventfd.  Callers hold memory_lock for write.
 */
static void __vfio_pci_runtime_pm_exit(struct vfio_pci_core_device *vdev)
{
	if (vdev->pm_runtime_engaged) {
		vdev->pm_runtime_engaged = false;
		pm_runtime_get_noresume(&vdev->pdev->dev);

		if (vdev->pm_wake_eventfd_ctx) {
			eventfd_ctx_put(vdev->pm_wake_eventfd_ctx);
			vdev->pm_wake_eventfd_ctx = NULL;
		}
	}
}
/* Locked wrapper around __vfio_pci_runtime_pm_exit(). */
static void vfio_pci_runtime_pm_exit(struct vfio_pci_core_device *vdev)
{
	/*
	 * The vdev power related flags are protected with 'memory_lock'
	 * semaphore.
	 */
	down_write(&vdev->memory_lock);
	__vfio_pci_runtime_pm_exit(vdev);
	up_write(&vdev->memory_lock);
}
/* VFIO_DEVICE_FEATURE handler: leave the low-power feature. */
static int vfio_pci_core_pm_exit(struct vfio_device *device, u32 flags,
				 void __user *arg, size_t argsz)
{
	struct vfio_pci_core_device *vdev =
		container_of(device, struct vfio_pci_core_device, vdev);
	int ret;

	/* SET-only feature with no payload. */
	ret = vfio_check_feature(flags, argsz, VFIO_DEVICE_FEATURE_SET, 0);
	if (ret != 1)
		return ret;

	/*
	 * The device is always in the active state here due to pm wrappers
	 * around ioctls. If the device had entered a low power state and
	 * pm_wake_eventfd_ctx is valid, vfio_pci_core_runtime_resume() has
	 * already signaled the eventfd and exited low power mode itself.
	 * pm_runtime_engaged protects the redundant call here.
	 */
	vfio_pci_runtime_pm_exit(vdev);
	return 0;
}
#ifdef CONFIG_PM
/* Runtime-PM suspend callback: normalize to D0 and mask INTx if needed. */
static int vfio_pci_core_runtime_suspend(struct device *dev)
{
	struct vfio_pci_core_device *vdev = dev_get_drvdata(dev);

	down_write(&vdev->memory_lock);
	/*
	 * The user can move the device into D3hot state before invoking
	 * power management IOCTL. Move the device into D0 state here and then
	 * the pci-driver core runtime PM suspend function will move the device
	 * into the low power state. Also, for the devices which have
	 * NoSoftRst-, it will help in restoring the original state
	 * (saved locally in 'vdev->pm_save').
	 */
	vfio_pci_set_power_state(vdev, PCI_D0);
	up_write(&vdev->memory_lock);

	/*
	 * If INTx is enabled, then mask INTx before going into the runtime
	 * suspended state and unmask the same in the runtime resume.
	 * If INTx has already been masked by the user, then
	 * vfio_pci_intx_mask() will return false and in that case, INTx
	 * should not be unmasked in the runtime resume.
	 */
	vdev->pm_intx_masked = ((vdev->irq_type == VFIO_PCI_INTX_IRQ_INDEX) &&
				vfio_pci_intx_mask(vdev));

	return 0;
}
/* Runtime-PM resume callback: notify the user and undo suspend-time masking. */
static int vfio_pci_core_runtime_resume(struct device *dev)
{
	struct vfio_pci_core_device *vdev = dev_get_drvdata(dev);

	/*
	 * Resume with a pm_wake_eventfd_ctx signals the eventfd and exit
	 * low power mode.
	 */
	down_write(&vdev->memory_lock);
	if (vdev->pm_wake_eventfd_ctx) {
		eventfd_signal(vdev->pm_wake_eventfd_ctx, 1);
		__vfio_pci_runtime_pm_exit(vdev);
	}
	up_write(&vdev->memory_lock);

	/* Only unmask if the suspend path masked INTx (not the user). */
	if (vdev->pm_intx_masked)
		vfio_pci_intx_unmask(vdev);

	return 0;
}
#endif /* CONFIG_PM */
/*
* The pci-driver core runtime PM routines always save the device state
* before going into suspended state. If the device is going into low power
* state with only with runtime PM ops, then no explicit handling is needed
* for the devices which have NoSoftRst-.
*/
/* Runtime-PM callbacks registered with the driver core. */
static const struct dev_pm_ops vfio_pci_core_pm_ops = {
	SET_RUNTIME_PM_OPS(vfio_pci_core_runtime_suspend,
			   vfio_pci_core_runtime_resume,
			   NULL)
};
/*
 * Bring the device up for a user: resume it, enable it, snapshot its
 * pristine config state, probe INTx masking quirks, initialize config
 * space emulation and cache the MSI-X geometry.
 */
int vfio_pci_core_enable(struct vfio_pci_core_device *vdev)
{
	struct pci_dev *pdev = vdev->pdev;
	int ret;
	u16 cmd;
	u8 msix_pos;

	/* Hold a runtime PM reference for the whole enabled period. */
	if (!disable_idle_d3) {
		ret = pm_runtime_resume_and_get(&pdev->dev);
		if (ret < 0)
			return ret;
	}

	/* Don't allow our initial saved state to include busmaster */
	pci_clear_master(pdev);

	ret = pci_enable_device(pdev);
	if (ret)
		goto out_power;

	/* If reset fails because of the device lock, fail this path entirely */
	ret = pci_try_reset_function(pdev);
	if (ret == -EAGAIN)
		goto out_disable_device;

	vdev->reset_works = !ret;
	pci_save_state(pdev);
	vdev->pci_saved_state = pci_store_saved_state(pdev);
	if (!vdev->pci_saved_state)
		pci_dbg(pdev, "%s: Couldn't store saved state\n", __func__);

	if (likely(!nointxmask)) {
		if (vfio_pci_nointx(pdev)) {
			pci_info(pdev, "Masking broken INTx support\n");
			vdev->nointx = true;
			pci_intx(pdev, 0);
		} else
			vdev->pci_2_3 = pci_intx_mask_supported(pdev);
	}

	/* Start with DisINTx clear on pci_2_3 devices. */
	pci_read_config_word(pdev, PCI_COMMAND, &cmd);
	if (vdev->pci_2_3 && (cmd & PCI_COMMAND_INTX_DISABLE)) {
		cmd &= ~PCI_COMMAND_INTX_DISABLE;
		pci_write_config_word(pdev, PCI_COMMAND, cmd);
	}

	ret = vfio_pci_zdev_open_device(vdev);
	if (ret)
		goto out_free_state;

	ret = vfio_config_init(vdev);
	if (ret)
		goto out_free_zdev;

	/* Cache MSI-X table placement for region/mmap handling. */
	msix_pos = pdev->msix_cap;
	if (msix_pos) {
		u16 flags;
		u32 table;

		pci_read_config_word(pdev, msix_pos + PCI_MSIX_FLAGS, &flags);
		pci_read_config_dword(pdev, msix_pos + PCI_MSIX_TABLE, &table);

		vdev->msix_bar = table & PCI_MSIX_TABLE_BIR;
		vdev->msix_offset = table & PCI_MSIX_TABLE_OFFSET;
		vdev->msix_size = ((flags & PCI_MSIX_FLAGS_QSIZE) + 1) * 16;
		vdev->has_dyn_msix = pci_msix_can_alloc_dyn(pdev);
	} else {
		vdev->msix_bar = 0xFF;
		vdev->has_dyn_msix = false;
	}

	if (!vfio_vga_disabled() && vfio_pci_is_vga(pdev))
		vdev->has_vga = true;

	return 0;

out_free_zdev:
	vfio_pci_zdev_close_device(vdev);
out_free_state:
	kfree(vdev->pci_saved_state);
	vdev->pci_saved_state = NULL;
out_disable_device:
	pci_disable_device(pdev);
out_power:
	if (!disable_idle_d3)
		pm_runtime_put(&pdev->dev);
	return ret;
}
EXPORT_SYMBOL_GPL(vfio_pci_core_enable);
/*
 * Tear down everything vfio_pci_core_enable() and subsequent use set
 * up: exit low power, disable interrupts/ioeventfds, release regions
 * and BAR mappings, restore pristine config state and attempt a reset.
 */
void vfio_pci_core_disable(struct vfio_pci_core_device *vdev)
{
	struct pci_dev *pdev = vdev->pdev;
	struct vfio_pci_dummy_resource *dummy_res, *tmp;
	struct vfio_pci_ioeventfd *ioeventfd, *ioeventfd_tmp;
	int i, bar;

	/* For needs_reset */
	lockdep_assert_held(&vdev->vdev.dev_set->lock);

	/*
	 * This function can be invoked while the power state is non-D0.
	 * This non-D0 power state can be with or without runtime PM.
	 * vfio_pci_runtime_pm_exit() will internally increment the usage
	 * count corresponding to pm_runtime_put() called during low power
	 * feature entry and then pm_runtime_resume() will wake up the device,
	 * if the device has already gone into the suspended state. Otherwise,
	 * the vfio_pci_set_power_state() will change the device power state
	 * to D0.
	 */
	vfio_pci_runtime_pm_exit(vdev);
	pm_runtime_resume(&pdev->dev);

	/*
	 * This function calls __pci_reset_function_locked() which internally
	 * can use pci_pm_reset() for the function reset. pci_pm_reset() will
	 * fail if the power state is non-D0. Also, for the devices which
	 * have NoSoftRst-, the reset function can cause the PCI config space
	 * reset without restoring the original state (saved locally in
	 * 'vdev->pm_save').
	 */
	vfio_pci_set_power_state(vdev, PCI_D0);

	/* Stop the device from further DMA */
	pci_clear_master(pdev);

	/* Disable whatever interrupt mode is currently active. */
	vfio_pci_set_irqs_ioctl(vdev, VFIO_IRQ_SET_DATA_NONE |
				VFIO_IRQ_SET_ACTION_TRIGGER,
				vdev->irq_type, 0, 0, NULL);

	/* Device closed, don't need mutex here */
	list_for_each_entry_safe(ioeventfd, ioeventfd_tmp,
				 &vdev->ioeventfds_list, next) {
		vfio_virqfd_disable(&ioeventfd->virqfd);
		list_del(&ioeventfd->next);
		kfree(ioeventfd);
	}
	vdev->ioeventfds_nr = 0;

	vdev->virq_disabled = false;

	/* Release device-specific regions registered by variant drivers. */
	for (i = 0; i < vdev->num_regions; i++)
		vdev->region[i].ops->release(vdev, &vdev->region[i]);

	vdev->num_regions = 0;
	kfree(vdev->region);
	vdev->region = NULL; /* don't krealloc a freed pointer */

	vfio_config_free(vdev);

	/* Unmap and release any BARs we mapped on demand. */
	for (i = 0; i < PCI_STD_NUM_BARS; i++) {
		bar = i + PCI_STD_RESOURCES;
		if (!vdev->barmap[bar])
			continue;
		pci_iounmap(pdev, vdev->barmap[bar]);
		pci_release_selected_regions(pdev, 1 << bar);
		vdev->barmap[bar] = NULL;
	}

	list_for_each_entry_safe(dummy_res, tmp,
				 &vdev->dummy_resources_list, res_next) {
		list_del(&dummy_res->res_next);
		release_resource(&dummy_res->resource);
		kfree(dummy_res);
	}

	vdev->needs_reset = true;

	vfio_pci_zdev_close_device(vdev);

	/*
	 * If we have saved state, restore it. If we can reset the device,
	 * even better. Resetting with current state seems better than
	 * nothing, but saving and restoring current state without reset
	 * is just busy work.
	 */
	if (pci_load_and_free_saved_state(pdev, &vdev->pci_saved_state)) {
		pci_info(pdev, "%s: Couldn't reload saved state\n", __func__);

		if (!vdev->reset_works)
			goto out;

		pci_save_state(pdev);
	}

	/*
	 * Disable INTx and MSI, presumably to avoid spurious interrupts
	 * during reset. Stolen from pci_reset_function()
	 */
	pci_write_config_word(pdev, PCI_COMMAND, PCI_COMMAND_INTX_DISABLE);

	/*
	 * Try to get the locks ourselves to prevent a deadlock. The
	 * success of this is dependent on being able to lock the device,
	 * which is not always possible.
	 * We can not use the "try" reset interface here, which will
	 * overwrite the previously restored configuration information.
	 */
	if (vdev->reset_works && pci_dev_trylock(pdev)) {
		if (!__pci_reset_function_locked(pdev))
			vdev->needs_reset = false;
		pci_dev_unlock(pdev);
	}

	pci_restore_state(pdev);
out:
	pci_disable_device(pdev);

	vfio_pci_dev_set_try_reset(vdev->vdev.dev_set);

	/* Put the pm-runtime usage counter acquired during enable */
	if (!disable_idle_d3)
		pm_runtime_put(&pdev->dev);
}
EXPORT_SYMBOL_GPL(vfio_pci_core_disable);
/*
 * vfio_device close hook: drop the PF's VF-token user count, disable
 * the device and release the err/req notification eventfds.
 */
void vfio_pci_core_close_device(struct vfio_device *core_vdev)
{
	struct vfio_pci_core_device *vdev =
		container_of(core_vdev, struct vfio_pci_core_device, vdev);

	if (vdev->sriov_pf_core_dev) {
		mutex_lock(&vdev->sriov_pf_core_dev->vf_token->lock);
		WARN_ON(!vdev->sriov_pf_core_dev->vf_token->users);
		vdev->sriov_pf_core_dev->vf_token->users--;
		mutex_unlock(&vdev->sriov_pf_core_dev->vf_token->lock);
	}
#if IS_ENABLED(CONFIG_EEH)
	eeh_dev_release(vdev->pdev);
#endif
	vfio_pci_core_disable(vdev);

	/* igate serializes against the interrupt notification paths. */
	mutex_lock(&vdev->igate);
	if (vdev->err_trigger) {
		eventfd_ctx_put(vdev->err_trigger);
		vdev->err_trigger = NULL;
	}
	if (vdev->req_trigger) {
		eventfd_ctx_put(vdev->req_trigger);
		vdev->req_trigger = NULL;
	}
	mutex_unlock(&vdev->igate);
}
EXPORT_SYMBOL_GPL(vfio_pci_core_close_device);
/*
 * Final open-path step: probe mmappable BARs, register with EEH and
 * count this VF as a user of its PF's VF token.
 */
void vfio_pci_core_finish_enable(struct vfio_pci_core_device *vdev)
{
	vfio_pci_probe_mmaps(vdev);
#if IS_ENABLED(CONFIG_EEH)
	eeh_dev_open(vdev->pdev);
#endif

	if (vdev->sriov_pf_core_dev) {
		mutex_lock(&vdev->sriov_pf_core_dev->vf_token->lock);
		vdev->sriov_pf_core_dev->vf_token->users++;
		mutex_unlock(&vdev->sriov_pf_core_dev->vf_token->lock);
	}
}
EXPORT_SYMBOL_GPL(vfio_pci_core_finish_enable);
/*
 * Number of interrupts available for the given VFIO IRQ index, derived
 * from the device's capabilities (0 when unsupported).
 */
static int vfio_pci_get_irq_count(struct vfio_pci_core_device *vdev, int irq_type)
{
	if (irq_type == VFIO_PCI_INTX_IRQ_INDEX) {
		u8 pin;

		/* INTx may be compiled out, quirked off, or absent on VFs. */
		if (!IS_ENABLED(CONFIG_VFIO_PCI_INTX) ||
		    vdev->nointx || vdev->pdev->is_virtfn)
			return 0;

		pci_read_config_byte(vdev->pdev, PCI_INTERRUPT_PIN, &pin);

		return pin ? 1 : 0;
	} else if (irq_type == VFIO_PCI_MSI_IRQ_INDEX) {
		u8 pos;
		u16 flags;

		pos = vdev->pdev->msi_cap;
		if (pos) {
			pci_read_config_word(vdev->pdev,
					     pos + PCI_MSI_FLAGS, &flags);
			/* Multiple-message capable field is a power of two. */
			return 1 << ((flags & PCI_MSI_FLAGS_QMASK) >> 1);
		}
	} else if (irq_type == VFIO_PCI_MSIX_IRQ_INDEX) {
		u8 pos;
		u16 flags;

		pos = vdev->pdev->msix_cap;
		if (pos) {
			pci_read_config_word(vdev->pdev,
					     pos + PCI_MSIX_FLAGS, &flags);

			return (flags & PCI_MSIX_FLAGS_QSIZE) + 1;
		}
	} else if (irq_type == VFIO_PCI_ERR_IRQ_INDEX) {
		/* Error notification only offered for PCIe devices. */
		if (pci_is_pcie(vdev->pdev))
			return 1;
	} else if (irq_type == VFIO_PCI_REQ_IRQ_INDEX) {
		return 1;
	}

	return 0;
}
/* Bus-walk callback: bump the counter pointed to by @data. */
static int vfio_pci_count_devs(struct pci_dev *pdev, void *data)
{
	int *count = data;

	(*count)++;
	return 0;
}
/*
 * State threaded through vfio_pci_fill_devs(): cursor into the user's
 * dependent-device array plus a running count of affected devices
 * (count may exceed the buffer capacity, letting the user re-query).
 */
struct vfio_pci_fill_info {
	struct vfio_pci_dependent_device __user *devices;
	struct vfio_pci_dependent_device __user *devices_end;
	struct vfio_device *vdev;	/* device issuing the query */
	u32 count;			/* total devices encountered */
	u32 flags;			/* VFIO_PCI_HOT_RESET_FLAG_* */
};
/*
 * Bus-walk callback populating one vfio_pci_dependent_device entry per
 * affected device, reporting either an iommufd dev-id or an IOMMU group
 * id depending on the requested flags.
 */
static int vfio_pci_fill_devs(struct pci_dev *pdev, void *data)
{
	struct vfio_pci_dependent_device info = {
		.segment = pci_domain_nr(pdev->bus),
		.bus = pdev->bus->number,
		.devfn = pdev->devfn,
	};
	struct vfio_pci_fill_info *fill = data;

	fill->count++;
	/* Keep counting even once the user's buffer is exhausted. */
	if (fill->devices >= fill->devices_end)
		return 0;

	if (fill->flags & VFIO_PCI_HOT_RESET_FLAG_DEV_ID) {
		struct iommufd_ctx *iommufd = vfio_iommufd_device_ictx(fill->vdev);
		struct vfio_device_set *dev_set = fill->vdev->dev_set;
		struct vfio_device *vdev;

		/*
		 * hot-reset requires all affected devices be represented in
		 * the dev_set.
		 */
		vdev = vfio_find_device_in_devset(dev_set, &pdev->dev);
		if (!vdev) {
			info.devid = VFIO_PCI_DEVID_NOT_OWNED;
		} else {
			int id = vfio_iommufd_get_dev_id(vdev, iommufd);

			if (id > 0)
				info.devid = id;
			else if (id == -ENOENT)
				info.devid = VFIO_PCI_DEVID_OWNED;
			else
				info.devid = VFIO_PCI_DEVID_NOT_OWNED;
		}
		/* If devid is VFIO_PCI_DEVID_NOT_OWNED, clear owned flag. */
		if (info.devid == VFIO_PCI_DEVID_NOT_OWNED)
			fill->flags &= ~VFIO_PCI_HOT_RESET_FLAG_DEV_ID_OWNED;
	} else {
		struct iommu_group *iommu_group;

		iommu_group = iommu_group_get(&pdev->dev);
		if (!iommu_group)
			return -EPERM; /* Cannot reset non-isolated devices */

		info.group_id = iommu_group_id(iommu_group);
		iommu_group_put(iommu_group);
	}

	if (copy_to_user(fill->devices, &info, sizeof(info)))
		return -EFAULT;
	fill->devices++;
	return 0;
}
/* Group files passed to vfio_pci_dev_set_hot_reset(). */
struct vfio_pci_group_info {
	int count;		/* number of entries in @files */
	struct file **files;
};
/*
 * Return true if @pdev sits in @slot, possibly behind intermediate
 * bridges: climb the upstream-bridge chain until reaching the bus that
 * owns the slot, then compare slots.
 */
static bool vfio_pci_dev_below_slot(struct pci_dev *pdev, struct pci_slot *slot)
{
	while (pdev) {
		if (pdev->bus == slot->bus)
			return pdev->slot == slot;
		pdev = pdev->bus->self;
	}

	return false;
}
/* Context threaded through pci_walk_bus() by vfio_pci_for_each_slot_or_bus(). */
struct vfio_pci_walk_info {
	int (*fn)(struct pci_dev *pdev, void *data);	/* per-device callback */
	void *data;		/* opaque argument for @fn */
	struct pci_dev *pdev;	/* device anchoring the walk */
	bool slot;		/* restrict the walk to pdev's slot */
	int ret;		/* first non-zero callback result */
};
/*
 * pci_walk_bus() adapter: apply the user callback, optionally filtered
 * to devices within the anchor device's slot.  Returning non-zero stops
 * the walk early.
 */
static int vfio_pci_walk_wrapper(struct pci_dev *pdev, void *data)
{
	struct vfio_pci_walk_info *walk = data;

	/* Skip devices outside the target slot when slot-scoped. */
	if (walk->slot && !vfio_pci_dev_below_slot(pdev, walk->pdev->slot))
		return walk->ret;

	walk->ret = walk->fn(pdev, walk->data);
	return walk->ret;
}
/*
 * Invoke @fn for every device on @pdev's bus (or, when @slot is set,
 * only those within its slot).  Returns the first non-zero callback
 * result, or 0 if every callback succeeded.
 */
static int vfio_pci_for_each_slot_or_bus(struct pci_dev *pdev,
					 int (*fn)(struct pci_dev *,
						   void *data), void *data,
					 bool slot)
{
	struct vfio_pci_walk_info walk;

	walk.fn = fn;
	walk.data = data;
	walk.pdev = pdev;
	walk.slot = slot;
	walk.ret = 0;

	pci_walk_bus(pdev->bus, vfio_pci_walk_wrapper, &walk);

	return walk.ret;
}
/* Advertise that the MSI-X table region may be mmapped by the user. */
static int msix_mmappable_cap(struct vfio_pci_core_device *vdev,
			      struct vfio_info_cap *caps)
{
	struct vfio_info_cap_header header = { 0 };

	header.id = VFIO_REGION_INFO_CAP_MSIX_MAPPABLE;
	header.version = 1;

	return vfio_info_add_capability(caps, &header, sizeof(header));
}
/*
 * Append a device-specific region to vdev->region, growing the array by
 * one slot.  Returns 0 on success or -ENOMEM if the array can't grow.
 */
int vfio_pci_core_register_dev_region(struct vfio_pci_core_device *vdev,
				      unsigned int type, unsigned int subtype,
				      const struct vfio_pci_regops *ops,
				      size_t size, u32 flags, void *data)
{
	struct vfio_pci_region *regions, *slot;

	regions = krealloc(vdev->region,
			   (vdev->num_regions + 1) * sizeof(*regions),
			   GFP_KERNEL_ACCOUNT);
	if (!regions)
		return -ENOMEM;

	vdev->region = regions;

	slot = &regions[vdev->num_regions];
	slot->type = type;
	slot->subtype = subtype;
	slot->ops = ops;
	slot->size = size;
	slot->flags = flags;
	slot->data = data;

	vdev->num_regions++;

	return 0;
}
EXPORT_SYMBOL_GPL(vfio_pci_core_register_dev_region);
/*
 * Report which AtomicOp completer sizes (32/64/128-bit) the physical
 * function supports and can enable toward the root.  Returns -ENODEV if
 * none apply, so the caller can skip the capability.
 */
static int vfio_pci_info_atomic_cap(struct vfio_pci_core_device *vdev,
				    struct vfio_info_cap *caps)
{
	struct vfio_device_info_cap_pci_atomic_comp cap = {
		.header.id = VFIO_DEVICE_INFO_CAP_PCI_ATOMIC_COMP,
		.header.version = 1
	};
	struct pci_dev *pdev = pci_physfn(vdev->pdev);
	u32 devcap2;

	pcie_capability_read_dword(pdev, PCI_EXP_DEVCAP2, &devcap2);

	if ((devcap2 & PCI_EXP_DEVCAP2_ATOMIC_COMP32) &&
	    !pci_enable_atomic_ops_to_root(pdev, PCI_EXP_DEVCAP2_ATOMIC_COMP32))
		cap.flags |= VFIO_PCI_ATOMIC_COMP32;

	if ((devcap2 & PCI_EXP_DEVCAP2_ATOMIC_COMP64) &&
	    !pci_enable_atomic_ops_to_root(pdev, PCI_EXP_DEVCAP2_ATOMIC_COMP64))
		cap.flags |= VFIO_PCI_ATOMIC_COMP64;

	if ((devcap2 & PCI_EXP_DEVCAP2_ATOMIC_COMP128) &&
	    !pci_enable_atomic_ops_to_root(pdev,
					   PCI_EXP_DEVCAP2_ATOMIC_COMP128))
		cap.flags |= VFIO_PCI_ATOMIC_COMP128;

	if (!cap.flags)
		return -ENODEV;

	return vfio_info_add_capability(caps, &cap.header, sizeof(cap));
}
/*
 * VFIO_DEVICE_GET_INFO: report device flags, region/irq counts and an
 * optional capability chain (zPCI, AtomicOps) to userspace.
 */
static int vfio_pci_ioctl_get_info(struct vfio_pci_core_device *vdev,
				   struct vfio_device_info __user *arg)
{
	unsigned long minsz = offsetofend(struct vfio_device_info, num_irqs);
	struct vfio_device_info info = {};
	struct vfio_info_cap caps = { .buf = NULL, .size = 0 };
	int ret;

	if (copy_from_user(&info, arg, minsz))
		return -EFAULT;

	if (info.argsz < minsz)
		return -EINVAL;

	/* Never copy back more than the user's buffer can hold. */
	minsz = min_t(size_t, info.argsz, sizeof(info));

	info.flags = VFIO_DEVICE_FLAGS_PCI;

	if (vdev->reset_works)
		info.flags |= VFIO_DEVICE_FLAGS_RESET;

	info.num_regions = VFIO_PCI_NUM_REGIONS + vdev->num_regions;
	info.num_irqs = VFIO_PCI_NUM_IRQS;

	ret = vfio_pci_info_zdev_add_caps(vdev, &caps);
	if (ret && ret != -ENODEV) {
		pci_warn(vdev->pdev,
			 "Failed to setup zPCI info capabilities\n");
		return ret;
	}

	ret = vfio_pci_info_atomic_cap(vdev, &caps);
	if (ret && ret != -ENODEV) {
		pci_warn(vdev->pdev,
			 "Failed to setup AtomicOps info capability\n");
		return ret;
	}

	if (caps.size) {
		info.flags |= VFIO_DEVICE_FLAGS_CAPS;
		if (info.argsz < sizeof(info) + caps.size) {
			/* Too small: tell the user how much is needed. */
			info.argsz = sizeof(info) + caps.size;
		} else {
			vfio_info_cap_shift(&caps, sizeof(info));
			if (copy_to_user(arg + 1, caps.buf, caps.size)) {
				kfree(caps.buf);
				return -EFAULT;
			}
			info.cap_offset = sizeof(*arg);
		}

		kfree(caps.buf);
	}

	return copy_to_user(arg, &info, minsz) ? -EFAULT : 0;
}
/*
 * VFIO_DEVICE_GET_REGION_INFO handler: describe one region (offset,
 * size, access flags, optional capability chain) for config space,
 * BARs, the ROM, VGA, or a driver-provided device-specific region.
 */
static int vfio_pci_ioctl_get_region_info(struct vfio_pci_core_device *vdev,
					  struct vfio_region_info __user *arg)
{
	unsigned long minsz = offsetofend(struct vfio_region_info, offset);
	struct pci_dev *pdev = vdev->pdev;
	struct vfio_region_info info;
	struct vfio_info_cap caps = { .buf = NULL, .size = 0 };
	int i, ret;

	if (copy_from_user(&info, arg, minsz))
		return -EFAULT;

	if (info.argsz < minsz)
		return -EINVAL;

	switch (info.index) {
	case VFIO_PCI_CONFIG_REGION_INDEX:
		info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
		info.size = pdev->cfg_size;
		info.flags = VFIO_REGION_INFO_FLAG_READ |
			     VFIO_REGION_INFO_FLAG_WRITE;
		break;
	case VFIO_PCI_BAR0_REGION_INDEX ... VFIO_PCI_BAR5_REGION_INDEX:
		info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
		info.size = pci_resource_len(pdev, info.index);
		if (!info.size) {
			/* Unimplemented BAR: zero size, no access flags. */
			info.flags = 0;
			break;
		}

		info.flags = VFIO_REGION_INFO_FLAG_READ |
			     VFIO_REGION_INFO_FLAG_WRITE;
		if (vdev->bar_mmap_supported[info.index]) {
			info.flags |= VFIO_REGION_INFO_FLAG_MMAP;
			if (info.index == vdev->msix_bar) {
				/* Advertise that the MSI-X table may be mmap'd. */
				ret = msix_mmappable_cap(vdev, &caps);
				if (ret)
					return ret;
			}
		}
		break;
	case VFIO_PCI_ROM_REGION_INDEX: {
		void __iomem *io;
		size_t size;
		u16 cmd;

		info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
		info.flags = 0;

		/* Report the BAR size, not the ROM size */
		info.size = pci_resource_len(pdev, info.index);
		if (!info.size) {
			/* Shadow ROMs appear as PCI option ROMs */
			if (pdev->resource[PCI_ROM_RESOURCE].flags &
			    IORESOURCE_ROM_SHADOW)
				info.size = 0x20000;
			else
				break;
		}

		/*
		 * Is it really there? Enable memory decode for implicit access
		 * in pci_map_rom().
		 */
		cmd = vfio_pci_memory_lock_and_enable(vdev);
		io = pci_map_rom(pdev, &size);
		if (io) {
			info.flags = VFIO_REGION_INFO_FLAG_READ;
			pci_unmap_rom(pdev, io);
		} else {
			info.size = 0;
		}
		vfio_pci_memory_unlock_and_restore(vdev, cmd);

		break;
	}
	case VFIO_PCI_VGA_REGION_INDEX:
		if (!vdev->has_vga)
			return -EINVAL;

		info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
		/* Fixed legacy VGA window size. */
		info.size = 0xc0000;
		info.flags = VFIO_REGION_INFO_FLAG_READ |
			     VFIO_REGION_INFO_FLAG_WRITE;

		break;
	default: {
		struct vfio_region_info_cap_type cap_type = {
			.header.id = VFIO_REGION_INFO_CAP_TYPE,
			.header.version = 1
		};

		if (info.index >= VFIO_PCI_NUM_REGIONS + vdev->num_regions)
			return -EINVAL;
		/* Clamp under speculation; index is user-controlled. */
		info.index = array_index_nospec(
			info.index, VFIO_PCI_NUM_REGIONS + vdev->num_regions);

		i = info.index - VFIO_PCI_NUM_REGIONS;

		info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
		info.size = vdev->region[i].size;
		info.flags = vdev->region[i].flags;

		cap_type.type = vdev->region[i].type;
		cap_type.subtype = vdev->region[i].subtype;

		ret = vfio_info_add_capability(&caps, &cap_type.header,
					       sizeof(cap_type));
		if (ret)
			return ret;

		if (vdev->region[i].ops->add_capability) {
			ret = vdev->region[i].ops->add_capability(
				vdev, &vdev->region[i], &caps);
			if (ret)
				return ret;
		}
	}
	}

	if (caps.size) {
		info.flags |= VFIO_REGION_INFO_FLAG_CAPS;
		if (info.argsz < sizeof(info) + caps.size) {
			/* Report the needed size; no caps copied out. */
			info.argsz = sizeof(info) + caps.size;
			info.cap_offset = 0;
		} else {
			vfio_info_cap_shift(&caps, sizeof(info));
			if (copy_to_user(arg + 1, caps.buf, caps.size)) {
				kfree(caps.buf);
				return -EFAULT;
			}
			info.cap_offset = sizeof(*arg);
		}
		kfree(caps.buf);
	}

	return copy_to_user(arg, &info, minsz) ? -EFAULT : 0;
}
/*
 * VFIO_DEVICE_GET_IRQ_INFO handler: report flags and vector count for
 * one IRQ index (INTx/MSI/MSI-X/ERR/REQ).  ERR is only valid on PCIe.
 */
static int vfio_pci_ioctl_get_irq_info(struct vfio_pci_core_device *vdev,
				       struct vfio_irq_info __user *arg)
{
	unsigned long minsz = offsetofend(struct vfio_irq_info, count);
	struct vfio_irq_info info;

	if (copy_from_user(&info, arg, minsz))
		return -EFAULT;

	if (info.argsz < minsz || info.index >= VFIO_PCI_NUM_IRQS)
		return -EINVAL;

	switch (info.index) {
	case VFIO_PCI_INTX_IRQ_INDEX ... VFIO_PCI_MSIX_IRQ_INDEX:
	case VFIO_PCI_REQ_IRQ_INDEX:
		break;
	case VFIO_PCI_ERR_IRQ_INDEX:
		if (pci_is_pcie(vdev->pdev))
			break;
		fallthrough;
	default:
		return -EINVAL;
	}

	info.flags = VFIO_IRQ_INFO_EVENTFD;

	info.count = vfio_pci_get_irq_count(vdev, info.index);

	if (info.index == VFIO_PCI_INTX_IRQ_INDEX)
		info.flags |=
			(VFIO_IRQ_INFO_MASKABLE | VFIO_IRQ_INFO_AUTOMASKED);
	else if (info.index != VFIO_PCI_MSIX_IRQ_INDEX || !vdev->has_dyn_msix)
		/* Only dynamic MSI-X may be resized after enabling. */
		info.flags |= VFIO_IRQ_INFO_NORESIZE;

	return copy_to_user(arg, &info, minsz) ? -EFAULT : 0;
}
/*
 * VFIO_DEVICE_SET_IRQS handler: validate the header against the
 * per-index vector count, copy in any trailing data payload, and
 * apply the change under the interrupt gate mutex.
 */
static int vfio_pci_ioctl_set_irqs(struct vfio_pci_core_device *vdev,
				   struct vfio_irq_set __user *arg)
{
	unsigned long minsz = offsetofend(struct vfio_irq_set, count);
	struct vfio_irq_set hdr;
	u8 *data = NULL;
	int max, ret = 0;
	size_t data_size = 0;

	if (copy_from_user(&hdr, arg, minsz))
		return -EFAULT;

	max = vfio_pci_get_irq_count(vdev, hdr.index);

	/* Computes data_size from hdr.flags/count; 0 means no payload. */
	ret = vfio_set_irqs_validate_and_prepare(&hdr, max, VFIO_PCI_NUM_IRQS,
						 &data_size);
	if (ret)
		return ret;

	if (data_size) {
		data = memdup_user(&arg->data, data_size);
		if (IS_ERR(data))
			return PTR_ERR(data);
	}

	mutex_lock(&vdev->igate);

	ret = vfio_pci_set_irqs_ioctl(vdev, hdr.flags, hdr.index, hdr.start,
				      hdr.count, data);

	mutex_unlock(&vdev->igate);
	kfree(data);

	return ret;
}
/*
 * VFIO_DEVICE_RESET handler: function-level reset with all user
 * mmaps zapped and memory decode blocked for the duration.
 */
static int vfio_pci_ioctl_reset(struct vfio_pci_core_device *vdev,
				void __user *arg)
{
	int ret;

	if (!vdev->reset_works)
		return -EINVAL;

	/* Takes memory_lock for write; released below. */
	vfio_pci_zap_and_down_write_memory_lock(vdev);

	/*
	 * This function can be invoked while the power state is non-D0. If
	 * pci_try_reset_function() has been called while the power state is
	 * non-D0, then pci_try_reset_function() will internally set the power
	 * state to D0 without vfio driver involvement. For the devices which
	 * have NoSoftRst-, the reset function can cause the PCI config space
	 * reset without restoring the original state (saved locally in
	 * 'vdev->pm_save').
	 */
	vfio_pci_set_power_state(vdev, PCI_D0);

	ret = pci_try_reset_function(vdev->pdev);
	up_write(&vdev->memory_lock);

	return ret;
}
/*
 * VFIO_DEVICE_GET_PCI_HOT_RESET_INFO handler: enumerate all devices
 * affected by a slot or bus reset of this device into the user's
 * trailing array.  Returns -ENOSPC when the array is too small
 * (hdr.count still reports the required number of entries).
 */
static int vfio_pci_ioctl_get_pci_hot_reset_info(
	struct vfio_pci_core_device *vdev,
	struct vfio_pci_hot_reset_info __user *arg)
{
	unsigned long minsz =
		offsetofend(struct vfio_pci_hot_reset_info, count);
	struct vfio_pci_hot_reset_info hdr;
	struct vfio_pci_fill_info fill = {};
	bool slot = false;
	int ret = 0;

	if (copy_from_user(&hdr, arg, minsz))
		return -EFAULT;

	if (hdr.argsz < minsz)
		return -EINVAL;

	hdr.flags = 0;

	/* Can we do a slot or bus reset or neither? */
	if (!pci_probe_reset_slot(vdev->pdev->slot))
		slot = true;
	else if (pci_probe_reset_bus(vdev->pdev->bus))
		return -ENODEV;

	/* Bound the fill cursor by the space userspace provided. */
	fill.devices = arg->devices;
	fill.devices_end = arg->devices +
			   (hdr.argsz - sizeof(hdr)) / sizeof(arg->devices[0]);
	fill.vdev = &vdev->vdev;

	/* cdev (iommufd) users get per-device IDs instead of group info. */
	if (vfio_device_cdev_opened(&vdev->vdev))
		fill.flags |= VFIO_PCI_HOT_RESET_FLAG_DEV_ID |
			     VFIO_PCI_HOT_RESET_FLAG_DEV_ID_OWNED;

	mutex_lock(&vdev->vdev.dev_set->lock);
	ret = vfio_pci_for_each_slot_or_bus(vdev->pdev, vfio_pci_fill_devs,
					    &fill, slot);
	mutex_unlock(&vdev->vdev.dev_set->lock);
	if (ret)
		return ret;

	hdr.count = fill.count;
	hdr.flags = fill.flags;

	if (copy_to_user(arg, &hdr, minsz))
		return -EFAULT;

	/* More affected devices than array slots: header copied, data short. */
	if (fill.count > fill.devices - arg->devices)
		return -ENOSPC;
	return 0;
}
/*
 * Group-FD flavor of VFIO_DEVICE_PCI_HOT_RESET: userspace proves
 * ownership of every affected device by passing one vfio group fd
 * per group.  Holds a reference on each group file across the reset.
 */
static int
vfio_pci_ioctl_pci_hot_reset_groups(struct vfio_pci_core_device *vdev,
				    int array_count, bool slot,
				    struct vfio_pci_hot_reset __user *arg)
{
	int32_t *group_fds;
	struct file **files;
	struct vfio_pci_group_info info;
	int file_idx, count = 0, ret = 0;

	/*
	 * We can't let userspace give us an arbitrarily large buffer to copy,
	 * so verify how many we think there could be. Note groups can have
	 * multiple devices so one group per device is the max.
	 */
	ret = vfio_pci_for_each_slot_or_bus(vdev->pdev, vfio_pci_count_devs,
					    &count, slot);
	if (ret)
		return ret;

	if (array_count > count)
		return -EINVAL;

	group_fds = kcalloc(array_count, sizeof(*group_fds), GFP_KERNEL);
	files = kcalloc(array_count, sizeof(*files), GFP_KERNEL);
	if (!group_fds || !files) {
		kfree(group_fds);
		kfree(files);
		return -ENOMEM;
	}

	if (copy_from_user(group_fds, arg->group_fds,
			   array_count * sizeof(*group_fds))) {
		kfree(group_fds);
		kfree(files);
		return -EFAULT;
	}

	/*
	 * Get the group file for each fd to ensure the group is held across
	 * the reset
	 */
	for (file_idx = 0; file_idx < array_count; file_idx++) {
		struct file *file = fget(group_fds[file_idx]);

		if (!file) {
			ret = -EBADF;
			break;
		}

		/* Ensure the FD is a vfio group FD.*/
		if (!vfio_file_is_group(file)) {
			fput(file);
			ret = -EINVAL;
			break;
		}

		files[file_idx] = file;
	}

	kfree(group_fds);

	/* release reference to groups on error */
	if (ret)
		goto hot_reset_release;

	info.count = array_count;
	info.files = files;

	ret = vfio_pci_dev_set_hot_reset(vdev->vdev.dev_set, &info, NULL);

hot_reset_release:
	/* Drops refs on files[0..file_idx-1]; file_idx marks the failure point. */
	for (file_idx--; file_idx >= 0; file_idx--)
		fput(files[file_idx]);

	kfree(files);
	return ret;
}
/*
 * VFIO_DEVICE_PCI_HOT_RESET handler: dispatch to the group-fd proof
 * path (legacy group users, count > 0) or the iommufd ownership path
 * (cdev users, count == 0).
 */
static int vfio_pci_ioctl_pci_hot_reset(struct vfio_pci_core_device *vdev,
					struct vfio_pci_hot_reset __user *arg)
{
	unsigned long minsz = offsetofend(struct vfio_pci_hot_reset, count);
	struct vfio_pci_hot_reset hdr;
	bool slot = false;

	if (copy_from_user(&hdr, arg, minsz))
		return -EFAULT;

	if (hdr.argsz < minsz || hdr.flags)
		return -EINVAL;

	/* zero-length array is only for cdev opened devices */
	if (!!hdr.count == vfio_device_cdev_opened(&vdev->vdev))
		return -EINVAL;

	/* Can we do a slot or bus reset or neither? */
	if (!pci_probe_reset_slot(vdev->pdev->slot))
		slot = true;
	else if (pci_probe_reset_bus(vdev->pdev->bus))
		return -ENODEV;

	if (hdr.count)
		return vfio_pci_ioctl_pci_hot_reset_groups(vdev, hdr.count, slot, arg);

	return vfio_pci_dev_set_hot_reset(vdev->vdev.dev_set, NULL,
					  vfio_iommufd_device_ictx(&vdev->vdev));
}
/*
 * VFIO_DEVICE_IOEVENTFD handler: validate the request (flags carry
 * only the access size, which must be a power of two; fd of -1 means
 * de-assign) and hand off to the ioeventfd core.
 */
static int vfio_pci_ioctl_ioeventfd(struct vfio_pci_core_device *vdev,
				    struct vfio_device_ioeventfd __user *arg)
{
	unsigned long minsz = offsetofend(struct vfio_device_ioeventfd, fd);
	struct vfio_device_ioeventfd req;
	int size;

	if (copy_from_user(&req, arg, minsz))
		return -EFAULT;

	if (req.argsz < minsz)
		return -EINVAL;

	if (req.flags & ~VFIO_DEVICE_IOEVENTFD_SIZE_MASK)
		return -EINVAL;

	size = req.flags & VFIO_DEVICE_IOEVENTFD_SIZE_MASK;

	/* Exactly one size bit set, and fd must be valid or -1 (unset). */
	if (hweight8(size) != 1 || req.fd < -1)
		return -EINVAL;

	return vfio_pci_ioeventfd(vdev, req.offset, req.data, size, req.fd);
}
/*
 * Common ioctl entry point for vfio-pci based drivers: decode the
 * command and dispatch to the per-ioctl helper above.
 */
long vfio_pci_core_ioctl(struct vfio_device *core_vdev, unsigned int cmd,
			 unsigned long arg)
{
	struct vfio_pci_core_device *vdev =
		container_of(core_vdev, struct vfio_pci_core_device, vdev);
	void __user *uarg = (void __user *)arg;

	switch (cmd) {
	case VFIO_DEVICE_GET_INFO:
		return vfio_pci_ioctl_get_info(vdev, uarg);
	case VFIO_DEVICE_GET_IRQ_INFO:
		return vfio_pci_ioctl_get_irq_info(vdev, uarg);
	case VFIO_DEVICE_GET_PCI_HOT_RESET_INFO:
		return vfio_pci_ioctl_get_pci_hot_reset_info(vdev, uarg);
	case VFIO_DEVICE_GET_REGION_INFO:
		return vfio_pci_ioctl_get_region_info(vdev, uarg);
	case VFIO_DEVICE_IOEVENTFD:
		return vfio_pci_ioctl_ioeventfd(vdev, uarg);
	case VFIO_DEVICE_PCI_HOT_RESET:
		return vfio_pci_ioctl_pci_hot_reset(vdev, uarg);
	case VFIO_DEVICE_RESET:
		return vfio_pci_ioctl_reset(vdev, uarg);
	case VFIO_DEVICE_SET_IRQS:
		return vfio_pci_ioctl_set_irqs(vdev, uarg);
	default:
		return -ENOTTY;
	}
}
EXPORT_SYMBOL_GPL(vfio_pci_core_ioctl);
/*
 * VFIO_DEVICE_FEATURE_PCI_VF_TOKEN: SET-only update of the PF's VF
 * token UUID.  Only meaningful on an SR-IOV PF (vf_token allocated).
 */
static int vfio_pci_core_feature_token(struct vfio_device *device, u32 flags,
				       uuid_t __user *arg, size_t argsz)
{
	struct vfio_pci_core_device *vdev =
		container_of(device, struct vfio_pci_core_device, vdev);
	uuid_t uuid;
	int ret;

	if (!vdev->vf_token)
		return -ENOTTY;
	/*
	 * We do not support GET of the VF Token UUID as this could
	 * expose the token of the previous device user.
	 */
	ret = vfio_check_feature(flags, argsz, VFIO_DEVICE_FEATURE_SET,
				 sizeof(uuid));
	if (ret != 1)
		return ret;

	if (copy_from_user(&uuid, arg, sizeof(uuid)))
		return -EFAULT;

	mutex_lock(&vdev->vf_token->lock);
	uuid_copy(&vdev->vf_token->uuid, &uuid);
	mutex_unlock(&vdev->vf_token->lock);
	return 0;
}
/*
 * VFIO_DEVICE_FEATURE dispatcher shared by vfio-pci based drivers:
 * low-power entry/exit and the PF VF-token feature.
 */
int vfio_pci_core_ioctl_feature(struct vfio_device *device, u32 flags,
				void __user *arg, size_t argsz)
{
	switch (flags & VFIO_DEVICE_FEATURE_MASK) {
	case VFIO_DEVICE_FEATURE_LOW_POWER_ENTRY:
		return vfio_pci_core_pm_entry(device, flags, arg, argsz);
	case VFIO_DEVICE_FEATURE_LOW_POWER_ENTRY_WITH_WAKEUP:
		return vfio_pci_core_pm_entry_with_wakeup(device, flags,
							  arg, argsz);
	case VFIO_DEVICE_FEATURE_LOW_POWER_EXIT:
		return vfio_pci_core_pm_exit(device, flags, arg, argsz);
	case VFIO_DEVICE_FEATURE_PCI_VF_TOKEN:
		return vfio_pci_core_feature_token(device, flags, arg, argsz);
	default:
		return -ENOTTY;
	}
}
EXPORT_SYMBOL_GPL(vfio_pci_core_ioctl_feature);
/*
 * Common read/write backend: resume the device, decode the region
 * index from the file offset, and dispatch to the per-region handler.
 * The ROM region is read-only.
 */
static ssize_t vfio_pci_rw(struct vfio_pci_core_device *vdev, char __user *buf,
			   size_t count, loff_t *ppos, bool iswrite)
{
	unsigned int index = VFIO_PCI_OFFSET_TO_INDEX(*ppos);
	int ret;

	if (index >= VFIO_PCI_NUM_REGIONS + vdev->num_regions)
		return -EINVAL;

	/* Device must be in D0 for config/BAR access. */
	ret = pm_runtime_resume_and_get(&vdev->pdev->dev);
	if (ret) {
		pci_info_ratelimited(vdev->pdev, "runtime resume failed %d\n",
				     ret);
		return -EIO;
	}

	switch (index) {
	case VFIO_PCI_CONFIG_REGION_INDEX:
		ret = vfio_pci_config_rw(vdev, buf, count, ppos, iswrite);
		break;

	case VFIO_PCI_ROM_REGION_INDEX:
		if (iswrite)
			ret = -EINVAL;
		else
			ret = vfio_pci_bar_rw(vdev, buf, count, ppos, false);
		break;

	case VFIO_PCI_BAR0_REGION_INDEX ... VFIO_PCI_BAR5_REGION_INDEX:
		ret = vfio_pci_bar_rw(vdev, buf, count, ppos, iswrite);
		break;

	case VFIO_PCI_VGA_REGION_INDEX:
		ret = vfio_pci_vga_rw(vdev, buf, count, ppos, iswrite);
		break;

	default:
		/* Driver-provided device-specific region. */
		index -= VFIO_PCI_NUM_REGIONS;
		ret = vdev->region[index].ops->rw(vdev, buf,
						   count, ppos, iswrite);
		break;
	}

	pm_runtime_put(&vdev->pdev->dev);
	return ret;
}
/* VFIO read entry point: forward to the region-aware r/w backend. */
ssize_t vfio_pci_core_read(struct vfio_device *core_vdev, char __user *buf,
		size_t count, loff_t *ppos)
{
	struct vfio_pci_core_device *vdev =
		container_of(core_vdev, struct vfio_pci_core_device, vdev);

	if (count == 0)
		return 0;

	return vfio_pci_rw(vdev, buf, count, ppos, false);
}
EXPORT_SYMBOL_GPL(vfio_pci_core_read);
/* VFIO write entry point: forward to the region-aware r/w backend. */
ssize_t vfio_pci_core_write(struct vfio_device *core_vdev, const char __user *buf,
		size_t count, loff_t *ppos)
{
	struct vfio_pci_core_device *vdev =
		container_of(core_vdev, struct vfio_pci_core_device, vdev);

	if (count == 0)
		return 0;

	return vfio_pci_rw(vdev, (char __user *)buf, count, ppos, true);
}
EXPORT_SYMBOL_GPL(vfio_pci_core_write);
/* Return 1 on zap and vma_lock acquired, 0 on contention (only with @try) */
static int vfio_pci_zap_and_vma_lock(struct vfio_pci_core_device *vdev, bool try)
{
	struct vfio_pci_mmap_vma *mmap_vma, *tmp;

	/*
	 * Lock ordering:
	 * vma_lock is nested under mmap_lock for vm_ops callback paths.
	 * The memory_lock semaphore is used by both code paths calling
	 * into this function to zap vmas and the vm_ops.fault callback
	 * to protect the memory enable state of the device.
	 *
	 * When zapping vmas we need to maintain the mmap_lock => vma_lock
	 * ordering, which requires using vma_lock to walk vma_list to
	 * acquire an mm, then dropping vma_lock to get the mmap_lock and
	 * reacquiring vma_lock. This logic is derived from similar
	 * requirements in uverbs_user_mmap_disassociate().
	 *
	 * mmap_lock must always be the top-level lock when it is taken.
	 * Therefore we can only hold the memory_lock write lock when
	 * vma_list is empty, as we'd need to take mmap_lock to clear
	 * entries. vma_list can only be guaranteed empty when holding
	 * vma_lock, thus memory_lock is nested under vma_lock.
	 *
	 * This enables the vm_ops.fault callback to acquire vma_lock,
	 * followed by memory_lock read lock, while already holding
	 * mmap_lock without risk of deadlock.
	 */
	while (1) {
		struct mm_struct *mm = NULL;

		if (try) {
			if (!mutex_trylock(&vdev->vma_lock))
				return 0;
		} else {
			mutex_lock(&vdev->vma_lock);
		}
		/* Find an mm we can pin; prune entries whose mm is gone. */
		while (!list_empty(&vdev->vma_list)) {
			mmap_vma = list_first_entry(&vdev->vma_list,
						    struct vfio_pci_mmap_vma,
						    vma_next);
			mm = mmap_vma->vma->vm_mm;
			if (mmget_not_zero(mm))
				break;

			list_del(&mmap_vma->vma_next);
			kfree(mmap_vma);
			mm = NULL;
		}
		if (!mm)
			return 1;	/* list drained; vma_lock still held */
		mutex_unlock(&vdev->vma_lock);

		if (try) {
			if (!mmap_read_trylock(mm)) {
				mmput(mm);
				return 0;
			}
		} else {
			mmap_read_lock(mm);
		}
		if (try) {
			if (!mutex_trylock(&vdev->vma_lock)) {
				mmap_read_unlock(mm);
				mmput(mm);
				return 0;
			}
		} else {
			mutex_lock(&vdev->vma_lock);
		}
		/* Zap and untrack every vma that belongs to this mm. */
		list_for_each_entry_safe(mmap_vma, tmp,
					 &vdev->vma_list, vma_next) {
			struct vm_area_struct *vma = mmap_vma->vma;

			if (vma->vm_mm != mm)
				continue;

			list_del(&mmap_vma->vma_next);
			kfree(mmap_vma);

			zap_vma_ptes(vma, vma->vm_start,
				     vma->vm_end - vma->vm_start);
		}
		mutex_unlock(&vdev->vma_lock);
		mmap_read_unlock(mm);
		mmput(mm);
	}
}
/*
 * Zap all user mappings and take memory_lock for write.  The helper
 * returns with vma_lock held and vma_list empty, which is the only
 * state in which memory_lock may be write-locked (see lock-ordering
 * comment in vfio_pci_zap_and_vma_lock()).
 */
void vfio_pci_zap_and_down_write_memory_lock(struct vfio_pci_core_device *vdev)
{
	vfio_pci_zap_and_vma_lock(vdev, false);
	down_write(&vdev->memory_lock);
	mutex_unlock(&vdev->vma_lock);
}
/*
 * Take memory_lock for write and make sure memory decode is enabled.
 * Returns the original PCI_COMMAND value so the caller can restore it
 * via vfio_pci_memory_unlock_and_restore().
 */
u16 vfio_pci_memory_lock_and_enable(struct vfio_pci_core_device *vdev)
{
	struct pci_dev *pdev = vdev->pdev;
	u16 orig_cmd;

	down_write(&vdev->memory_lock);
	pci_read_config_word(pdev, PCI_COMMAND, &orig_cmd);
	if (!(orig_cmd & PCI_COMMAND_MEMORY))
		pci_write_config_word(pdev, PCI_COMMAND,
				      orig_cmd | PCI_COMMAND_MEMORY);

	return orig_cmd;
}
/* Restore the saved PCI_COMMAND value and drop the memory write lock. */
void vfio_pci_memory_unlock_and_restore(struct vfio_pci_core_device *vdev, u16 cmd)
{
	struct pci_dev *pdev = vdev->pdev;

	pci_write_config_word(pdev, PCI_COMMAND, cmd);
	up_write(&vdev->memory_lock);
}
/* Caller holds vma_lock */
static int __vfio_pci_add_vma(struct vfio_pci_core_device *vdev,
struct vm_area_struct *vma)
{
struct vfio_pci_mmap_vma *mmap_vma;
mmap_vma = kmalloc(sizeof(*mmap_vma), GFP_KERNEL_ACCOUNT);
if (!mmap_vma)
return -ENOMEM;
mmap_vma->vma = vma;
list_add(&mmap_vma->vma_next, &vdev->vma_list);
return 0;
}
/*
 * Zap mmaps on open so that we can fault them in on access and therefore
 * our vma_list only tracks mappings accessed since last zap.
 */
static void vfio_pci_mmap_open(struct vm_area_struct *vma)
{
	/* Forces the next access through vfio_pci_mmap_fault(). */
	zap_vma_ptes(vma, vma->vm_start, vma->vm_end - vma->vm_start);
}
/* Drop the tracking entry for a vma being torn down, if one exists. */
static void vfio_pci_mmap_close(struct vm_area_struct *vma)
{
	struct vfio_pci_core_device *vdev = vma->vm_private_data;
	struct vfio_pci_mmap_vma *entry;

	mutex_lock(&vdev->vma_lock);
	list_for_each_entry(entry, &vdev->vma_list, vma_next) {
		if (entry->vma != vma)
			continue;
		list_del(&entry->vma_next);
		kfree(entry);
		break;
	}
	mutex_unlock(&vdev->vma_lock);
}
/*
 * Fault handler for BAR mmaps: populate the entire vma on first access
 * and record it in vma_list so it can be zapped when memory access is
 * disabled.  Lock order (vma_lock then memory_lock read) matches the
 * ordering documented in vfio_pci_zap_and_vma_lock().
 */
static vm_fault_t vfio_pci_mmap_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct vfio_pci_core_device *vdev = vma->vm_private_data;
	struct vfio_pci_mmap_vma *mmap_vma;
	vm_fault_t ret = VM_FAULT_NOPAGE;

	mutex_lock(&vdev->vma_lock);
	down_read(&vdev->memory_lock);

	/*
	 * Memory region cannot be accessed if the low power feature is engaged
	 * or memory access is disabled.
	 */
	if (vdev->pm_runtime_engaged || !__vfio_pci_memory_enabled(vdev)) {
		ret = VM_FAULT_SIGBUS;
		goto up_out;
	}

	/*
	 * We populate the whole vma on fault, so we need to test whether
	 * the vma has already been mapped, such as for concurrent faults
	 * to the same vma.  io_remap_pfn_range() will trigger a BUG_ON if
	 * we ask it to fill the same range again.
	 */
	list_for_each_entry(mmap_vma, &vdev->vma_list, vma_next) {
		if (mmap_vma->vma == vma)
			goto up_out;
	}

	if (io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
			       vma->vm_end - vma->vm_start,
			       vma->vm_page_prot)) {
		ret = VM_FAULT_SIGBUS;
		zap_vma_ptes(vma, vma->vm_start, vma->vm_end - vma->vm_start);
		goto up_out;
	}

	if (__vfio_pci_add_vma(vdev, vma)) {
		/* Can't track it: unmap again so a retry re-faults. */
		ret = VM_FAULT_OOM;
		zap_vma_ptes(vma, vma->vm_start, vma->vm_end - vma->vm_start);
	}

up_out:
	up_read(&vdev->memory_lock);
	mutex_unlock(&vdev->vma_lock);
	return ret;
}
/* vm_ops for BAR mmaps: fault-populated, zap-on-open, tracked on close. */
static const struct vm_operations_struct vfio_pci_mmap_ops = {
	.open = vfio_pci_mmap_open,
	.close = vfio_pci_mmap_close,
	.fault = vfio_pci_mmap_fault,
};
/*
 * mmap entry point: validate the request against the region layout,
 * claim/iomap the BAR on first use, and arrange for the mapping to be
 * populated lazily via vfio_pci_mmap_fault().
 */
int vfio_pci_core_mmap(struct vfio_device *core_vdev, struct vm_area_struct *vma)
{
	struct vfio_pci_core_device *vdev =
		container_of(core_vdev, struct vfio_pci_core_device, vdev);
	struct pci_dev *pdev = vdev->pdev;
	unsigned int index;
	u64 phys_len, req_len, pgoff, req_start;
	int ret;

	/* Region index is encoded in the high bits of the page offset. */
	index = vma->vm_pgoff >> (VFIO_PCI_OFFSET_SHIFT - PAGE_SHIFT);

	if (index >= VFIO_PCI_NUM_REGIONS + vdev->num_regions)
		return -EINVAL;
	if (vma->vm_end < vma->vm_start)
		return -EINVAL;
	if ((vma->vm_flags & VM_SHARED) == 0)
		return -EINVAL;
	if (index >= VFIO_PCI_NUM_REGIONS) {
		/* Device-specific region: delegate to its ops, if mmap'able. */
		int regnum = index - VFIO_PCI_NUM_REGIONS;
		struct vfio_pci_region *region = vdev->region + regnum;

		if (region->ops && region->ops->mmap &&
		    (region->flags & VFIO_REGION_INFO_FLAG_MMAP))
			return region->ops->mmap(vdev, region, vma);
		return -EINVAL;
	}
	if (index >= VFIO_PCI_ROM_REGION_INDEX)
		return -EINVAL;
	if (!vdev->bar_mmap_supported[index])
		return -EINVAL;

	/* Requested window must fall within the (page-aligned) BAR. */
	phys_len = PAGE_ALIGN(pci_resource_len(pdev, index));
	req_len = vma->vm_end - vma->vm_start;
	pgoff = vma->vm_pgoff &
		((1U << (VFIO_PCI_OFFSET_SHIFT - PAGE_SHIFT)) - 1);
	req_start = pgoff << PAGE_SHIFT;

	if (req_start + req_len > phys_len)
		return -EINVAL;

	/*
	 * Even though we don't make use of the barmap for the mmap,
	 * we need to request the region and the barmap tracks that.
	 */
	if (!vdev->barmap[index]) {
		ret = pci_request_selected_regions(pdev,
						   1 << index, "vfio-pci");
		if (ret)
			return ret;

		vdev->barmap[index] = pci_iomap(pdev, index, 0);
		if (!vdev->barmap[index]) {
			pci_release_selected_regions(pdev, 1 << index);
			return -ENOMEM;
		}
	}

	vma->vm_private_data = vdev;
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	vma->vm_pgoff = (pci_resource_start(pdev, index) >> PAGE_SHIFT) + pgoff;

	/*
	 * See remap_pfn_range(), called from vfio_pci_fault() but we can't
	 * change vm_flags within the fault handler.  Set them now.
	 */
	vm_flags_set(vma, VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP);
	vma->vm_ops = &vfio_pci_mmap_ops;

	return 0;
}
EXPORT_SYMBOL_GPL(vfio_pci_core_mmap);
/*
 * Relay a device-release request to userspace via the REQ eventfd.
 * @count is the number of times the request has been made; used to
 * rate-limit the notice and to warn once when no eventfd is armed.
 */
void vfio_pci_core_request(struct vfio_device *core_vdev, unsigned int count)
{
	struct vfio_pci_core_device *vdev =
		container_of(core_vdev, struct vfio_pci_core_device, vdev);
	struct pci_dev *pdev = vdev->pdev;

	mutex_lock(&vdev->igate);

	if (vdev->req_trigger) {
		if (!(count % 10))
			pci_notice_ratelimited(pdev,
				"Relaying device request to user (#%u)\n",
				count);
		eventfd_signal(vdev->req_trigger, 1);
	} else if (count == 0) {
		pci_warn(pdev,
			"No device request channel registered, blocked until released by user\n");
	}

	mutex_unlock(&vdev->igate);
}
EXPORT_SYMBOL_GPL(vfio_pci_core_request);
/*
 * Validate (or, for an idle PF, set) the shared VF token.
 * @vf_token: true if the user supplied a token; @uuid: the parsed value.
 * Returns 0 on success, -EACCES on a missing/mismatched token,
 * -EINVAL when a token is supplied where none is meaningful.
 */
static int vfio_pci_validate_vf_token(struct vfio_pci_core_device *vdev,
				      bool vf_token, uuid_t *uuid)
{
	/*
	 * There's always some degree of trust or collaboration between SR-IOV
	 * PF and VFs, even if just that the PF hosts the SR-IOV capability and
	 * can disrupt VFs with a reset, but often the PF has more explicit
	 * access to deny service to the VF or access data passed through the
	 * VF.  We therefore require an opt-in via a shared VF token (UUID) to
	 * represent this trust.  This both prevents that a VF driver might
	 * assume the PF driver is a trusted, in-kernel driver, and also that
	 * a PF driver might be replaced with a rogue driver, unknown to in-use
	 * VF drivers.
	 *
	 * Therefore when presented with a VF, if the PF is a vfio device and
	 * it is bound to the vfio-pci driver, the user needs to provide a VF
	 * token to access the device, in the form of appending a vf_token to
	 * the device name, for example:
	 *
	 * "0000:04:10.0 vf_token=bd8d9d2b-5a5f-4f5a-a211-f591514ba1f3"
	 *
	 * When presented with a PF which has VFs in use, the user must also
	 * provide the current VF token to prove collaboration with existing
	 * VF users.  If VFs are not in use, the VF token provided for the PF
	 * device will act to set the VF token.
	 *
	 * If the VF token is provided but unused, an error is generated.
	 */
	if (vdev->pdev->is_virtfn) {
		struct vfio_pci_core_device *pf_vdev = vdev->sriov_pf_core_dev;
		bool match;

		if (!pf_vdev) {
			if (!vf_token)
				return 0; /* PF is not vfio-pci, no VF token */

			pci_info_ratelimited(vdev->pdev,
				"VF token incorrectly provided, PF not bound to vfio-pci\n");
			return -EINVAL;
		}

		if (!vf_token) {
			pci_info_ratelimited(vdev->pdev,
				"VF token required to access device\n");
			return -EACCES;
		}

		mutex_lock(&pf_vdev->vf_token->lock);
		match = uuid_equal(uuid, &pf_vdev->vf_token->uuid);
		mutex_unlock(&pf_vdev->vf_token->lock);

		if (!match) {
			pci_info_ratelimited(vdev->pdev,
				"Incorrect VF token provided for device\n");
			return -EACCES;
		}
	} else if (vdev->vf_token) {
		/* PF with vf_token allocated (SR-IOV capable). */
		mutex_lock(&vdev->vf_token->lock);
		if (vdev->vf_token->users) {
			if (!vf_token) {
				mutex_unlock(&vdev->vf_token->lock);
				pci_info_ratelimited(vdev->pdev,
					"VF token required to access device\n");
				return -EACCES;
			}

			if (!uuid_equal(uuid, &vdev->vf_token->uuid)) {
				mutex_unlock(&vdev->vf_token->lock);
				pci_info_ratelimited(vdev->pdev,
					"Incorrect VF token provided for device\n");
				return -EACCES;
			}
		} else if (vf_token) {
			/* No VFs in use: adopt the supplied token. */
			uuid_copy(&vdev->vf_token->uuid, uuid);
		}

		mutex_unlock(&vdev->vf_token->lock);
	} else if (vf_token) {
		pci_info_ratelimited(vdev->pdev,
			"VF token incorrectly provided, not a PF or VF\n");
		return -EINVAL;
	}

	return 0;
}
#define VF_TOKEN_ARG "vf_token="

/*
 * Match a user-supplied device string against this device.  The string
 * is the PCI name optionally followed by " vf_token=<uuid>".  Returns
 * 1 on match (after VF-token validation), 0 on no match, or a negative
 * errno on a malformed or rejected option.
 */
int vfio_pci_core_match(struct vfio_device *core_vdev, char *buf)
{
	struct vfio_pci_core_device *vdev =
		container_of(core_vdev, struct vfio_pci_core_device, vdev);
	bool vf_token = false;
	uuid_t uuid;
	int ret;

	if (strncmp(pci_name(vdev->pdev), buf, strlen(pci_name(vdev->pdev))))
		return 0; /* No match */

	if (strlen(buf) > strlen(pci_name(vdev->pdev))) {
		buf += strlen(pci_name(vdev->pdev));

		if (*buf != ' ')
			return 0; /* No match: non-whitespace after name */

		/* Parse space-separated options; only vf_token= is known. */
		while (*buf) {
			if (*buf == ' ') {
				buf++;
				continue;
			}

			if (!vf_token && !strncmp(buf, VF_TOKEN_ARG,
						  strlen(VF_TOKEN_ARG))) {
				buf += strlen(VF_TOKEN_ARG);

				if (strlen(buf) < UUID_STRING_LEN)
					return -EINVAL;

				ret = uuid_parse(buf, &uuid);
				if (ret)
					return ret;

				vf_token = true;
				buf += UUID_STRING_LEN;
			} else {
				/* Unknown/duplicate option */
				return -EINVAL;
			}
		}
	}

	ret = vfio_pci_validate_vf_token(vdev, vf_token, &uuid);
	if (ret)
		return ret;

	return 1; /* Match */
}
EXPORT_SYMBOL_GPL(vfio_pci_core_match);
/*
 * PCI bus notifier registered by an SR-IOV PF: when one of our own VFs
 * appears, set its driver_override so it binds to the same vfio driver;
 * warn if a VF nonetheless binds to a different driver.
 */
static int vfio_pci_bus_notifier(struct notifier_block *nb,
				 unsigned long action, void *data)
{
	struct vfio_pci_core_device *vdev = container_of(nb,
						    struct vfio_pci_core_device, nb);
	struct device *dev = data;
	struct pci_dev *pdev = to_pci_dev(dev);
	struct pci_dev *physfn = pci_physfn(pdev);

	if (action == BUS_NOTIFY_ADD_DEVICE &&
	    pdev->is_virtfn && physfn == vdev->pdev) {
		pci_info(vdev->pdev, "Captured SR-IOV VF %s driver_override\n",
			 pci_name(pdev));
		/*
		 * NOTE(review): kasprintf() result is not checked; on
		 * allocation failure the override silently stays unset —
		 * presumably tolerable, but confirm against driver core.
		 */
		pdev->driver_override = kasprintf(GFP_KERNEL, "%s",
						  vdev->vdev.ops->name);
	} else if (action == BUS_NOTIFY_BOUND_DRIVER &&
		   pdev->is_virtfn && physfn == vdev->pdev) {
		struct pci_driver *drv = pci_dev_driver(pdev);

		if (drv && drv != pci_dev_driver(vdev->pdev))
			pci_warn(vdev->pdev,
				 "VF %s bound to driver %s while PF bound to driver %s\n",
				 pci_name(pdev), drv->name,
				 pci_dev_driver(vdev->pdev)->name);
	}

	return 0;
}
static int vfio_pci_vf_init(struct vfio_pci_core_device *vdev)
{
struct pci_dev *pdev = vdev->pdev;
struct vfio_pci_core_device *cur;
struct pci_dev *physfn;
int ret;
if (pdev->is_virtfn) {
/*
* If this VF was created by our vfio_pci_core_sriov_configure()
* then we can find the PF vfio_pci_core_device now, and due to
* the locking in pci_disable_sriov() it cannot change until
* this VF device driver is removed.
*/
physfn = pci_physfn(vdev->pdev);
mutex_lock(&vfio_pci_sriov_pfs_mutex);
list_for_each_entry(cur, &vfio_pci_sriov_pfs, sriov_pfs_item) {
if (cur->pdev == physfn) {
vdev->sriov_pf_core_dev = cur;
break;
}
}
mutex_unlock(&vfio_pci_sriov_pfs_mutex);
return 0;
}
/* Not a SRIOV PF */
if (!pdev->is_physfn)
return 0;
vdev->vf_token = kzalloc(sizeof(*vdev->vf_token), GFP_KERNEL);
if (!vdev->vf_token)
return -ENOMEM;
mutex_init(&vdev->vf_token->lock);
uuid_gen(&vdev->vf_token->uuid);
vdev->nb.notifier_call = vfio_pci_bus_notifier;
ret = bus_register_notifier(&pci_bus_type, &vdev->nb);
if (ret) {
kfree(vdev->vf_token);
return ret;
}
return 0;
}
static void vfio_pci_vf_uninit(struct vfio_pci_core_device *vdev)
{
if (!vdev->vf_token)
return;
bus_unregister_notifier(&pci_bus_type, &vdev->nb);
WARN_ON(vdev->vf_token->users);
mutex_destroy(&vdev->vf_token->lock);
kfree(vdev->vf_token);
}
/*
 * Claim VGA routing for a VGA-class device: evict conflicting
 * firmware framebuffer drivers, register as the VGA arbiter client,
 * and set the initial legacy decode state.
 */
static int vfio_pci_vga_init(struct vfio_pci_core_device *vdev)
{
	struct pci_dev *pdev = vdev->pdev;
	int ret;

	if (!vfio_pci_is_vga(pdev))
		return 0;

	ret = aperture_remove_conflicting_pci_devices(pdev, vdev->vdev.ops->name);
	if (ret)
		return ret;

	ret = vga_client_register(pdev, vfio_pci_set_decode);
	if (ret)
		return ret;
	vga_set_legacy_decoding(pdev, vfio_pci_set_decode(pdev, false));
	return 0;
}
/*
 * Undo vfio_pci_vga_init(): drop the arbiter client registration and
 * restore full legacy VGA decoding for the device.
 */
static void vfio_pci_vga_uninit(struct vfio_pci_core_device *vdev)
{
	struct pci_dev *pdev = vdev->pdev;
	unsigned int decodes = VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM |
			       VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM;

	if (!vfio_pci_is_vga(pdev))
		return;

	vga_client_unregister(pdev);
	vga_set_legacy_decoding(pdev, decodes);
}
/*
 * vfio_device init callback: initialize all locks, lists and state of
 * the embedded vfio_pci_core_device.  Pairs with
 * vfio_pci_core_release_dev().
 */
int vfio_pci_core_init_dev(struct vfio_device *core_vdev)
{
	struct vfio_pci_core_device *vdev =
		container_of(core_vdev, struct vfio_pci_core_device, vdev);

	vdev->pdev = to_pci_dev(core_vdev->dev);
	/* VFIO_PCI_NUM_IRQS doubles as "no IRQ mode selected". */
	vdev->irq_type = VFIO_PCI_NUM_IRQS;
	mutex_init(&vdev->igate);
	spin_lock_init(&vdev->irqlock);
	mutex_init(&vdev->ioeventfds_lock);
	INIT_LIST_HEAD(&vdev->dummy_resources_list);
	INIT_LIST_HEAD(&vdev->ioeventfds_list);
	mutex_init(&vdev->vma_lock);
	INIT_LIST_HEAD(&vdev->vma_list);
	INIT_LIST_HEAD(&vdev->sriov_pfs_item);
	init_rwsem(&vdev->memory_lock);
	xa_init(&vdev->ctx);

	return 0;
}
EXPORT_SYMBOL_GPL(vfio_pci_core_init_dev);
/*
 * vfio_device release callback: destroy locks and free the memory
 * allocated during device lifetime (region array, PM save area).
 */
void vfio_pci_core_release_dev(struct vfio_device *core_vdev)
{
	struct vfio_pci_core_device *vdev =
		container_of(core_vdev, struct vfio_pci_core_device, vdev);

	mutex_destroy(&vdev->igate);
	mutex_destroy(&vdev->ioeventfds_lock);
	mutex_destroy(&vdev->vma_lock);
	kfree(vdev->region);
	kfree(vdev->pm_save);
}
EXPORT_SYMBOL_GPL(vfio_pci_core_release_dev);
/*
 * Register a vfio-pci based device with the vfio core: validate the
 * driver's setup (drvdata, header type, migration/dirty-log ops),
 * assign a reset dev_set, initialize SR-IOV and VGA state, configure
 * runtime PM, and finally expose the device to userspace.
 */
int vfio_pci_core_register_device(struct vfio_pci_core_device *vdev)
{
	struct pci_dev *pdev = vdev->pdev;
	struct device *dev = &pdev->dev;
	int ret;

	/* Drivers must set the vfio_pci_core_device to their drvdata */
	if (WARN_ON(vdev != dev_get_drvdata(dev)))
		return -EINVAL;

	/* Only normal (type 0) functions; no bridges/cardbus. */
	if (pdev->hdr_type != PCI_HEADER_TYPE_NORMAL)
		return -EINVAL;

	if (vdev->vdev.mig_ops) {
		if (!(vdev->vdev.mig_ops->migration_get_state &&
		      vdev->vdev.mig_ops->migration_set_state &&
		      vdev->vdev.mig_ops->migration_get_data_size) ||
		    !(vdev->vdev.migration_flags & VFIO_MIGRATION_STOP_COPY))
			return -EINVAL;
	}

	if (vdev->vdev.log_ops && !(vdev->vdev.log_ops->log_start &&
	    vdev->vdev.log_ops->log_stop &&
	    vdev->vdev.log_ops->log_read_and_clear))
		return -EINVAL;

	/*
	 * Prevent binding to PFs with VFs enabled, the VFs might be in use
	 * by the host or other users.  We cannot capture the VFs if they
	 * already exist, nor can we track VF users.  Disabling SR-IOV here
	 * would initiate removing the VFs, which would unbind the driver,
	 * which is prone to blocking if that VF is also in use by vfio-pci.
	 * Just reject these PFs and let the user sort it out.
	 */
	if (pci_num_vf(pdev)) {
		pci_warn(pdev, "Cannot bind to PF with SR-IOV enabled\n");
		return -EBUSY;
	}

	if (pci_is_root_bus(pdev->bus)) {
		ret = vfio_assign_device_set(&vdev->vdev, vdev);
	} else if (!pci_probe_reset_slot(pdev->slot)) {
		ret = vfio_assign_device_set(&vdev->vdev, pdev->slot);
	} else {
		/*
		 * If there is no slot reset support for this device, the whole
		 * bus needs to be grouped together to support bus-wide resets.
		 */
		ret = vfio_assign_device_set(&vdev->vdev, pdev->bus);
	}

	if (ret)
		return ret;
	ret = vfio_pci_vf_init(vdev);
	if (ret)
		return ret;
	ret = vfio_pci_vga_init(vdev);
	if (ret)
		goto out_vf;

	vfio_pci_probe_power_state(vdev);

	/*
	 * pci-core sets the device power state to an unknown value at
	 * bootup and after being removed from a driver.  The only
	 * transition it allows from this unknown state is to D0, which
	 * typically happens when a driver calls pci_enable_device().
	 * We're not ready to enable the device yet, but we do want to
	 * be able to get to D3.  Therefore first do a D0 transition
	 * before enabling runtime PM.
	 */
	vfio_pci_set_power_state(vdev, PCI_D0);

	dev->driver->pm = &vfio_pci_core_pm_ops;
	pm_runtime_allow(dev);
	if (!disable_idle_d3)
		pm_runtime_put(dev);

	ret = vfio_register_group_dev(&vdev->vdev);
	if (ret)
		goto out_power;
	return 0;

out_power:
	if (!disable_idle_d3)
		pm_runtime_get_noresume(dev);

	pm_runtime_forbid(dev);
out_vf:
	vfio_pci_vf_uninit(vdev);
	return ret;
}
EXPORT_SYMBOL_GPL(vfio_pci_core_register_device);
/*
 * Unregister a vfio-pci based device: disable any SR-IOV VFs we
 * created, remove from vfio, then unwind VF/VGA/runtime-PM state in
 * reverse of vfio_pci_core_register_device().
 */
void vfio_pci_core_unregister_device(struct vfio_pci_core_device *vdev)
{
	vfio_pci_core_sriov_configure(vdev, 0);

	vfio_unregister_group_dev(&vdev->vdev);

	vfio_pci_vf_uninit(vdev);
	vfio_pci_vga_uninit(vdev);

	if (!disable_idle_d3)
		pm_runtime_get_noresume(&vdev->pdev->dev);

	pm_runtime_forbid(&vdev->pdev->dev);
}
EXPORT_SYMBOL_GPL(vfio_pci_core_unregister_device);
/*
 * AER error_detected callback: forward the event to userspace via the
 * ERR eventfd (if armed) and report the error as recoverable so the
 * user gets a chance to handle it.
 */
pci_ers_result_t vfio_pci_core_aer_err_detected(struct pci_dev *pdev,
						pci_channel_state_t state)
{
	struct vfio_pci_core_device *vdev = dev_get_drvdata(&pdev->dev);

	mutex_lock(&vdev->igate);

	if (vdev->err_trigger)
		eventfd_signal(vdev->err_trigger, 1);

	mutex_unlock(&vdev->igate);

	return PCI_ERS_RESULT_CAN_RECOVER;
}
EXPORT_SYMBOL_GPL(vfio_pci_core_aer_err_detected);
/*
 * Enable (@nr_virtfn > 0) or disable (@nr_virtfn == 0) SR-IOV on the PF
 * backing @vdev.
 *
 * Returns @nr_virtfn on successful enable, 0 on disable, negative errno
 * on failure. Caller must hold the device lock.
 */
int vfio_pci_core_sriov_configure(struct vfio_pci_core_device *vdev,
				  int nr_virtfn)
{
	struct pci_dev *pdev = vdev->pdev;
	int ret = 0;

	device_lock_assert(&pdev->dev);

	if (nr_virtfn) {
		mutex_lock(&vfio_pci_sriov_pfs_mutex);
		/*
		 * The thread that adds the vdev to the list is the only thread
		 * that gets to call pci_enable_sriov() and we will only allow
		 * it to be called once without going through
		 * pci_disable_sriov()
		 */
		if (!list_empty(&vdev->sriov_pfs_item)) {
			ret = -EINVAL;
			goto out_unlock;
		}
		list_add_tail(&vdev->sriov_pfs_item, &vfio_pci_sriov_pfs);
		mutex_unlock(&vfio_pci_sriov_pfs_mutex);

		/*
		 * The PF power state should always be higher than the VF power
		 * state. The PF can be in low power state either with runtime
		 * power management (when there is no user) or PCI_PM_CTRL
		 * register write by the user. If PF is in the low power state,
		 * then change the power state to D0 first before enabling
		 * SR-IOV. Also, this function can be called at any time, and
		 * userspace PCI_PM_CTRL write can race against this code path,
		 * so protect the same with 'memory_lock'.
		 */
		ret = pm_runtime_resume_and_get(&pdev->dev);
		if (ret)
			goto out_del;

		down_write(&vdev->memory_lock);
		vfio_pci_set_power_state(vdev, PCI_D0);
		ret = pci_enable_sriov(pdev, nr_virtfn);
		up_write(&vdev->memory_lock);
		if (ret) {
			/* Drop the PM reference taken just above */
			pm_runtime_put(&pdev->dev);
			goto out_del;
		}
		return nr_virtfn;
	}

	/* Disable path: drop VFs and the PM reference held since enable */
	if (pci_num_vf(pdev)) {
		pci_disable_sriov(pdev);
		pm_runtime_put(&pdev->dev);
	}

	/* The disable path intentionally falls through to unlink the PF */
out_del:
	mutex_lock(&vfio_pci_sriov_pfs_mutex);
	list_del_init(&vdev->sriov_pfs_item);
out_unlock:
	mutex_unlock(&vfio_pci_sriov_pfs_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(vfio_pci_core_sriov_configure);
/* Minimal PCI error handlers: only error_detected is implemented */
const struct pci_error_handlers vfio_pci_core_err_handlers = {
	.error_detected = vfio_pci_core_aer_err_detected,
};
EXPORT_SYMBOL_GPL(vfio_pci_core_err_handlers);
/*
 * Return true when @vdev belongs to at least one of the group files the
 * user supplied; a NULL @groups never matches anything.
 */
static bool vfio_dev_in_groups(struct vfio_device *vdev,
			       struct vfio_pci_group_info *groups)
{
	unsigned int idx;

	if (!groups)
		return false;

	for (idx = 0; idx < groups->count; idx++) {
		if (vfio_file_has_dev(groups->files[idx], vdev))
			return true;
	}

	return false;
}
/*
 * pci bus-walk callback: 0 when @pdev is a member of the dev_set passed
 * in @data, -ENODEV otherwise (aborting the walk with an error).
 */
static int vfio_pci_is_device_in_set(struct pci_dev *pdev, void *data)
{
	struct vfio_device_set *dev_set = data;

	if (vfio_find_device_in_devset(dev_set, &pdev->dev))
		return 0;

	return -ENODEV;
}
/*
 * vfio-core considers a group to be viable and will create a vfio_device even
 * if some devices are bound to drivers like pci-stub or pcieport. Here we
 * require all PCI devices to be inside our dev_set since that ensures they stay
 * put and that every driver controlling the device can co-ordinate with the
 * device reset.
 *
 * Returns the pci_dev to pass to pci_reset_bus() if every PCI device to be
 * reset is inside the dev_set, and pci_reset_bus() can succeed. NULL otherwise.
 */
static struct pci_dev *
vfio_pci_dev_set_resettable(struct vfio_device_set *dev_set)
{
	struct pci_dev *pdev;

	lockdep_assert_held(&dev_set->lock);

	/*
	 * By definition all PCI devices in the dev_set share the same PCI
	 * reset, so any pci_dev will have the same outcomes for
	 * pci_probe_reset_*() and pci_reset_bus().
	 */
	pdev = list_first_entry(&dev_set->device_list,
				struct vfio_pci_core_device,
				vdev.dev_set_list)->pdev;

	/* pci_reset_bus() is supported */
	if (pci_probe_reset_slot(pdev->slot) && pci_probe_reset_bus(pdev->bus))
		return NULL;

	/*
	 * Walk the slot (preferred) or bus and fail if any affected device
	 * is outside this dev_set.
	 */
	if (vfio_pci_for_each_slot_or_bus(pdev, vfio_pci_is_device_in_set,
					  dev_set,
					  !pci_probe_reset_slot(pdev->slot)))
		return NULL;
	return pdev;
}
/*
 * Resume and take a runtime-PM reference on every device in @dev_set.
 * On failure, the references already taken are dropped in reverse order
 * and the pm_runtime_resume_and_get() error is returned.
 */
static int vfio_pci_dev_set_pm_runtime_get(struct vfio_device_set *dev_set)
{
	struct vfio_pci_core_device *cur;
	int ret;

	list_for_each_entry(cur, &dev_set->device_list, vdev.dev_set_list) {
		ret = pm_runtime_resume_and_get(&cur->pdev->dev);
		if (ret)
			goto unwind;
	}

	return 0;

unwind:
	/* Walks backwards from the failing entry, skipping it */
	list_for_each_entry_continue_reverse(cur, &dev_set->device_list,
					     vdev.dev_set_list)
		pm_runtime_put(&cur->pdev->dev);

	return ret;
}
/*
 * We need to get memory_lock for each device, but devices can share mmap_lock,
 * therefore we need to zap and hold the vma_lock for each device, and only then
 * get each memory_lock.
 */
static int vfio_pci_dev_set_hot_reset(struct vfio_device_set *dev_set,
				      struct vfio_pci_group_info *groups,
				      struct iommufd_ctx *iommufd_ctx)
{
	struct vfio_pci_core_device *cur_mem;
	struct vfio_pci_core_device *cur_vma;
	struct vfio_pci_core_device *cur;
	struct pci_dev *pdev;
	bool is_mem = true;
	int ret;

	mutex_lock(&dev_set->lock);
	/*
	 * Seed cur_mem with the list head so that, if we unwind before the
	 * memory_lock loop runs, the err_undo walk flips is_mem to false on
	 * the first entry and only releases vma_locks.
	 */
	cur_mem = list_first_entry(&dev_set->device_list,
				   struct vfio_pci_core_device,
				   vdev.dev_set_list);

	pdev = vfio_pci_dev_set_resettable(dev_set);
	if (!pdev) {
		ret = -EINVAL;
		goto err_unlock;
	}

	/*
	 * Some of the devices in the dev_set can be in the runtime suspended
	 * state. Increment the usage count for all the devices in the dev_set
	 * before reset and decrement the same after reset.
	 */
	ret = vfio_pci_dev_set_pm_runtime_get(dev_set);
	if (ret)
		goto err_unlock;

	list_for_each_entry(cur_vma, &dev_set->device_list, vdev.dev_set_list) {
		bool owned;

		/*
		 * Test whether all the affected devices can be reset by the
		 * user.
		 *
		 * If called from a group opened device and the user provides
		 * a set of groups, all the devices in the dev_set should be
		 * contained by the set of groups provided by the user.
		 *
		 * If called from a cdev opened device and the user provides
		 * a zero-length array, all the devices in the dev_set must
		 * be bound to the same iommufd_ctx as the input iommufd_ctx.
		 * If there is any device that has not been bound to any
		 * iommufd_ctx yet, check if its iommu_group has any device
		 * bound to the input iommufd_ctx. Such devices can be
		 * considered owned by the input iommufd_ctx as the device
		 * cannot be owned by another iommufd_ctx when its iommu_group
		 * is owned.
		 *
		 * Otherwise, reset is not allowed.
		 */
		if (iommufd_ctx) {
			int devid = vfio_iommufd_get_dev_id(&cur_vma->vdev,
							    iommufd_ctx);

			owned = (devid > 0 || devid == -ENOENT);
		} else {
			owned = vfio_dev_in_groups(&cur_vma->vdev, groups);
		}

		if (!owned) {
			ret = -EINVAL;
			goto err_undo;
		}

		/*
		 * Locking multiple devices is prone to deadlock, runaway and
		 * unwind if we hit contention.
		 */
		if (!vfio_pci_zap_and_vma_lock(cur_vma, true)) {
			ret = -EBUSY;
			goto err_undo;
		}
	}
	/* All vma_locks held; NULL marker means "unwind past everything" */
	cur_vma = NULL;

	list_for_each_entry(cur_mem, &dev_set->device_list, vdev.dev_set_list) {
		if (!down_write_trylock(&cur_mem->memory_lock)) {
			ret = -EBUSY;
			goto err_undo;
		}
		/* Hand-over-hand: vma_lock can drop once memory_lock is held */
		mutex_unlock(&cur_mem->vma_lock);
	}
	cur_mem = NULL;

	/*
	 * The pci_reset_bus() will reset all the devices in the bus.
	 * The power state can be non-D0 for some of the devices in the bus.
	 * For these devices, the pci_reset_bus() will internally set
	 * the power state to D0 without vfio driver involvement.
	 * For the devices which have NoSoftRst-, the reset function can
	 * cause the PCI config space reset without restoring the original
	 * state (saved locally in 'vdev->pm_save').
	 */
	list_for_each_entry(cur, &dev_set->device_list, vdev.dev_set_list)
		vfio_pci_set_power_state(cur, PCI_D0);

	ret = pci_reset_bus(pdev);

	/*
	 * Unwind: release memory_lock up to cur_mem, then vma_lock up to
	 * cur_vma (NULL markers mean "all of them").
	 */
err_undo:
	list_for_each_entry(cur, &dev_set->device_list, vdev.dev_set_list) {
		if (cur == cur_mem)
			is_mem = false;
		if (cur == cur_vma)
			break;
		if (is_mem)
			up_write(&cur->memory_lock);
		else
			mutex_unlock(&cur->vma_lock);
	}

	list_for_each_entry(cur, &dev_set->device_list, vdev.dev_set_list)
		pm_runtime_put(&cur->pdev->dev);
err_unlock:
	mutex_unlock(&dev_set->lock);
	return ret;
}
static bool vfio_pci_dev_set_needs_reset(struct vfio_device_set *dev_set)
{
struct vfio_pci_core_device *cur;
bool needs_reset = false;
/* No other VFIO device in the set can be open. */
if (vfio_device_set_open_count(dev_set) > 1)
return false;
list_for_each_entry(cur, &dev_set->device_list, vdev.dev_set_list)
needs_reset |= cur->needs_reset;
return needs_reset;
}
/*
 * If a bus or slot reset is available for the provided dev_set and:
 *  - All of the devices affected by that bus or slot reset are unused
 *  - At least one of the affected devices is marked dirty via
 *    needs_reset (such as by lack of FLR support)
 * Then attempt to perform that bus or slot reset.
 */
static void vfio_pci_dev_set_try_reset(struct vfio_device_set *dev_set)
{
	struct vfio_pci_core_device *cur;
	struct pci_dev *pdev;
	bool reset_done = false;

	if (!vfio_pci_dev_set_needs_reset(dev_set))
		return;

	pdev = vfio_pci_dev_set_resettable(dev_set);
	if (!pdev)
		return;

	/*
	 * Some of the devices in the bus can be in the runtime suspended
	 * state. Increment the usage count for all the devices in the dev_set
	 * before reset and decrement the same after reset.
	 */
	if (!disable_idle_d3 && vfio_pci_dev_set_pm_runtime_get(dev_set))
		return;

	if (!pci_reset_bus(pdev))
		reset_done = true;

	/* Clear dirty flags only if the reset actually happened */
	list_for_each_entry(cur, &dev_set->device_list, vdev.dev_set_list) {
		if (reset_done)
			cur->needs_reset = false;

		/* PM references were taken only when !disable_idle_d3 */
		if (!disable_idle_d3)
			pm_runtime_put(&cur->pdev->dev);
	}
}
/*
 * Propagate module parameters from the consuming driver (e.g. vfio-pci)
 * into the core's file-scope knobs. Called once at driver init.
 */
void vfio_pci_core_set_params(bool is_nointxmask, bool is_disable_vga,
			      bool is_disable_idle_d3)
{
	nointxmask = is_nointxmask;
	disable_vga = is_disable_vga;
	disable_idle_d3 = is_disable_idle_d3;
}
EXPORT_SYMBOL_GPL(vfio_pci_core_set_params);
/* Module exit: release the shared config-space permission tables */
static void vfio_pci_core_cleanup(void)
{
	vfio_pci_uninit_perm_bits();
}
/* Module init: build the shared config-space permission tables */
static int __init vfio_pci_core_init(void)
{
	/* Allocate shared config space permission data used by all devices */
	return vfio_pci_init_perm_bits();
}

module_init(vfio_pci_core_init);
module_exit(vfio_pci_core_cleanup);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
| linux-master | drivers/vfio/pci/vfio_pci_core.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* VFIO ZPCI devices support
*
* Copyright (C) IBM Corp. 2020. All rights reserved.
* Author(s): Pierre Morel <[email protected]>
* Matthew Rosato <[email protected]>
*/
#include <linux/io.h>
#include <linux/pci.h>
#include <linux/uaccess.h>
#include <linux/vfio.h>
#include <linux/vfio_zdev.h>
#include <linux/kvm_host.h>
#include <asm/pci_clp.h>
#include <asm/pci_io.h>
#include "vfio_pci_priv.h"
/*
* Add the Base PCI Function information to the device info region.
*/
/*
 * Build a VFIO_DEVICE_INFO_CAP_ZPCI_BASE capability from the zdev's CLP
 * base function attributes and append it to @caps.
 */
static int zpci_base_cap(struct zpci_dev *zdev, struct vfio_info_cap *caps)
{
	struct vfio_device_info_cap_zpci_base cap = {
		.header.id = VFIO_DEVICE_INFO_CAP_ZPCI_BASE,
		.header.version = 2,	/* v2 adds pft/gid/fh below */
		.start_dma = zdev->start_dma,
		.end_dma = zdev->end_dma,
		.pchid = zdev->pchid,
		.vfn = zdev->vfn,
		.fmb_length = zdev->fmb_length,
		.pft = zdev->pft,
		.gid = zdev->pfgid,
		.fh = zdev->fh
	};

	return vfio_info_add_capability(caps, &cap.header, sizeof(cap));
}
/*
* Add the Base PCI Function Group information to the device info region.
*/
/*
 * Build a VFIO_DEVICE_INFO_CAP_ZPCI_GROUP capability describing the PCI
 * function group attributes and append it to @caps.
 */
static int zpci_group_cap(struct zpci_dev *zdev, struct vfio_info_cap *caps)
{
	struct vfio_device_info_cap_zpci_group cap = {
		.header.id = VFIO_DEVICE_INFO_CAP_ZPCI_GROUP,
		.header.version = 2,	/* v2 adds version/imaxstbl below */
		.dasm = zdev->dma_mask,
		.msi_addr = zdev->msi_addr,
		.flags = VFIO_DEVICE_INFO_ZPCI_FLAG_REFRESH,
		.mui = zdev->fmb_update,
		.noi = zdev->max_msi,
		.maxstbl = ZPCI_MAX_WRITE_SIZE,
		.version = zdev->version,
		.reserved = 0,
		.imaxstbl = zdev->maxstbl
	};

	return vfio_info_add_capability(caps, &cap.header, sizeof(cap));
}
/*
* Add the device utility string to the device info region.
*/
/*
 * Append a VFIO_DEVICE_INFO_CAP_ZPCI_UTIL capability carrying the
 * device's CLP utility string to @caps.
 */
static int zpci_util_cap(struct zpci_dev *zdev, struct vfio_info_cap *caps)
{
	struct vfio_device_info_cap_zpci_util *util;
	int total = sizeof(*util) + CLP_UTIL_STR_LEN;
	int rc;

	util = kmalloc(total, GFP_KERNEL);
	if (!util)
		return -ENOMEM;

	util->header.id = VFIO_DEVICE_INFO_CAP_ZPCI_UTIL;
	util->header.version = 1;
	util->size = CLP_UTIL_STR_LEN;
	memcpy(util->util_str, zdev->util_str, CLP_UTIL_STR_LEN);

	rc = vfio_info_add_capability(caps, &util->header, total);
	kfree(util);

	return rc;
}
/*
* Add the function path string to the device info region.
*/
/*
 * Append a VFIO_DEVICE_INFO_CAP_ZPCI_PFIP capability carrying the
 * function path (pfip) segments to @caps.
 */
static int zpci_pfip_cap(struct zpci_dev *zdev, struct vfio_info_cap *caps)
{
	struct vfio_device_info_cap_zpci_pfip *pfip;
	int total = sizeof(*pfip) + CLP_PFIP_NR_SEGMENTS;
	int rc;

	pfip = kmalloc(total, GFP_KERNEL);
	if (!pfip)
		return -ENOMEM;

	pfip->header.id = VFIO_DEVICE_INFO_CAP_ZPCI_PFIP;
	pfip->header.version = 1;
	pfip->size = CLP_PFIP_NR_SEGMENTS;
	memcpy(pfip->pfip, zdev->pfip, CLP_PFIP_NR_SEGMENTS);

	rc = vfio_info_add_capability(caps, &pfip->header, total);
	kfree(pfip);

	return rc;
}
/*
* Add all supported capabilities to the VFIO_DEVICE_GET_INFO capability chain.
*/
/*
 * Add all supported zPCI capabilities to the VFIO_DEVICE_GET_INFO
 * capability chain. The utility-string capability is only added when
 * the device advertises one. Stops at the first failure.
 */
int vfio_pci_info_zdev_add_caps(struct vfio_pci_core_device *vdev,
				struct vfio_info_cap *caps)
{
	struct zpci_dev *zdev = to_zpci(vdev->pdev);
	int rc;

	if (!zdev)
		return -ENODEV;

	rc = zpci_base_cap(zdev, caps);
	if (!rc)
		rc = zpci_group_cap(zdev, caps);
	if (!rc && zdev->util_str_avail)
		rc = zpci_util_cap(zdev, caps);
	if (!rc)
		rc = zpci_pfip_cap(zdev, caps);

	return rc;
}
/*
 * Open hook: when the device is opened on behalf of a KVM guest,
 * register the zdev with KVM through the s390 hook. Returns 0 for
 * non-KVM opens, -ENOENT when the hook is absent.
 */
int vfio_pci_zdev_open_device(struct vfio_pci_core_device *vdev)
{
	struct zpci_dev *zdev = to_zpci(vdev->pdev);

	if (!zdev)
		return -ENODEV;

	/* Nothing to do unless a KVM instance is attached */
	if (!vdev->vdev.kvm)
		return 0;

	if (!zpci_kvm_hook.kvm_register)
		return -ENOENT;

	return zpci_kvm_hook.kvm_register(zdev, vdev->vdev.kvm);
}
/*
 * Close hook: undo the KVM registration made at open time, if any.
 */
void vfio_pci_zdev_close_device(struct vfio_pci_core_device *vdev)
{
	struct zpci_dev *zdev = to_zpci(vdev->pdev);

	/* Only unregister when open could have registered */
	if (zdev && vdev->vdev.kvm && zpci_kvm_hook.kvm_unregister)
		zpci_kvm_hook.kvm_unregister(zdev);
}
| linux-master | drivers/vfio/pci/vfio_pci_zdev.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* VFIO PCI Intel Graphics support
*
* Copyright (C) 2016 Red Hat, Inc. All rights reserved.
* Author: Alex Williamson <[email protected]>
*
* Register a device specific region through which to provide read-only
* access to the Intel IGD opregion. The register defining the opregion
* address is also virtualized to prevent user modification.
*/
#include <linux/io.h>
#include <linux/pci.h>
#include <linux/uaccess.h>
#include <linux/vfio.h>
#include "vfio_pci_priv.h"
#define OPREGION_SIGNATURE "IntelGraphicsMem"
#define OPREGION_SIZE (8 * 1024)
#define OPREGION_PCI_ADDR 0xfc	/* ASLS dword in PCI config space */
/* Offsets into the OpRegion itself */
#define OPREGION_RVDA 0x3ba	/* Raw VBT Data Address (OpRegion 2.0+) */
#define OPREGION_RVDS 0x3c2	/* Raw VBT Data Size (OpRegion 2.0+) */
#define OPREGION_VERSION 0x16
struct igd_opregion_vbt {
	void *opregion;	/* mapped OpRegion, OPREGION_SIZE bytes */
	void *vbt_ex;	/* mapped extended VBT (2.0+ RVDA/RVDS), or NULL */
};
/**
* igd_opregion_shift_copy() - Copy OpRegion to user buffer and shift position.
* @dst: User buffer ptr to copy to.
* @off: Offset to user buffer ptr. Increased by bytes on return.
* @src: Source buffer to copy from.
* @pos: Increased by bytes on return.
* @remaining: Decreased by bytes on return.
* @bytes: Bytes to copy and adjust off, pos and remaining.
*
* Copy OpRegion to offset from specific source ptr and shift the offset.
*
* Return: 0 on success, -EFAULT otherwise.
*
*/
static inline unsigned long igd_opregion_shift_copy(char __user *dst,
						    loff_t *off,
						    void *src,
						    loff_t *pos,
						    size_t *remaining,
						    size_t bytes)
{
	/* On fault, leave all three cursors untouched */
	if (copy_to_user(dst + *off, src, bytes))
		return -EFAULT;

	/* Advance the output offset and region position in lockstep */
	*off += bytes;
	*pos += bytes;
	*remaining -= bytes;

	return 0;
}
/*
 * Read handler for the virtual OpRegion region. Presents the OpRegion
 * plus any extended VBT as one contiguous read-only range: the version
 * field is patched from 2.0 to 2.1 when an extended VBT exists, and
 * RVDA is rewritten to the fixed in-region offset (OPREGION_SIZE), so
 * the guest always sees a 2.1-style contiguous layout. Writes fail.
 */
static ssize_t vfio_pci_igd_rw(struct vfio_pci_core_device *vdev,
			       char __user *buf, size_t count, loff_t *ppos,
			       bool iswrite)
{
	unsigned int i = VFIO_PCI_OFFSET_TO_INDEX(*ppos) - VFIO_PCI_NUM_REGIONS;
	struct igd_opregion_vbt *opregionvbt = vdev->region[i].data;
	loff_t pos = *ppos & VFIO_PCI_OFFSET_MASK, off = 0;
	size_t remaining;

	/* Region is read-only; out-of-range offsets are rejected */
	if (pos >= vdev->region[i].size || iswrite)
		return -EINVAL;

	count = min_t(size_t, count, vdev->region[i].size - pos);
	remaining = count;

	/* Copy until OpRegion version */
	if (remaining && pos < OPREGION_VERSION) {
		size_t bytes = min_t(size_t, remaining, OPREGION_VERSION - pos);

		if (igd_opregion_shift_copy(buf, &off,
					    opregionvbt->opregion + pos, &pos,
					    &remaining, bytes))
			return -EFAULT;
	}

	/* Copy patched (if necessary) OpRegion version */
	if (remaining && pos < OPREGION_VERSION + sizeof(__le16)) {
		size_t bytes = min_t(size_t, remaining,
				     OPREGION_VERSION + sizeof(__le16) - pos);
		__le16 version = *(__le16 *)(opregionvbt->opregion +
					     OPREGION_VERSION);

		/* Patch to 2.1 if OpRegion 2.0 has extended VBT */
		if (le16_to_cpu(version) == 0x0200 && opregionvbt->vbt_ex)
			version = cpu_to_le16(0x0201);

		/* (pos - OPREGION_VERSION) handles a read mid-field */
		if (igd_opregion_shift_copy(buf, &off,
					    (u8 *)&version +
					    (pos - OPREGION_VERSION),
					    &pos, &remaining, bytes))
			return -EFAULT;
	}

	/* Copy until RVDA */
	if (remaining && pos < OPREGION_RVDA) {
		size_t bytes = min_t(size_t, remaining, OPREGION_RVDA - pos);

		if (igd_opregion_shift_copy(buf, &off,
					    opregionvbt->opregion + pos, &pos,
					    &remaining, bytes))
			return -EFAULT;
	}

	/* Copy modified (if necessary) RVDA */
	if (remaining && pos < OPREGION_RVDA + sizeof(__le64)) {
		size_t bytes = min_t(size_t, remaining,
				     OPREGION_RVDA + sizeof(__le64) - pos);
		/* Expose VBT right after the OpRegion, or 0 if none */
		__le64 rvda = cpu_to_le64(opregionvbt->vbt_ex ?
					  OPREGION_SIZE : 0);

		if (igd_opregion_shift_copy(buf, &off,
					    (u8 *)&rvda + (pos - OPREGION_RVDA),
					    &pos, &remaining, bytes))
			return -EFAULT;
	}

	/* Copy the rest of OpRegion */
	if (remaining && pos < OPREGION_SIZE) {
		size_t bytes = min_t(size_t, remaining, OPREGION_SIZE - pos);

		if (igd_opregion_shift_copy(buf, &off,
					    opregionvbt->opregion + pos, &pos,
					    &remaining, bytes))
			return -EFAULT;
	}

	/* Copy extended VBT if exists */
	if (remaining &&
	    copy_to_user(buf + off, opregionvbt->vbt_ex + (pos - OPREGION_SIZE),
			 remaining))
		return -EFAULT;

	*ppos += count;

	return count;
}
/*
 * Region release callback: unmap the OpRegion (and extended VBT, when
 * present) and free the tracking structure.
 */
static void vfio_pci_igd_release(struct vfio_pci_core_device *vdev,
				 struct vfio_pci_region *region)
{
	struct igd_opregion_vbt *vbt = region->data;

	if (vbt->vbt_ex)
		memunmap(vbt->vbt_ex);

	memunmap(vbt->opregion);
	kfree(vbt);
}
/* Ops for the read-only OpRegion device-specific region */
static const struct vfio_pci_regops vfio_pci_igd_regops = {
	.rw		= vfio_pci_igd_rw,
	.release	= vfio_pci_igd_release,
};
/*
 * Expose the IGD OpRegion (and, for OpRegion 2.0+, the extended VBT) to
 * userspace as a read-only device-specific region, and virtualize the
 * ASLS dword (OPREGION_PCI_ADDR) in config space.
 *
 * The previous version repeated the memunmap()/kfree() unwind at five
 * separate failure sites; this version uses a single goto-based cleanup
 * ladder (standard kernel idiom) with identical behavior.
 *
 * Returns 0 on success, -ENODEV when no OpRegion is advertised, or a
 * negative errno on mapping/registration failure.
 */
static int vfio_pci_igd_opregion_init(struct vfio_pci_core_device *vdev)
{
	__le32 *dwordp = (__le32 *)(vdev->vconfig + OPREGION_PCI_ADDR);
	u32 addr, size;
	struct igd_opregion_vbt *opregionvbt;
	int ret;
	u16 version;

	ret = pci_read_config_dword(vdev->pdev, OPREGION_PCI_ADDR, &addr);
	if (ret)
		return ret;

	/* 0 or all-1s means the platform advertises no OpRegion */
	if (!addr || !(~addr))
		return -ENODEV;

	opregionvbt = kzalloc(sizeof(*opregionvbt), GFP_KERNEL_ACCOUNT);
	if (!opregionvbt)
		return -ENOMEM;

	opregionvbt->opregion = memremap(addr, OPREGION_SIZE, MEMREMAP_WB);
	if (!opregionvbt->opregion) {
		ret = -ENOMEM;
		goto err_free;
	}

	if (memcmp(opregionvbt->opregion, OPREGION_SIGNATURE, 16)) {
		ret = -EINVAL;
		goto err_unmap;
	}

	/* Size field at offset 16 is in KB; zero is invalid */
	size = le32_to_cpu(*(__le32 *)(opregionvbt->opregion + 16));
	if (!size) {
		ret = -EINVAL;
		goto err_unmap;
	}

	size *= 1024; /* In KB */

	/*
	 * OpRegion and VBT:
	 * When VBT data doesn't exceed 6KB, it's stored in Mailbox #4.
	 * When VBT data exceeds 6KB size, Mailbox #4 is no longer large enough
	 * to hold the VBT data, the Extended VBT region is introduced since
	 * OpRegion 2.0 to hold the VBT data. Since OpRegion 2.0, RVDA/RVDS are
	 * introduced to define the extended VBT data location and size.
	 * OpRegion 2.0: RVDA defines the absolute physical address of the
	 * extended VBT data, RVDS defines the VBT data size.
	 * OpRegion 2.1 and above: RVDA defines the relative address of the
	 * extended VBT data to OpRegion base, RVDS defines the VBT data size.
	 *
	 * Due to the RVDA definition diff in OpRegion VBT (also the only diff
	 * between 2.0 and 2.1), exposing OpRegion and VBT as a contiguous range
	 * for OpRegion 2.0 and above makes it possible to support the
	 * non-contiguous VBT through a single vfio region. From r/w ops view,
	 * only contiguous VBT after OpRegion with version 2.1+ is exposed,
	 * regardless the host OpRegion is 2.0 or non-contiguous 2.1+. The r/w
	 * ops will on-the-fly shift the actural offset into VBT so that data at
	 * correct position can be returned to the requester.
	 */
	version = le16_to_cpu(*(__le16 *)(opregionvbt->opregion +
					  OPREGION_VERSION));
	if (version >= 0x0200) {
		u64 rvda = le64_to_cpu(*(__le64 *)(opregionvbt->opregion +
						   OPREGION_RVDA));
		u32 rvds = le32_to_cpu(*(__le32 *)(opregionvbt->opregion +
						   OPREGION_RVDS));

		/* The extended VBT is valid only when RVDA/RVDS are non-zero */
		if (rvda && rvds) {
			size += rvds;

			/*
			 * Extended VBT location by RVDA:
			 * Absolute physical addr for 2.0.
			 * Relative addr to OpRegion header for 2.1+.
			 */
			if (version == 0x0200)
				addr = rvda;
			else
				addr += rvda;

			opregionvbt->vbt_ex = memremap(addr, rvds, MEMREMAP_WB);
			if (!opregionvbt->vbt_ex) {
				ret = -ENOMEM;
				goto err_unmap;
			}
		}
	}

	ret = vfio_pci_core_register_dev_region(vdev,
		PCI_VENDOR_ID_INTEL | VFIO_REGION_TYPE_PCI_VENDOR_TYPE,
		VFIO_REGION_SUBTYPE_INTEL_IGD_OPREGION, &vfio_pci_igd_regops,
		size, VFIO_REGION_INFO_FLAG_READ, opregionvbt);
	if (ret)
		goto err_unmap_vbt;

	/*
	 * Fill vconfig with the hw value and virtualize register.
	 * NOTE(review): if an extended VBT was found, 'addr' was adjusted
	 * above to the VBT location, so that adjusted value is what lands
	 * in vconfig here. This preserves the original behavior; confirm
	 * it is intended rather than the raw ASLS value.
	 */
	*dwordp = cpu_to_le32(addr);
	memset(vdev->pci_config_map + OPREGION_PCI_ADDR,
	       PCI_CAP_ID_INVALID_VIRT, 4);

	return 0;

err_unmap_vbt:
	if (opregionvbt->vbt_ex)
		memunmap(opregionvbt->vbt_ex);
err_unmap:
	memunmap(opregionvbt->opregion);
err_free:
	kfree(opregionvbt);
	return ret;
}
/*
 * Read-only access to a mirrored bridge (host or LPC) config space.
 * Reads are decomposed into naturally-aligned byte/word/dword accesses
 * through the pci_user_* helpers; the user-buffer offset for each chunk
 * is (count - size), i.e. the bytes already copied. Writes fail.
 */
static ssize_t vfio_pci_igd_cfg_rw(struct vfio_pci_core_device *vdev,
				   char __user *buf, size_t count, loff_t *ppos,
				   bool iswrite)
{
	unsigned int i = VFIO_PCI_OFFSET_TO_INDEX(*ppos) - VFIO_PCI_NUM_REGIONS;
	struct pci_dev *pdev = vdev->region[i].data;
	loff_t pos = *ppos & VFIO_PCI_OFFSET_MASK;
	size_t size;
	int ret;

	if (pos >= vdev->region[i].size || iswrite)
		return -EINVAL;

	size = count = min(count, (size_t)(vdev->region[i].size - pos));

	/* Leading byte to reach word alignment */
	if ((pos & 1) && size) {
		u8 val;

		ret = pci_user_read_config_byte(pdev, pos, &val);
		if (ret)
			return ret;

		if (copy_to_user(buf + count - size, &val, 1))
			return -EFAULT;

		pos++;
		size--;
	}

	/* Leading word to reach dword alignment (skipped when size <= 2) */
	if ((pos & 3) && size > 2) {
		u16 val;
		__le16 lval;

		ret = pci_user_read_config_word(pdev, pos, &val);
		if (ret)
			return ret;

		lval = cpu_to_le16(val);
		if (copy_to_user(buf + count - size, &lval, 2))
			return -EFAULT;

		pos += 2;
		size -= 2;
	}

	/* Bulk of the transfer as aligned dwords */
	while (size > 3) {
		u32 val;
		__le32 lval;

		ret = pci_user_read_config_dword(pdev, pos, &val);
		if (ret)
			return ret;

		lval = cpu_to_le32(val);
		if (copy_to_user(buf + count - size, &lval, 4))
			return -EFAULT;

		pos += 4;
		size -= 4;
	}

	/* Trailing word(s) */
	while (size >= 2) {
		u16 val;
		__le16 lval;

		ret = pci_user_read_config_word(pdev, pos, &val);
		if (ret)
			return ret;

		lval = cpu_to_le16(val);
		if (copy_to_user(buf + count - size, &lval, 2))
			return -EFAULT;

		pos += 2;
		size -= 2;
	}

	/* Trailing byte */
	while (size) {
		u8 val;

		ret = pci_user_read_config_byte(pdev, pos, &val);
		if (ret)
			return ret;

		if (copy_to_user(buf + count - size, &val, 1))
			return -EFAULT;

		pos++;
		size--;
	}

	*ppos += count;

	return count;
}
/*
 * Region release callback: drop the bridge reference taken by
 * pci_get_domain_bus_and_slot() in vfio_pci_igd_cfg_init().
 */
static void vfio_pci_igd_cfg_release(struct vfio_pci_core_device *vdev,
				     struct vfio_pci_region *region)
{
	pci_dev_put(region->data);
}
/* Ops for the read-only mirrored bridge config-space regions */
static const struct vfio_pci_regops vfio_pci_igd_cfg_regops = {
	.rw		= vfio_pci_igd_cfg_rw,
	.release	= vfio_pci_igd_cfg_release,
};
/*
 * Register read-only regions mirroring the Intel host bridge (00:00.0)
 * and LPC/ISA bridge (00:1f.0) config spaces, which IGD drivers probe.
 * Each registered region owns its pci_dev reference; it is dropped via
 * vfio_pci_igd_cfg_release() when the region is torn down.
 */
static int vfio_pci_igd_cfg_init(struct vfio_pci_core_device *vdev)
{
	struct pci_dev *host_bridge, *lpc_bridge;
	int ret;

	host_bridge = pci_get_domain_bus_and_slot(0, 0, PCI_DEVFN(0, 0));
	if (!host_bridge)
		return -ENODEV;

	if (host_bridge->vendor != PCI_VENDOR_ID_INTEL ||
	    host_bridge->class != (PCI_CLASS_BRIDGE_HOST << 8)) {
		pci_dev_put(host_bridge);
		return -EINVAL;
	}

	ret = vfio_pci_core_register_dev_region(vdev,
		PCI_VENDOR_ID_INTEL | VFIO_REGION_TYPE_PCI_VENDOR_TYPE,
		VFIO_REGION_SUBTYPE_INTEL_IGD_HOST_CFG,
		&vfio_pci_igd_cfg_regops, host_bridge->cfg_size,
		VFIO_REGION_INFO_FLAG_READ, host_bridge);
	if (ret) {
		pci_dev_put(host_bridge);
		return ret;
	}

	/*
	 * From here the host bridge region is registered; on later failure
	 * its reference is released with the region, not here.
	 */
	lpc_bridge = pci_get_domain_bus_and_slot(0, 0, PCI_DEVFN(0x1f, 0));
	if (!lpc_bridge)
		return -ENODEV;

	if (lpc_bridge->vendor != PCI_VENDOR_ID_INTEL ||
	    lpc_bridge->class != (PCI_CLASS_BRIDGE_ISA << 8)) {
		pci_dev_put(lpc_bridge);
		return -EINVAL;
	}

	ret = vfio_pci_core_register_dev_region(vdev,
		PCI_VENDOR_ID_INTEL | VFIO_REGION_TYPE_PCI_VENDOR_TYPE,
		VFIO_REGION_SUBTYPE_INTEL_IGD_LPC_CFG,
		&vfio_pci_igd_cfg_regops, lpc_bridge->cfg_size,
		VFIO_REGION_INFO_FLAG_READ, lpc_bridge);
	if (ret) {
		pci_dev_put(lpc_bridge);
		return ret;
	}

	return 0;
}
/*
 * Set up all IGD-specific virtualization: the OpRegion region first,
 * then the mirrored bridge config-space regions.
 */
int vfio_pci_igd_init(struct vfio_pci_core_device *vdev)
{
	int ret = vfio_pci_igd_opregion_init(vdev);

	if (ret)
		return ret;

	return vfio_pci_igd_cfg_init(vdev);
}
| linux-master | drivers/vfio/pci/vfio_pci_igd.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved
*
* Copyright (C) 2012 Red Hat, Inc. All rights reserved.
* Author: Alex Williamson <[email protected]>
*
* Derived from original vfio:
* Copyright 2010 Cisco Systems, Inc. All rights reserved.
* Author: Tom Lyon, [email protected]
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/device.h>
#include <linux/eventfd.h>
#include <linux/file.h>
#include <linux/interrupt.h>
#include <linux/iommu.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/notifier.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#include "vfio_pci_priv.h"
#define DRIVER_AUTHOR "Alex Williamson <[email protected]>"
#define DRIVER_DESC "VFIO PCI - User Level meta-driver"
/* Parsed once at init by vfio_pci_fill_ids(); __initdata is then freed */
static char ids[1024] __initdata;
module_param_string(ids, ids, sizeof(ids), 0);
MODULE_PARM_DESC(ids, "Initial PCI IDs to add to the vfio driver, format is \"vendor:device[:subvendor[:subdevice[:class[:class_mask]]]]\" and multiple comma separated entries can be specified");

static bool nointxmask;
module_param_named(nointxmask, nointxmask, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(nointxmask,
		  "Disable support for PCI 2.3 style INTx masking. If this resolves problems for specific devices, report lspci -vvvxxx to [email protected] so the device can be fixed automatically via the broken_intx_masking flag.");

#ifdef CONFIG_VFIO_PCI_VGA
static bool disable_vga;
module_param(disable_vga, bool, S_IRUGO);
MODULE_PARM_DESC(disable_vga, "Disable VGA resource access through vfio-pci");
#endif

static bool disable_idle_d3;
module_param(disable_idle_d3, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(disable_idle_d3,
		 "Disable using the PCI D3 low power state for idle, unused devices");

/* Compiled in unconditionally, but only settable with CONFIG_PCI_IOV */
static bool enable_sriov;
#ifdef CONFIG_PCI_IOV
module_param(enable_sriov, bool, 0644);
MODULE_PARM_DESC(enable_sriov, "Enable support for SR-IOV configuration. Enabling SR-IOV on a PF typically requires support of the userspace PF driver, enabling VFs without such support may result in non-functional VFs or PF.");
#endif

static bool disable_denylist;
module_param(disable_denylist, bool, 0444);
MODULE_PARM_DESC(disable_denylist, "Disable use of device denylist. Disabling the denylist allows binding to devices with known errata that may lead to exploitable stability or security issues when accessed by untrusted users.");
/*
 * Known-problematic devices that vfio-pci refuses to bind by default;
 * currently the Intel QAT family (PFs and VFs).
 */
static bool vfio_pci_dev_in_denylist(struct pci_dev *pdev)
{
	if (pdev->vendor != PCI_VENDOR_ID_INTEL)
		return false;

	switch (pdev->device) {
	case PCI_DEVICE_ID_INTEL_QAT_C3XXX:
	case PCI_DEVICE_ID_INTEL_QAT_C3XXX_VF:
	case PCI_DEVICE_ID_INTEL_QAT_C62X:
	case PCI_DEVICE_ID_INTEL_QAT_C62X_VF:
	case PCI_DEVICE_ID_INTEL_QAT_DH895XCC:
	case PCI_DEVICE_ID_INTEL_QAT_DH895XCC_VF:
		return true;
	default:
		return false;
	}
}
/*
 * Decide whether probing @pdev must be refused. A denylisted device is
 * still allowed (with a warning) when the disable_denylist module
 * parameter is set; either way a warning is logged.
 */
static bool vfio_pci_is_denylisted(struct pci_dev *pdev)
{
	if (!vfio_pci_dev_in_denylist(pdev))
		return false;

	if (!disable_denylist) {
		pci_warn(pdev, "%04x:%04x exists in vfio-pci device denylist, driver probing disallowed.\n",
			 pdev->vendor, pdev->device);
		return true;
	}

	pci_warn(pdev,
		 "device denylist disabled - allowing device %04x:%04x.\n",
		 pdev->vendor, pdev->device);
	return false;
}
/*
 * open_device callback: enable the core device state and, for Intel VGA
 * devices with CONFIG_VFIO_PCI_IGD, set up the IGD-specific regions.
 * An IGD init result of -ENODEV (no OpRegion) is tolerated.
 */
static int vfio_pci_open_device(struct vfio_device *core_vdev)
{
	struct vfio_pci_core_device *vdev =
		container_of(core_vdev, struct vfio_pci_core_device, vdev);
	struct pci_dev *pdev = vdev->pdev;
	int ret;

	ret = vfio_pci_core_enable(vdev);
	if (ret)
		return ret;

	if (vfio_pci_is_vga(pdev) &&
	    pdev->vendor == PCI_VENDOR_ID_INTEL &&
	    IS_ENABLED(CONFIG_VFIO_PCI_IGD)) {
		ret = vfio_pci_igd_init(vdev);
		if (ret && ret != -ENODEV) {
			pci_warn(pdev, "Failed to setup Intel IGD regions\n");
			vfio_pci_core_disable(vdev);
			return ret;
		}
	}

	vfio_pci_core_finish_enable(vdev);

	return 0;
}
/*
 * vfio_device callbacks for the generic vfio-pci meta-driver; everything
 * except open_device is delegated straight to the vfio-pci core.
 */
static const struct vfio_device_ops vfio_pci_ops = {
	.name		= "vfio-pci",
	.init		= vfio_pci_core_init_dev,
	.release	= vfio_pci_core_release_dev,
	.open_device	= vfio_pci_open_device,
	.close_device	= vfio_pci_core_close_device,
	.ioctl		= vfio_pci_core_ioctl,
	.device_feature = vfio_pci_core_ioctl_feature,
	.read		= vfio_pci_core_read,
	.write		= vfio_pci_core_write,
	.mmap		= vfio_pci_core_mmap,
	.request	= vfio_pci_core_request,
	.match		= vfio_pci_core_match,
	.bind_iommufd	= vfio_iommufd_physical_bind,
	.unbind_iommufd	= vfio_iommufd_physical_unbind,
	.attach_ioas	= vfio_iommufd_physical_attach_ioas,
	.detach_ioas	= vfio_iommufd_physical_detach_ioas,
};
/*
 * PCI probe: allocate a core vfio device for @pdev, stash it as drvdata
 * (the core consults drvdata during registration) and register it.
 */
static int vfio_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct vfio_pci_core_device *vdev;
	int rc;

	if (vfio_pci_is_denylisted(pdev))
		return -EINVAL;

	vdev = vfio_alloc_device(vfio_pci_core_device, vdev, &pdev->dev,
				 &vfio_pci_ops);
	if (IS_ERR(vdev))
		return PTR_ERR(vdev);

	dev_set_drvdata(&pdev->dev, vdev);

	rc = vfio_pci_core_register_device(vdev);
	if (rc) {
		vfio_put_device(&vdev->vdev);
		return rc;
	}

	return 0;
}
/* PCI remove: unregister from vfio, then drop the probe-time reference */
static void vfio_pci_remove(struct pci_dev *pdev)
{
	struct vfio_pci_core_device *vdev = dev_get_drvdata(&pdev->dev);

	vfio_pci_core_unregister_device(vdev);
	vfio_put_device(&vdev->vdev);
}
/*
 * sriov_configure hook: gated on the enable_sriov module parameter,
 * otherwise delegated to the vfio-pci core.
 */
static int vfio_pci_sriov_configure(struct pci_dev *pdev, int nr_virtfn)
{
	struct vfio_pci_core_device *vdev;

	if (!enable_sriov)
		return -ENOENT;

	vdev = dev_get_drvdata(&pdev->dev);
	return vfio_pci_core_sriov_configure(vdev, nr_virtfn);
}
/*
 * Match any PCI device; actual binding still requires the user to opt in
 * via driver_override/new_id (PCI_DRIVER_OVERRIDE_DEVICE_VFIO).
 */
static const struct pci_device_id vfio_pci_table[] = {
	{ PCI_DRIVER_OVERRIDE_DEVICE_VFIO(PCI_ANY_ID, PCI_ANY_ID) }, /* match all by default */
	{}
};

MODULE_DEVICE_TABLE(pci, vfio_pci_table);
static struct pci_driver vfio_pci_driver = {
	.name			= "vfio-pci",
	.id_table		= vfio_pci_table,
	.probe			= vfio_pci_probe,
	.remove			= vfio_pci_remove,
	.sriov_configure	= vfio_pci_sriov_configure,
	.err_handler		= &vfio_pci_core_err_handlers,
	/* DMA is owned by userspace, not managed by the driver core */
	.driver_managed_dma	= true,
};
/*
 * Parse the "ids" module parameter (comma-separated
 * vendor:device[:subvendor[:subdevice[:class[:class_mask]]]] entries,
 * hex) and register each as a dynamic ID. Malformed or rejected entries
 * are logged and skipped; they never abort module init.
 */
static void __init vfio_pci_fill_ids(void)
{
	char *p, *id;
	int rc;

	/* no ids passed actually */
	if (ids[0] == '\0')
		return;

	/* add ids specified in the module parameter */
	p = ids;
	while ((id = strsep(&p, ","))) {
		unsigned int vendor, device, subvendor = PCI_ANY_ID,
			subdevice = PCI_ANY_ID, class = 0, class_mask = 0;
		int fields;

		if (!strlen(id))
			continue;

		/* Only vendor and device are mandatory */
		fields = sscanf(id, "%x:%x:%x:%x:%x:%x",
				&vendor, &device, &subvendor, &subdevice,
				&class, &class_mask);

		if (fields < 2) {
			pr_warn("invalid id string \"%s\"\n", id);
			continue;
		}

		rc = pci_add_dynid(&vfio_pci_driver, vendor, device,
				   subvendor, subdevice, class, class_mask, 0);
		if (rc)
			pr_warn("failed to add dynamic id [%04x:%04x[%04x:%04x]] class %#08x/%08x (%d)\n",
				vendor, device, subvendor, subdevice,
				class, class_mask, rc);
		else
			pr_info("add [%04x:%04x[%04x:%04x]] class %#08x/%08x\n",
				vendor, device, subvendor, subdevice,
				class, class_mask);
	}
}
/*
 * Module init: hand the parameter values to the core (VGA handling is
 * forced off when CONFIG_VFIO_PCI_VGA is not built), register the PCI
 * driver, then add any user-specified dynamic IDs.
 */
static int __init vfio_pci_init(void)
{
	int ret;
	bool is_disable_vga = true;

#ifdef CONFIG_VFIO_PCI_VGA
	is_disable_vga = disable_vga;
#endif

	vfio_pci_core_set_params(nointxmask, is_disable_vga, disable_idle_d3);

	/* Register and scan for devices */
	ret = pci_register_driver(&vfio_pci_driver);
	if (ret)
		return ret;

	vfio_pci_fill_ids();

	if (disable_denylist)
		pr_warn("device denylist disabled.\n");

	return 0;
}
module_init(vfio_pci_init);
/* Module exit: unregister the PCI driver (dynamic IDs go with it) */
static void __exit vfio_pci_cleanup(void)
{
	pci_unregister_driver(&vfio_pci_driver);
}
module_exit(vfio_pci_cleanup);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
| linux-master | drivers/vfio/pci/vfio_pci.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* VFIO PCI I/O Port & MMIO access
*
* Copyright (C) 2012 Red Hat, Inc. All rights reserved.
* Author: Alex Williamson <[email protected]>
*
* Derived from original vfio:
* Copyright 2010 Cisco Systems, Inc. All rights reserved.
* Author: Tom Lyon, [email protected]
*/
#include <linux/fs.h>
#include <linux/pci.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/vfio.h>
#include <linux/vgaarb.h>
#include "vfio_pci_priv.h"
/*
 * PCI is little-endian; on big-endian hosts use the byte-swapping
 * accessors so data appears on the bus in LE order either way.
 */
#ifdef __LITTLE_ENDIAN
#define vfio_ioread64	ioread64
#define vfio_iowrite64	iowrite64
#define vfio_ioread32	ioread32
#define vfio_iowrite32	iowrite32
#define vfio_ioread16	ioread16
#define vfio_iowrite16	iowrite16
#else
#define vfio_ioread64	ioread64be
#define vfio_iowrite64	iowrite64be
#define vfio_ioread32	ioread32be
#define vfio_iowrite32	iowrite32be
#define vfio_ioread16	ioread16be
#define vfio_iowrite16	iowrite16be
#endif
/* Single bytes have no endianness */
#define vfio_ioread8	ioread8
#define vfio_iowrite8	iowrite8
/*
 * VFIO_IOWRITE(size) generates vfio_pci_iowrite{8,16,32,64}().  When
 * @test_mem is true, the write is performed under vdev->memory_lock and
 * fails with -EIO if the device's memory space decode is currently
 * disabled, rather than touching a disabled BAR.
 */
#define VFIO_IOWRITE(size) \
static int vfio_pci_iowrite##size(struct vfio_pci_core_device *vdev,	\
			bool test_mem, u##size val, void __iomem *io)	\
{									\
	if (test_mem) {							\
		down_read(&vdev->memory_lock);				\
		if (!__vfio_pci_memory_enabled(vdev)) {			\
			up_read(&vdev->memory_lock);			\
			return -EIO;					\
		}							\
	}								\
									\
	vfio_iowrite##size(val, io);					\
									\
	if (test_mem)							\
		up_read(&vdev->memory_lock);				\
									\
	return 0;							\
}

VFIO_IOWRITE(8)
VFIO_IOWRITE(16)
VFIO_IOWRITE(32)
/* 64-bit accessor only exists where the arch provides iowrite64 */
#ifdef iowrite64
VFIO_IOWRITE(64)
#endif
/*
 * VFIO_IOREAD(size) generates vfio_pci_ioread{8,16,32}() with the same
 * memory-enable check as VFIO_IOWRITE above: with @test_mem set the read
 * happens under vdev->memory_lock and returns -EIO if memory decode is
 * disabled.  Note no 64-bit read variant is generated.
 */
#define VFIO_IOREAD(size) \
static int vfio_pci_ioread##size(struct vfio_pci_core_device *vdev,	\
			bool test_mem, u##size *val, void __iomem *io)	\
{									\
	if (test_mem) {							\
		down_read(&vdev->memory_lock);				\
		if (!__vfio_pci_memory_enabled(vdev)) {			\
			up_read(&vdev->memory_lock);			\
			return -EIO;					\
		}							\
	}								\
									\
	*val = vfio_ioread##size(io);					\
									\
	if (test_mem)							\
		up_read(&vdev->memory_lock);				\
									\
	return 0;							\
}

VFIO_IOREAD(8)
VFIO_IOREAD(16)
VFIO_IOREAD(32)
/*
 * Read or write from an __iomem region (MMIO or I/O port) with an excluded
 * range which is inaccessible.  The excluded range drops writes and fills
 * reads with -1.  This is intended for handling MSI-X vector tables and
 * leftover space for ROM BARs.
 */
static ssize_t do_io_rw(struct vfio_pci_core_device *vdev, bool test_mem,
			void __iomem *io, char __user *buf,
			loff_t off, size_t count, size_t x_start,
			size_t x_end, bool iswrite)
{
	ssize_t progress = 0;
	int ret;

	while (count) {
		size_t avail, step;

		/* Bytes we may access before reaching the excluded range */
		if (off < x_start)
			avail = min(count, (size_t)(x_start - off));
		else if (off >= x_end)
			avail = count;
		else
			avail = 0;	/* currently inside excluded range */

		if (avail >= 4 && !(off % 4)) {
			u32 val;

			if (iswrite) {
				if (copy_from_user(&val, buf, 4))
					return -EFAULT;

				ret = vfio_pci_iowrite32(vdev, test_mem,
							 val, io + off);
				if (ret)
					return ret;
			} else {
				ret = vfio_pci_ioread32(vdev, test_mem,
							&val, io + off);
				if (ret)
					return ret;

				if (copy_to_user(buf, &val, 4))
					return -EFAULT;
			}

			step = 4;
		} else if (avail >= 2 && !(off % 2)) {
			u16 val;

			if (iswrite) {
				if (copy_from_user(&val, buf, 2))
					return -EFAULT;

				ret = vfio_pci_iowrite16(vdev, test_mem,
							 val, io + off);
				if (ret)
					return ret;
			} else {
				ret = vfio_pci_ioread16(vdev, test_mem,
							&val, io + off);
				if (ret)
					return ret;

				if (copy_to_user(buf, &val, 2))
					return -EFAULT;
			}

			step = 2;
		} else if (avail) {
			u8 val;

			if (iswrite) {
				if (copy_from_user(&val, buf, 1))
					return -EFAULT;

				ret = vfio_pci_iowrite8(vdev, test_mem,
							val, io + off);
				if (ret)
					return ret;
			} else {
				ret = vfio_pci_ioread8(vdev, test_mem,
						       &val, io + off);
				if (ret)
					return ret;

				if (copy_to_user(buf, &val, 1))
					return -EFAULT;
			}

			step = 1;
		} else {
			/* Fill reads with -1, drop writes */
			step = min(count, (size_t)(x_end - off));

			if (!iswrite) {
				u8 val = 0xFF;
				size_t i;

				for (i = 0; i < step; i++)
					if (copy_to_user(buf + i, &val, 1))
						return -EFAULT;
			}
		}

		count -= step;
		progress += step;
		off += step;
		buf += step;
	}

	return progress;
}
static int vfio_pci_setup_barmap(struct vfio_pci_core_device *vdev, int bar)
{
struct pci_dev *pdev = vdev->pdev;
int ret;
void __iomem *io;
if (vdev->barmap[bar])
return 0;
ret = pci_request_selected_regions(pdev, 1 << bar, "vfio");
if (ret)
return ret;
io = pci_iomap(pdev, bar, 0);
if (!io) {
pci_release_selected_regions(pdev, 1 << bar);
return -ENOMEM;
}
vdev->barmap[bar] = io;
return 0;
}
/*
 * Perform a user read/write to a BAR region.  The BAR index and the
 * offset within it are both encoded in *ppos.
 */
ssize_t vfio_pci_bar_rw(struct vfio_pci_core_device *vdev, char __user *buf,
			size_t count, loff_t *ppos, bool iswrite)
{
	struct pci_dev *pdev = vdev->pdev;
	loff_t pos = *ppos & VFIO_PCI_OFFSET_MASK;
	int bar = VFIO_PCI_OFFSET_TO_INDEX(*ppos);
	size_t x_start = 0, x_end = 0;	/* excluded range; none by default */
	resource_size_t end;
	void __iomem *io;
	struct resource *res = &vdev->pdev->resource[bar];
	ssize_t done;

	if (pci_resource_start(pdev, bar))
		end = pci_resource_len(pdev, bar);
	else if (bar == PCI_ROM_RESOURCE &&
		 pdev->resource[bar].flags & IORESOURCE_ROM_SHADOW)
		end = 0x20000;	/* fixed 128k legacy shadow ROM window */
	else
		return -EINVAL;

	if (pos >= end)
		return -EINVAL;

	count = min(count, (size_t)(end - pos));

	if (bar == PCI_ROM_RESOURCE) {
		/*
		 * The ROM can fill less space than the BAR, so we start the
		 * excluded range at the end of the actual ROM.  This makes
		 * filling large ROM BARs much faster.
		 */
		io = pci_map_rom(pdev, &x_start);
		if (!io) {
			done = -ENOMEM;
			goto out;
		}
		x_end = end;
	} else {
		int ret = vfio_pci_setup_barmap(vdev, bar);
		if (ret) {
			done = ret;
			goto out;
		}
		io = vdev->barmap[bar];
	}

	/* Exclude the MSI-X vector table from direct access */
	if (bar == vdev->msix_bar) {
		x_start = vdev->msix_offset;
		x_end = vdev->msix_offset + vdev->msix_size;
	}

	done = do_io_rw(vdev, res->flags & IORESOURCE_MEM, io, buf, pos,
			count, x_start, x_end, iswrite);

	if (done >= 0)
		*ppos += done;

	if (bar == PCI_ROM_RESOURCE)
		pci_unmap_rom(pdev, io);
out:
	return done;
}
#ifdef CONFIG_VFIO_PCI_VGA
/*
 * User read/write to the legacy VGA resources: the 0xa0000-0xbffff memory
 * window and the 0x3b0/0x3c0 I/O port ranges.  Each access maps the
 * resource, arbitrates ownership via the VGA arbiter, performs the
 * access, then unmaps and releases.
 */
ssize_t vfio_pci_vga_rw(struct vfio_pci_core_device *vdev, char __user *buf,
			size_t count, loff_t *ppos, bool iswrite)
{
	int ret;
	loff_t off, pos = *ppos & VFIO_PCI_OFFSET_MASK;
	void __iomem *iomem = NULL;
	unsigned int rsrc;
	bool is_ioport;
	ssize_t done;

	if (!vdev->has_vga)
		return -EINVAL;

	if (pos > 0xbfffful)
		return -EINVAL;

	switch ((u32)pos) {
	case 0xa0000 ... 0xbffff:
		count = min(count, (size_t)(0xc0000 - pos));
		iomem = ioremap(0xa0000, 0xbffff - 0xa0000 + 1);
		off = pos - 0xa0000;
		rsrc = VGA_RSRC_LEGACY_MEM;
		is_ioport = false;
		break;
	case 0x3b0 ... 0x3bb:
		count = min(count, (size_t)(0x3bc - pos));
		iomem = ioport_map(0x3b0, 0x3bb - 0x3b0 + 1);
		off = pos - 0x3b0;
		rsrc = VGA_RSRC_LEGACY_IO;
		is_ioport = true;
		break;
	case 0x3c0 ... 0x3df:
		count = min(count, (size_t)(0x3e0 - pos));
		iomem = ioport_map(0x3c0, 0x3df - 0x3c0 + 1);
		off = pos - 0x3c0;
		rsrc = VGA_RSRC_LEGACY_IO;
		is_ioport = true;
		break;
	default:
		return -EINVAL;
	}

	if (!iomem)
		return -ENOMEM;

	ret = vga_get_interruptible(vdev->pdev, rsrc);
	if (ret) {
		is_ioport ? ioport_unmap(iomem) : iounmap(iomem);
		return ret;
	}

	/*
	 * VGA MMIO is a legacy, non-BAR resource that hopefully allows
	 * probing, so we don't currently worry about access in relation
	 * to the memory enable bit in the command register.
	 */
	done = do_io_rw(vdev, false, iomem, buf, off, count, 0, 0, iswrite);

	vga_put(vdev->pdev, rsrc);

	is_ioport ? ioport_unmap(iomem) : iounmap(iomem);

	if (done >= 0)
		*ppos += done;

	return done;
}
#endif
/*
 * Write the ioeventfd's recorded data value to its target BAR address,
 * using the access width chosen at registration (8-byte only where the
 * arch provides iowrite64).  Errors from the iowrite helpers are ignored.
 */
static void vfio_pci_ioeventfd_do_write(struct vfio_pci_ioeventfd *ioeventfd,
					bool test_mem)
{
	switch (ioeventfd->count) {
	case 1:
		vfio_pci_iowrite8(ioeventfd->vdev, test_mem,
				  ioeventfd->data, ioeventfd->addr);
		break;
	case 2:
		vfio_pci_iowrite16(ioeventfd->vdev, test_mem,
				   ioeventfd->data, ioeventfd->addr);
		break;
	case 4:
		vfio_pci_iowrite32(ioeventfd->vdev, test_mem,
				   ioeventfd->data, ioeventfd->addr);
		break;
#ifdef iowrite64
	case 8:
		vfio_pci_iowrite64(ioeventfd->vdev, test_mem,
				   ioeventfd->data, ioeventfd->addr);
		break;
#endif
	}
}
/*
 * Fast-path eventfd handler; must not sleep, so memory_lock is taken
 * with a trylock.  Returning 1 defers the work to the thread handler on
 * lock contention.  Since the memory-enable check is already done here,
 * the write itself is issued with test_mem == false to avoid re-taking
 * the lock in the iowrite helper.
 */
static int vfio_pci_ioeventfd_handler(void *opaque, void *unused)
{
	struct vfio_pci_ioeventfd *ioeventfd = opaque;
	struct vfio_pci_core_device *vdev = ioeventfd->vdev;

	if (ioeventfd->test_mem) {
		if (!down_read_trylock(&vdev->memory_lock))
			return 1; /* Lock contended, use thread */
		if (!__vfio_pci_memory_enabled(vdev)) {
			up_read(&vdev->memory_lock);
			return 0;
		}
	}

	vfio_pci_ioeventfd_do_write(ioeventfd, false);

	if (ioeventfd->test_mem)
		up_read(&vdev->memory_lock);

	return 0;
}
/*
 * Sleepable fallback for the handler above; here the lock and
 * memory-enable check are delegated to the iowrite helper via test_mem.
 */
static void vfio_pci_ioeventfd_thread(void *opaque, void *unused)
{
	struct vfio_pci_ioeventfd *ioeventfd = opaque;

	vfio_pci_ioeventfd_do_write(ioeventfd, ioeventfd->test_mem);
}
/*
 * Register (fd >= 0) or unregister (fd == -1) an ioeventfd: whenever the
 * eventfd is signaled, @data is written to BAR @bar at offset @pos with
 * access width @count.  The ioeventfd list is protected by
 * vdev->ioeventfds_lock.
 */
int vfio_pci_ioeventfd(struct vfio_pci_core_device *vdev, loff_t offset,
		       uint64_t data, int count, int fd)
{
	struct pci_dev *pdev = vdev->pdev;
	loff_t pos = offset & VFIO_PCI_OFFSET_MASK;
	int ret, bar = VFIO_PCI_OFFSET_TO_INDEX(offset);
	struct vfio_pci_ioeventfd *ioeventfd;

	/* Only support ioeventfds into BARs */
	if (bar > VFIO_PCI_BAR5_REGION_INDEX)
		return -EINVAL;

	if (pos + count > pci_resource_len(pdev, bar))
		return -EINVAL;

	/* Disallow ioeventfds working around MSI-X table writes */
	if (bar == vdev->msix_bar &&
	    !(pos + count <= vdev->msix_offset ||
	      pos >= vdev->msix_offset + vdev->msix_size))
		return -EINVAL;

	/* 8-byte writes are unsupported where the arch lacks iowrite64 */
#ifndef iowrite64
	if (count == 8)
		return -EINVAL;
#endif

	ret = vfio_pci_setup_barmap(vdev, bar);
	if (ret)
		return ret;

	mutex_lock(&vdev->ioeventfds_lock);

	/* An existing matching entry may only be removed, not re-added */
	list_for_each_entry(ioeventfd, &vdev->ioeventfds_list, next) {
		if (ioeventfd->pos == pos && ioeventfd->bar == bar &&
		    ioeventfd->data == data && ioeventfd->count == count) {
			if (fd == -1) {
				vfio_virqfd_disable(&ioeventfd->virqfd);
				list_del(&ioeventfd->next);
				vdev->ioeventfds_nr--;
				kfree(ioeventfd);
				ret = 0;
			} else
				ret = -EEXIST;

			goto out_unlock;
		}
	}

	if (fd < 0) {
		ret = -ENODEV;
		goto out_unlock;
	}

	if (vdev->ioeventfds_nr >= VFIO_PCI_IOEVENTFD_MAX) {
		ret = -ENOSPC;
		goto out_unlock;
	}

	ioeventfd = kzalloc(sizeof(*ioeventfd), GFP_KERNEL_ACCOUNT);
	if (!ioeventfd) {
		ret = -ENOMEM;
		goto out_unlock;
	}

	ioeventfd->vdev = vdev;
	ioeventfd->addr = vdev->barmap[bar] + pos;
	ioeventfd->data = data;
	ioeventfd->pos = pos;
	ioeventfd->bar = bar;
	ioeventfd->count = count;
	ioeventfd->test_mem = vdev->pdev->resource[bar].flags & IORESOURCE_MEM;

	ret = vfio_virqfd_enable(ioeventfd, vfio_pci_ioeventfd_handler,
				 vfio_pci_ioeventfd_thread, NULL,
				 &ioeventfd->virqfd, fd);
	if (ret) {
		kfree(ioeventfd);
		goto out_unlock;
	}

	list_add(&ioeventfd->next, &vdev->ioeventfds_list);
	vdev->ioeventfds_nr++;

out_unlock:
	mutex_unlock(&vdev->ioeventfds_lock);

	return ret;
}
| linux-master | drivers/vfio/pci/vfio_pci_rdwr.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* VFIO PCI config space virtualization
*
* Copyright (C) 2012 Red Hat, Inc. All rights reserved.
* Author: Alex Williamson <[email protected]>
*
* Derived from original vfio:
* Copyright 2010 Cisco Systems, Inc. All rights reserved.
* Author: Tom Lyon, [email protected]
*/
/*
* This code handles reading and writing of PCI configuration registers.
* This is hairy because we want to allow a lot of flexibility to the
* user driver, but cannot trust it with all of the config fields.
* Tables determine which fields can be read and written, as well as
* which fields are 'virtualized' - special actions and translations to
* make it appear to the user that he has control, when in fact things
* must be negotiated with the underlying OS.
*/
#include <linux/fs.h>
#include <linux/pci.h>
#include <linux/uaccess.h>
#include <linux/vfio.h>
#include <linux/slab.h>
#include "vfio_pci_priv.h"
/* Fake capability ID for standard config space */
#define PCI_CAP_ID_BASIC	0

/* True if @offset falls within a BAR or the expansion ROM address */
#define is_bar(offset)	\
	((offset >= PCI_BASE_ADDRESS_0 && offset < PCI_BASE_ADDRESS_5 + 4) || \
	 (offset >= PCI_ROM_ADDRESS && offset < PCI_ROM_ADDRESS + 4))
/*
 * Lengths (in bytes) of PCI Config Capabilities, indexed by capability ID
 *   0: Removed from the user visible capability list
 *   FF: Variable length
 */
static const u8 pci_cap_length[PCI_CAP_ID_MAX + 1] = {
	[PCI_CAP_ID_BASIC]	= PCI_STD_HEADER_SIZEOF, /* pci config header */
	[PCI_CAP_ID_PM]		= PCI_PM_SIZEOF,
	[PCI_CAP_ID_AGP]	= PCI_AGP_SIZEOF,
	[PCI_CAP_ID_VPD]	= PCI_CAP_VPD_SIZEOF,
	[PCI_CAP_ID_SLOTID]	= 0,		/* bridge - don't care */
	[PCI_CAP_ID_MSI]	= 0xFF,		/* 10, 14, 20, or 24 */
	[PCI_CAP_ID_CHSWP]	= 0,		/* cpci - not yet */
	[PCI_CAP_ID_PCIX]	= 0xFF,		/* 8 or 24 */
	[PCI_CAP_ID_HT]		= 0xFF,		/* hypertransport */
	[PCI_CAP_ID_VNDR]	= 0xFF,		/* variable */
	[PCI_CAP_ID_DBG]	= 0,		/* debug - don't care */
	[PCI_CAP_ID_CCRC]	= 0,		/* cpci - not yet */
	[PCI_CAP_ID_SHPC]	= 0,		/* hotswap - not yet */
	[PCI_CAP_ID_SSVID]	= 0,		/* bridge - don't care */
	[PCI_CAP_ID_AGP3]	= 0,		/* AGP8x - not yet */
	[PCI_CAP_ID_SECDEV]	= 0,		/* secure device not yet */
	[PCI_CAP_ID_EXP]	= 0xFF,		/* 20 or 44 */
	[PCI_CAP_ID_MSIX]	= PCI_CAP_MSIX_SIZEOF,
	[PCI_CAP_ID_SATA]	= 0xFF,
	[PCI_CAP_ID_AF]		= PCI_CAP_AF_SIZEOF,
};
/*
 * Lengths (in bytes) of PCIe/PCI-X Extended Config Capabilities, indexed
 * by extended capability ID
 *   0: Removed or masked from the user visible capability list
 *   FF: Variable length
 */
static const u16 pci_ext_cap_length[PCI_EXT_CAP_ID_MAX + 1] = {
	[PCI_EXT_CAP_ID_ERR]	=	PCI_ERR_ROOT_COMMAND,
	[PCI_EXT_CAP_ID_VC]	=	0xFF,
	[PCI_EXT_CAP_ID_DSN]	=	PCI_EXT_CAP_DSN_SIZEOF,
	[PCI_EXT_CAP_ID_PWR]	=	PCI_EXT_CAP_PWR_SIZEOF,
	[PCI_EXT_CAP_ID_RCLD]	=	0,	/* root only - don't care */
	[PCI_EXT_CAP_ID_RCILC]	=	0,	/* root only - don't care */
	[PCI_EXT_CAP_ID_RCEC]	=	0,	/* root only - don't care */
	[PCI_EXT_CAP_ID_MFVC]	=	0xFF,
	[PCI_EXT_CAP_ID_VC9]	=	0xFF,	/* same as CAP_ID_VC */
	[PCI_EXT_CAP_ID_RCRB]	=	0,	/* root only - don't care */
	[PCI_EXT_CAP_ID_VNDR]	=	0xFF,
	[PCI_EXT_CAP_ID_CAC]	=	0,	/* obsolete */
	[PCI_EXT_CAP_ID_ACS]	=	0xFF,
	[PCI_EXT_CAP_ID_ARI]	=	PCI_EXT_CAP_ARI_SIZEOF,
	[PCI_EXT_CAP_ID_ATS]	=	PCI_EXT_CAP_ATS_SIZEOF,
	[PCI_EXT_CAP_ID_SRIOV]	=	PCI_EXT_CAP_SRIOV_SIZEOF,
	[PCI_EXT_CAP_ID_MRIOV]	=	0,	/* not yet */
	[PCI_EXT_CAP_ID_MCAST]	=	PCI_EXT_CAP_MCAST_ENDPOINT_SIZEOF,
	[PCI_EXT_CAP_ID_PRI]	=	PCI_EXT_CAP_PRI_SIZEOF,
	[PCI_EXT_CAP_ID_AMD_XXX] =	0,	/* not yet */
	[PCI_EXT_CAP_ID_REBAR]	=	0xFF,
	[PCI_EXT_CAP_ID_DPA]	=	0xFF,
	[PCI_EXT_CAP_ID_TPH]	=	0xFF,
	[PCI_EXT_CAP_ID_LTR]	=	PCI_EXT_CAP_LTR_SIZEOF,
	[PCI_EXT_CAP_ID_SECPCI]	=	0,	/* not yet */
	[PCI_EXT_CAP_ID_PMUX]	=	0,	/* not yet */
	[PCI_EXT_CAP_ID_PASID]	=	0,	/* not yet */
	[PCI_EXT_CAP_ID_DVSEC]	=	0xFF,
};
/*
 * Read/Write Permission Bits - one bit for each bit in capability
 * Any field can be read if it exists, but what is read depends on
 * whether the field is 'virtualized', or just pass through to the
 * hardware.  Any virtualized field is also virtualized for writes.
 * Writes are only permitted if they have a 1 bit here.
 */
struct perm_bits {
	u8	*virt;		/* read/write virtual data, not hw */
	u8	*write;		/* writeable bits */
	/* hooks applied for reads/writes falling in this capability */
	int	(*readfn)(struct vfio_pci_core_device *vdev, int pos, int count,
			  struct perm_bits *perm, int offset, __le32 *val);
	int	(*writefn)(struct vfio_pci_core_device *vdev, int pos, int count,
			   struct perm_bits *perm, int offset, __le32 val);
};

/* Shorthands for filling the virt/write bitmaps below */
#define	NO_VIRT		0
#define	ALL_VIRT	0xFFFFFFFFU
#define	NO_WRITE	0
#define	ALL_WRITE	0xFFFFFFFFU
/*
 * Read 1, 2 or 4 bytes of config space from the physical device via the
 * pci_user_* accessors and return the result as a little-endian dword.
 * Unsupported counts return -EINVAL.
 */
static int vfio_user_config_read(struct pci_dev *pdev, int offset,
				 __le32 *val, int count)
{
	u32 raw = 0;
	int ret = -EINVAL;

	switch (count) {
	case 1:
	{
		u8 byte;

		ret = pci_user_read_config_byte(pdev, offset, &byte);
		raw = byte;
		break;
	}
	case 2:
	{
		u16 word;

		ret = pci_user_read_config_word(pdev, offset, &word);
		raw = word;
		break;
	}
	case 4:
		ret = pci_user_read_config_dword(pdev, offset, &raw);
		break;
	}

	*val = cpu_to_le32(raw);

	return ret;
}
/*
 * Write 1, 2 or 4 bytes of config space to the physical device; @val is
 * little-endian and is truncated to the access width.  Unsupported counts
 * return -EINVAL.
 */
static int vfio_user_config_write(struct pci_dev *pdev, int offset,
				  __le32 val, int count)
{
	u32 raw = le32_to_cpu(val);
	int ret = -EINVAL;

	switch (count) {
	case 1:
		ret = pci_user_write_config_byte(pdev, offset, raw);
		break;
	case 2:
		ret = pci_user_write_config_word(pdev, offset, raw);
		break;
	case 4:
		ret = pci_user_write_config_dword(pdev, offset, raw);
		break;
	}

	return ret;
}
/*
 * Default read: merge virtualized bits from vconfig with non-virtualized
 * bits read from the physical device, per the capability's virt bitmap.
 * Returns @count on success.
 */
static int vfio_default_config_read(struct vfio_pci_core_device *vdev, int pos,
				    int count, struct perm_bits *perm,
				    int offset, __le32 *val)
{
	__le32 virt = 0;

	memcpy(val, vdev->vconfig + pos, count);
	memcpy(&virt, perm->virt + offset, count);

	/* Any non-virtualized bits? */
	if (cpu_to_le32(~0U >> (32 - (count * 8))) != virt) {
		struct pci_dev *pdev = vdev->pdev;
		__le32 phys_val = 0;
		int ret;

		ret = vfio_user_config_read(pdev, pos, &phys_val, count);
		if (ret)
			return ret;

		/* Keep virtualized bits from vconfig, rest from hardware */
		*val = (phys_val & ~virt) | (*val & virt);
	}

	return count;
}
/*
 * Default write: route writable+virtualized bits into vconfig and
 * writable+non-virtualized bits through to the device; all other bits
 * are silently dropped.  Returns @count on success.
 */
static int vfio_default_config_write(struct vfio_pci_core_device *vdev, int pos,
				     int count, struct perm_bits *perm,
				     int offset, __le32 val)
{
	__le32 virt = 0, write = 0;

	memcpy(&write, perm->write + offset, count);

	if (!write)
		return count; /* drop, no writable bits */

	memcpy(&virt, perm->virt + offset, count);

	/* Virtualized and writable bits go to vconfig */
	if (write & virt) {
		__le32 virt_val = 0;

		memcpy(&virt_val, vdev->vconfig + pos, count);

		virt_val &= ~(write & virt);
		virt_val |= (val & (write & virt));

		memcpy(vdev->vconfig + pos, &virt_val, count);
	}

	/* Non-virtualized and writable bits go to hardware */
	if (write & ~virt) {
		struct pci_dev *pdev = vdev->pdev;
		__le32 phys_val = 0;
		int ret;

		/* Read-modify-write so non-writable hw bits are preserved */
		ret = vfio_user_config_read(pdev, pos, &phys_val, count);
		if (ret)
			return ret;

		phys_val &= ~(write & ~virt);
		phys_val |= (val & (write & ~virt));

		ret = vfio_user_config_write(pdev, pos, phys_val, count);
		if (ret)
			return ret;
	}

	return count;
}
/* Allow direct read from hardware, except for capability next pointer */
static int vfio_direct_config_read(struct vfio_pci_core_device *vdev, int pos,
				   int count, struct perm_bits *perm,
				   int offset, __le32 *val)
{
	int ret;

	ret = vfio_user_config_read(vdev->pdev, pos, val, count);
	if (ret)
		return ret;

	/*
	 * Capability headers come from vconfig so we can hide or reorder
	 * capabilities in the chain presented to the user.
	 */
	if (pos >= PCI_CFG_SPACE_SIZE) { /* Extended cap header mangling */
		if (offset < 4)
			memcpy(val, vdev->vconfig + pos, count);
	} else if (pos >= PCI_STD_HEADER_SIZEOF) { /* Std cap mangling */
		if (offset == PCI_CAP_LIST_ID && count > 1) /* ID and next */
			memcpy(val, vdev->vconfig + pos,
			       min(PCI_CAP_FLAGS, count));
		else if (offset == PCI_CAP_LIST_NEXT) /* next only */
			memcpy(val, vdev->vconfig + pos, 1);
	}

	return count;
}
/* Raw access skips any kind of virtualization */
static int vfio_raw_config_write(struct vfio_pci_core_device *vdev, int pos,
				 int count, struct perm_bits *perm,
				 int offset, __le32 val)
{
	int ret = vfio_user_config_write(vdev->pdev, pos, val, count);

	return ret ? ret : count;
}
/* Raw read counterpart: pass straight through to the device */
static int vfio_raw_config_read(struct vfio_pci_core_device *vdev, int pos,
				int count, struct perm_bits *perm,
				int offset, __le32 *val)
{
	int ret = vfio_user_config_read(vdev->pdev, pos, val, count);

	return ret ? ret : count;
}
/* Virt access uses only virtualization: writes land in vconfig only */
static int vfio_virt_config_write(struct vfio_pci_core_device *vdev, int pos,
				  int count, struct perm_bits *perm,
				  int offset, __le32 val)
{
	memcpy(vdev->vconfig + pos, &val, count);

	return count;
}
/* Fully-virtualized read: served entirely from vconfig, never hardware */
static int vfio_virt_config_read(struct vfio_pci_core_device *vdev, int pos,
				 int count, struct perm_bits *perm,
				 int offset, __le32 *val)
{
	memcpy(val, vdev->vconfig + pos, count);

	return count;
}
/* Default capability regions to read-only, no-virtualization */
static struct perm_bits cap_perms[PCI_CAP_ID_MAX + 1] = {
	[0 ... PCI_CAP_ID_MAX] = { .readfn = vfio_direct_config_read }
};
static struct perm_bits ecap_perms[PCI_EXT_CAP_ID_MAX + 1] = {
	[0 ... PCI_EXT_CAP_ID_MAX] = { .readfn = vfio_direct_config_read }
};

/*
 * Default unassigned regions to raw read-write access.  Some devices
 * require this to function as they hide registers between the gaps in
 * config space (be2net).  Like MMIO and I/O port registers, we have
 * to trust the hardware isolation.
 */
static struct perm_bits unassigned_perms = {
	.readfn = vfio_raw_config_read,
	.writefn = vfio_raw_config_write
};

/* Permissions for regions served purely from vconfig */
static struct perm_bits virt_perms = {
	.readfn = vfio_virt_config_read,
	.writefn = vfio_virt_config_write
};
/* Release a permission table's bitmaps; safe to call on a half-allocated
 * table since kfree(NULL) is a no-op. */
static void free_perm_bits(struct perm_bits *perm)
{
	kfree(perm->virt);
	perm->virt = NULL;

	kfree(perm->write);
	perm->write = NULL;
}
/*
 * Allocate the virt/write bitmaps for a permission table of @size bytes
 * and install the default read/write handlers.  Returns 0 or -ENOMEM.
 */
static int alloc_perm_bits(struct perm_bits *perm, int size)
{
	/*
	 * Round up all permission bits to the next dword, this lets us
	 * ignore whether a read/write exceeds the defined capability
	 * structure.  We can do this because:
	 *  - Standard config space is already dword aligned
	 *  - Capabilities are all dword aligned (bits 0:1 of next reserved)
	 *  - Express capabilities defined as dword aligned
	 */
	size = round_up(size, 4);

	/*
	 * Zero state is
	 * - All Readable, None Writeable, None Virtualized
	 */
	perm->virt = kzalloc(size, GFP_KERNEL);
	perm->write = kzalloc(size, GFP_KERNEL);
	if (!perm->virt || !perm->write) {
		free_perm_bits(perm);
		return -ENOMEM;
	}

	perm->readfn = vfio_default_config_read;
	perm->writefn = vfio_default_config_write;

	return 0;
}
/*
 * Helper functions for filling in permission tables: set the virt and
 * write masks for a byte/word/dword at offset @off.
 */
static inline void p_setb(struct perm_bits *p, int off, u8 virt, u8 write)
{
	p->virt[off] = virt;
	p->write[off] = write;
}

/* Handle endian-ness - pci and tables are little-endian */
static inline void p_setw(struct perm_bits *p, int off, u16 virt, u16 write)
{
	*(__le16 *)(&p->virt[off]) = cpu_to_le16(virt);
	*(__le16 *)(&p->write[off]) = cpu_to_le16(write);
}

/* Handle endian-ness - pci and tables are little-endian */
static inline void p_setd(struct perm_bits *p, int off, u32 virt, u32 write)
{
	*(__le32 *)(&p->virt[off]) = cpu_to_le32(virt);
	*(__le32 *)(&p->write[off]) = cpu_to_le32(write);
}
/*
 * Caller should hold memory_lock semaphore.
 *
 * Note the test uses the *virtual* command register value, i.e. what the
 * user last wrote, not what the hardware currently reports.
 */
bool __vfio_pci_memory_enabled(struct vfio_pci_core_device *vdev)
{
	struct pci_dev *pdev = vdev->pdev;
	u16 cmd = le16_to_cpu(*(__le16 *)&vdev->vconfig[PCI_COMMAND]);

	/*
	 * Memory region cannot be accessed if device power state is D3.
	 *
	 * SR-IOV VF memory enable is handled by the MSE bit in the
	 * PF SR-IOV capability, there's therefore no need to trigger
	 * faults based on the virtual value.
	 */
	return pdev->current_state < PCI_D3hot &&
	       (pdev->no_command_memory || (cmd & PCI_COMMAND_MEMORY));
}
/*
 * Restore the *real* BARs after we detect a FLR or backdoor reset.
 * (backdoor = some device specific technique that we didn't catch)
 */
static void vfio_bar_restore(struct vfio_pci_core_device *vdev)
{
	struct pci_dev *pdev = vdev->pdev;
	u32 *rbar = vdev->rbar;
	u16 cmd;
	int i;

	/* VF BARs are fixed by the PF SR-IOV capability; nothing to do */
	if (pdev->is_virtfn)
		return;

	pci_info(pdev, "%s: reset recovery - restoring BARs\n", __func__);

	for (i = PCI_BASE_ADDRESS_0; i <= PCI_BASE_ADDRESS_5; i += 4, rbar++)
		pci_user_write_config_dword(pdev, i, *rbar);

	/* after the loop rbar points at the saved expansion ROM address */
	pci_user_write_config_dword(pdev, PCI_ROM_ADDRESS, *rbar);

	/* re-assert INTx disable if we masked INTx at open */
	if (vdev->nointx) {
		pci_user_read_config_word(pdev, PCI_COMMAND, &cmd);
		cmd |= PCI_COMMAND_INTX_DISABLE;
		pci_user_write_config_word(pdev, PCI_COMMAND, cmd);
	}
}
/*
 * Synthesize the read-only low flag bits of a BAR register from the
 * kernel's view of the resource (I/O vs memory, prefetch, 64-bit type).
 */
static __le32 vfio_generate_bar_flags(struct pci_dev *pdev, int bar)
{
	unsigned long flags = pci_resource_flags(pdev, bar);
	u32 bits;

	if (flags & IORESOURCE_IO)
		return cpu_to_le32(PCI_BASE_ADDRESS_SPACE_IO);

	bits = PCI_BASE_ADDRESS_SPACE_MEMORY;

	if (flags & IORESOURCE_PREFETCH)
		bits |= PCI_BASE_ADDRESS_MEM_PREFETCH;

	if (flags & IORESOURCE_MEM_64)
		bits |= PCI_BASE_ADDRESS_MEM_TYPE_64;

	return cpu_to_le32(bits);
}
/*
 * Pretend we're hardware and tweak the values of the *virtual* PCI BARs
 * to reflect the hardware capabilities.  This implements BAR sizing.
 */
static void vfio_bar_fixup(struct vfio_pci_core_device *vdev)
{
	struct pci_dev *pdev = vdev->pdev;
	int i;
	__le32 *vbar;
	u64 mask;

	/* Only recompute after a user write dirtied a BAR */
	if (!vdev->bardirty)
		return;

	vbar = (__le32 *)&vdev->vconfig[PCI_BASE_ADDRESS_0];

	for (i = 0; i < PCI_STD_NUM_BARS; i++, vbar++) {
		int bar = i + PCI_STD_RESOURCES;

		if (!pci_resource_start(pdev, bar)) {
			*vbar = 0; /* Unmapped by host = unimplemented to user */
			continue;
		}

		/* Clear size bits below the BAR's alignment, as hw would */
		mask = ~(pci_resource_len(pdev, bar) - 1);

		*vbar &= cpu_to_le32((u32)mask);
		*vbar |= vfio_generate_bar_flags(pdev, bar);

		/* 64-bit BAR: also fix up the high dword in the next slot */
		if (*vbar & cpu_to_le32(PCI_BASE_ADDRESS_MEM_TYPE_64)) {
			vbar++;
			*vbar &= cpu_to_le32((u32)(mask >> 32));
			i++;
		}
	}

	vbar = (__le32 *)&vdev->vconfig[PCI_ROM_ADDRESS];

	/*
	 * NB. REGION_INFO will have reported zero size if we weren't able
	 * to read the ROM, but we still return the actual BAR size here if
	 * it exists (or the shadow ROM space).
	 */
	if (pci_resource_start(pdev, PCI_ROM_RESOURCE)) {
		mask = ~(pci_resource_len(pdev, PCI_ROM_RESOURCE) - 1);
		mask |= PCI_ROM_ADDRESS_ENABLE;
		*vbar &= cpu_to_le32((u32)mask);
	} else if (pdev->resource[PCI_ROM_RESOURCE].flags &
					IORESOURCE_ROM_SHADOW) {
		mask = ~(0x20000 - 1);	/* fixed 128k shadow ROM */
		mask |= PCI_ROM_ADDRESS_ENABLE;
		*vbar &= cpu_to_le32((u32)mask);
	} else
		*vbar = 0;

	vdev->bardirty = false;
}
/*
 * Read handler for the standard config header: refresh the virtual BARs
 * before serving BAR reads and emulate the memory-enable bit for devices
 * flagged no_command_memory.
 */
static int vfio_basic_config_read(struct vfio_pci_core_device *vdev, int pos,
				  int count, struct perm_bits *perm,
				  int offset, __le32 *val)
{
	if (is_bar(offset)) /* pos == offset for basic config */
		vfio_bar_fixup(vdev);

	count = vfio_default_config_read(vdev, pos, count, perm, offset, val);

	/* Mask in virtual memory enable */
	if (offset == PCI_COMMAND && vdev->pdev->no_command_memory) {
		u16 cmd = le16_to_cpu(*(__le16 *)&vdev->vconfig[PCI_COMMAND]);
		u32 tmp_val = le32_to_cpu(*val);

		tmp_val |= cmd & PCI_COMMAND_MEMORY;
		*val = cpu_to_le32(tmp_val);
	}

	return count;
}
/* Test whether BARs match the value we think they should contain */
static bool vfio_need_bar_restore(struct vfio_pci_core_device *vdev)
{
	int pos;

	for (pos = PCI_BASE_ADDRESS_0; pos <= PCI_BASE_ADDRESS_5; pos += 4) {
		int idx = (pos - PCI_BASE_ADDRESS_0) / 4;
		u32 cur;

		/* Zero saved value means nothing to compare against */
		if (!vdev->rbar[idx])
			continue;

		if (pci_user_read_config_dword(vdev->pdev, pos, &cur) ||
		    vdev->rbar[idx] != cur)
			return true;
	}

	return false;
}
/*
 * Write handler for the standard config header.  Writes to PCI_COMMAND
 * are performed under memory_lock (zapping user mmaps first when memory
 * is being disabled) so the virtual command value stays consistent with
 * in-flight BAR accesses; this function also emulates INTx disable and
 * marks BARs dirty for later fixup.
 */
static int vfio_basic_config_write(struct vfio_pci_core_device *vdev, int pos,
				   int count, struct perm_bits *perm,
				   int offset, __le32 val)
{
	struct pci_dev *pdev = vdev->pdev;
	__le16 *virt_cmd;
	u16 new_cmd = 0;
	int ret;

	virt_cmd = (__le16 *)&vdev->vconfig[PCI_COMMAND];

	if (offset == PCI_COMMAND) {
		bool phys_mem, virt_mem, new_mem, phys_io, virt_io, new_io;
		u16 phys_cmd;

		ret = pci_user_read_config_word(pdev, PCI_COMMAND, &phys_cmd);
		if (ret)
			return ret;

		new_cmd = le32_to_cpu(val);

		phys_io = !!(phys_cmd & PCI_COMMAND_IO);
		virt_io = !!(le16_to_cpu(*virt_cmd) & PCI_COMMAND_IO);
		new_io = !!(new_cmd & PCI_COMMAND_IO);

		phys_mem = !!(phys_cmd & PCI_COMMAND_MEMORY);
		virt_mem = !!(le16_to_cpu(*virt_cmd) & PCI_COMMAND_MEMORY);
		new_mem = !!(new_cmd & PCI_COMMAND_MEMORY);

		if (!new_mem)
			vfio_pci_zap_and_down_write_memory_lock(vdev);
		else
			down_write(&vdev->memory_lock);

		/*
		 * If the user is writing mem/io enable (new_mem/io) and we
		 * think it's already enabled (virt_mem/io), but the hardware
		 * shows it disabled (phys_mem/io), then the device has
		 * undergone some kind of backdoor reset and needs to be
		 * restored before we allow it to enable the bars.
		 * SR-IOV devices will trigger this - for mem enable let's
		 * catch this now and for io enable it will be caught later
		 */
		if ((new_mem && virt_mem && !phys_mem &&
		     !pdev->no_command_memory) ||
		    (new_io && virt_io && !phys_io) ||
		    vfio_need_bar_restore(vdev))
			vfio_bar_restore(vdev);
	}

	count = vfio_default_config_write(vdev, pos, count, perm, offset, val);
	if (count < 0) {
		if (offset == PCI_COMMAND)
			up_write(&vdev->memory_lock);
		return count;
	}

	/*
	 * Save current memory/io enable bits in vconfig to allow for
	 * the test above next time.
	 */
	if (offset == PCI_COMMAND) {
		u16 mask = PCI_COMMAND_MEMORY | PCI_COMMAND_IO;

		*virt_cmd &= cpu_to_le16(~mask);
		*virt_cmd |= cpu_to_le16(new_cmd & mask);

		up_write(&vdev->memory_lock);
	}

	/* Emulate INTx disable */
	if (offset >= PCI_COMMAND && offset <= PCI_COMMAND + 1) {
		bool virt_intx_disable;

		virt_intx_disable = !!(le16_to_cpu(*virt_cmd) &
				       PCI_COMMAND_INTX_DISABLE);

		if (virt_intx_disable && !vdev->virq_disabled) {
			vdev->virq_disabled = true;
			vfio_pci_intx_mask(vdev);
		} else if (!virt_intx_disable && vdev->virq_disabled) {
			vdev->virq_disabled = false;
			vfio_pci_intx_unmask(vdev);
		}
	}

	if (is_bar(offset))
		vdev->bardirty = true;

	return count;
}
/* Permissions for the Basic PCI Header */
static int __init init_pci_cap_basic_perm(struct perm_bits *perm)
{
	if (alloc_perm_bits(perm, PCI_STD_HEADER_SIZEOF))
		return -ENOMEM;

	perm->readfn = vfio_basic_config_read;
	perm->writefn = vfio_basic_config_write;

	/* Virtualized for SR-IOV functions, which just have FFFF */
	p_setw(perm, PCI_VENDOR_ID, (u16)ALL_VIRT, NO_WRITE);
	p_setw(perm, PCI_DEVICE_ID, (u16)ALL_VIRT, NO_WRITE);

	/*
	 * Virtualize INTx disable, we use it internally for interrupt
	 * control and can emulate it for non-PCI 2.3 devices.
	 */
	p_setw(perm, PCI_COMMAND, PCI_COMMAND_INTX_DISABLE, (u16)ALL_WRITE);

	/* Virtualize capability list, we might want to skip/disable */
	p_setw(perm, PCI_STATUS, PCI_STATUS_CAP_LIST, NO_WRITE);

	/* No harm to write */
	p_setb(perm, PCI_CACHE_LINE_SIZE, NO_VIRT, (u8)ALL_WRITE);
	p_setb(perm, PCI_LATENCY_TIMER, NO_VIRT, (u8)ALL_WRITE);
	p_setb(perm, PCI_BIST, NO_VIRT, (u8)ALL_WRITE);

	/* Virtualize all bars, can't touch the real ones */
	p_setd(perm, PCI_BASE_ADDRESS_0, ALL_VIRT, ALL_WRITE);
	p_setd(perm, PCI_BASE_ADDRESS_1, ALL_VIRT, ALL_WRITE);
	p_setd(perm, PCI_BASE_ADDRESS_2, ALL_VIRT, ALL_WRITE);
	p_setd(perm, PCI_BASE_ADDRESS_3, ALL_VIRT, ALL_WRITE);
	p_setd(perm, PCI_BASE_ADDRESS_4, ALL_VIRT, ALL_WRITE);
	p_setd(perm, PCI_BASE_ADDRESS_5, ALL_VIRT, ALL_WRITE);
	p_setd(perm, PCI_ROM_ADDRESS, ALL_VIRT, ALL_WRITE);

	/* Allow us to adjust capability chain */
	p_setb(perm, PCI_CAPABILITY_LIST, (u8)ALL_VIRT, NO_WRITE);

	/* Sometimes used by sw, just virtualize */
	p_setb(perm, PCI_INTERRUPT_LINE, (u8)ALL_VIRT, (u8)ALL_WRITE);

	/* Virtualize interrupt pin to allow hiding INTx */
	p_setb(perm, PCI_INTERRUPT_PIN, (u8)ALL_VIRT, (u8)NO_WRITE);

	return 0;
}
/*
 * It takes all the required locks to protect the access of power related
 * variables and then invokes vfio_pci_set_power_state().  Entering D3
 * additionally zaps user mmaps first, since memory cannot be accessed
 * in that state.
 */
static void vfio_lock_and_set_power_state(struct vfio_pci_core_device *vdev,
					  pci_power_t state)
{
	if (state >= PCI_D3hot)
		vfio_pci_zap_and_down_write_memory_lock(vdev);
	else
		down_write(&vdev->memory_lock);

	vfio_pci_set_power_state(vdev, state);
	up_write(&vdev->memory_lock);
}
/*
 * Write handler for the PM capability: trap writes to PCI_PM_CTRL so the
 * requested D-state transition is applied by the host under the proper
 * locks instead of hitting the device directly.
 */
static int vfio_pm_config_write(struct vfio_pci_core_device *vdev, int pos,
				int count, struct perm_bits *perm,
				int offset, __le32 val)
{
	count = vfio_default_config_write(vdev, pos, count, perm, offset, val);
	if (count < 0)
		return count;

	if (offset == PCI_PM_CTRL) {
		pci_power_t state;

		/* STATE_MASK is two bits wide, so cases 0-3 are exhaustive */
		switch (le32_to_cpu(val) & PCI_PM_CTRL_STATE_MASK) {
		case 0:
			state = PCI_D0;
			break;
		case 1:
			state = PCI_D1;
			break;
		case 2:
			state = PCI_D2;
			break;
		case 3:
			state = PCI_D3hot;
			break;
		}

		vfio_lock_and_set_power_state(vdev, state);
	}

	return count;
}
/* Permissions for the Power Management capability */
static int __init init_pci_cap_pm_perm(struct perm_bits *perm)
{
	if (alloc_perm_bits(perm, pci_cap_length[PCI_CAP_ID_PM]))
		return -ENOMEM;

	perm->writefn = vfio_pm_config_write;

	/*
	 * We always virtualize the next field so we can remove
	 * capabilities from the chain if we want to.
	 */
	p_setb(perm, PCI_CAP_LIST_NEXT, (u8)ALL_VIRT, NO_WRITE);

	/*
	 * The guests can't process PME events.  If any PME event will be
	 * generated, then it will be mostly handled in the host and the
	 * host will clear the PME_STATUS.  So virtualize PME_Support bits.
	 * The vconfig bits will be cleared during device capability
	 * initialization.
	 */
	p_setw(perm, PCI_PM_PMC, PCI_PM_CAP_PME_MASK, NO_WRITE);

	/*
	 * Power management is defined *per function*, so we can let
	 * the user change power state, but we trap and initiate the
	 * change ourselves, so the state bits are read-only.
	 *
	 * The guest can't process PME from D3cold so virtualize PME_Status
	 * and PME_En bits.  The vconfig bits will be cleared during device
	 * capability initialization.
	 */
	p_setd(perm, PCI_PM_CTRL,
	       PCI_PM_CTRL_PME_ENABLE | PCI_PM_CTRL_PME_STATUS,
	       ~(PCI_PM_CTRL_PME_ENABLE | PCI_PM_CTRL_PME_STATUS |
		 PCI_PM_CTRL_STATE_MASK));

	return 0;
}
/*
 * Write handler for the VPD capability: the address/data registers are
 * fully virtualized, and transactions are serviced through the kernel's
 * pci_read_vpd()/pci_write_vpd() interfaces rather than the device's
 * own VPD registers.
 */
static int vfio_vpd_config_write(struct vfio_pci_core_device *vdev, int pos,
				 int count, struct perm_bits *perm,
				 int offset, __le32 val)
{
	struct pci_dev *pdev = vdev->pdev;
	__le16 *paddr = (__le16 *)(vdev->vconfig + pos - offset + PCI_VPD_ADDR);
	__le32 *pdata = (__le32 *)(vdev->vconfig + pos - offset + PCI_VPD_DATA);
	u16 addr;
	u32 data;

	/*
	 * Write through to emulation.  If the write includes the upper byte
	 * of PCI_VPD_ADDR, then the PCI_VPD_ADDR_F bit is written and we
	 * have work to do.
	 */
	count = vfio_default_config_write(vdev, pos, count, perm, offset, val);
	if (count < 0 || offset > PCI_VPD_ADDR + 1 ||
	    offset + count <= PCI_VPD_ADDR + 1)
		return count;

	addr = le16_to_cpu(*paddr);

	if (addr & PCI_VPD_ADDR_F) {
		/* F set on write = VPD write request */
		data = le32_to_cpu(*pdata);
		if (pci_write_vpd(pdev, addr & ~PCI_VPD_ADDR_F, 4, &data) != 4)
			return count;
	} else {
		/* F clear on write = VPD read request */
		data = 0;
		if (pci_read_vpd(pdev, addr, 4, &data) < 0)
			return count;
		*pdata = cpu_to_le32(data);
	}

	/*
	 * Toggle PCI_VPD_ADDR_F in the emulated PCI_VPD_ADDR register to
	 * signal completion.  If an error occurs above, we assume that not
	 * toggling this bit will induce a driver timeout.
	 */
	addr ^= PCI_VPD_ADDR_F;
	*paddr = cpu_to_le16(addr);

	return count;
}
/* Permissions for Vital Product Data capability */
static int __init init_pci_cap_vpd_perm(struct perm_bits *perm)
{
	if (alloc_perm_bits(perm, pci_cap_length[PCI_CAP_ID_VPD]))
		return -ENOMEM;

	perm->writefn = vfio_vpd_config_write;

	/*
	 * We always virtualize the next field so we can remove
	 * capabilities from the chain if we want to.
	 */
	p_setb(perm, PCI_CAP_LIST_NEXT, (u8)ALL_VIRT, NO_WRITE);

	/*
	 * Both the address and data registers are virtualized to
	 * enable access through the pci_vpd_read/write functions
	 */
	p_setw(perm, PCI_VPD_ADDR, (u16)ALL_VIRT, (u16)ALL_WRITE);
	p_setd(perm, PCI_VPD_DATA, ALL_VIRT, ALL_WRITE);

	return 0;
}
/* Permissions for PCI-X capability */
static int __init init_pci_cap_pcix_perm(struct perm_bits *perm)
{
	/* Alloc 24, but only 8 are used in v0 */
	if (alloc_perm_bits(perm, PCI_CAP_PCIX_SIZEOF_V2))
		return -ENOMEM;

	/* Next pointer virtualized so the chain can be edited */
	p_setb(perm, PCI_CAP_LIST_NEXT, (u8)ALL_VIRT, NO_WRITE);

	/* Command and ECC control pass straight through to hardware */
	p_setw(perm, PCI_X_CMD, NO_VIRT, (u16)ALL_WRITE);
	p_setd(perm, PCI_X_ECC_CSR, NO_VIRT, ALL_WRITE);

	return 0;
}
/*
 * PCI Express capability write handler.  Virtualizes the FLR bit in
 * Device Control (optionally triggering a function reset) and adjusts
 * the physical MRRS when the user changes the virtual value.
 * Returns the byte count consumed, or a negative errno.
 */
static int vfio_exp_config_write(struct vfio_pci_core_device *vdev, int pos,
				 int count, struct perm_bits *perm,
				 int offset, __le32 val)
{
	__le16 *ctrl = (__le16 *)(vdev->vconfig + pos -
				  offset + PCI_EXP_DEVCTL);
	/* Snapshot virtual MRRS before the write so we can detect a change */
	int readrq = le16_to_cpu(*ctrl) & PCI_EXP_DEVCTL_READRQ;
	count = vfio_default_config_write(vdev, pos, count, perm, offset, val);
	if (count < 0)
		return count;
	/*
	 * The FLR bit is virtualized, if set and the device supports PCIe
	 * FLR, issue a reset_function.  Regardless, clear the bit, the spec
	 * requires it to be always read as zero.  NB, reset_function might
	 * not use a PCIe FLR, we don't have that level of granularity.
	 */
	if (*ctrl & cpu_to_le16(PCI_EXP_DEVCTL_BCR_FLR)) {
		u32 cap;
		int ret;
		*ctrl &= ~cpu_to_le16(PCI_EXP_DEVCTL_BCR_FLR);
		ret = pci_user_read_config_dword(vdev->pdev,
						 pos - offset + PCI_EXP_DEVCAP,
						 &cap);
		if (!ret && (cap & PCI_EXP_DEVCAP_FLR)) {
			/* Zap mmaps before reset to avoid user access races */
			vfio_pci_zap_and_down_write_memory_lock(vdev);
			pci_try_reset_function(vdev->pdev);
			up_write(&vdev->memory_lock);
		}
	}
	/*
	 * MPS is virtualized to the user, writes do not change the physical
	 * register since determining a proper MPS value requires a system wide
	 * device view.  The MRRS is largely independent of MPS, but since the
	 * user does not have that system-wide view, they might set a safe, but
	 * inefficiently low value.  Here we allow writes through to hardware,
	 * but we set the floor to the physical device MPS setting, so that
	 * we can at least use full TLPs, as defined by the MPS value.
	 *
	 * NB, if any devices actually depend on an artificially low MRRS
	 * setting, this will need to be revisited, perhaps with a quirk
	 * though pcie_set_readrq().
	 */
	if (readrq != (le16_to_cpu(*ctrl) & PCI_EXP_DEVCTL_READRQ)) {
		/* Decode the 3-bit encoded field (bits 14:12) into bytes */
		readrq = 128 <<
			((le16_to_cpu(*ctrl) & PCI_EXP_DEVCTL_READRQ) >> 12);
		readrq = max(readrq, pcie_get_mps(vdev->pdev));
		pcie_set_readrq(vdev->pdev, readrq);
	}
	return count;
}
/* Permissions for PCI Express capability */
static int __init init_pci_cap_exp_perm(struct perm_bits *perm)
{
	int ret;

	/* Alloc largest of possible sizes */
	ret = alloc_perm_bits(perm, PCI_CAP_EXP_ENDPOINT_SIZEOF_V2);
	if (ret)
		return -ENOMEM;

	perm->writefn = vfio_exp_config_write;

	/* Virtual next pointer, read-only to the user */
	p_setb(perm, PCI_CAP_LIST_NEXT, (u8)ALL_VIRT, NO_WRITE);

	/*
	 * Allow writes to device control fields, except devctl_phantom,
	 * which could confuse IOMMU, MPS, which can break communication
	 * with other physical devices, and the ARI bit in devctl2, which
	 * is set at probe time.  FLR and MRRS get virtualized via our
	 * writefn.
	 */
	p_setw(perm, PCI_EXP_DEVCTL,
	       PCI_EXP_DEVCTL_BCR_FLR | PCI_EXP_DEVCTL_PAYLOAD |
	       PCI_EXP_DEVCTL_READRQ, ~PCI_EXP_DEVCTL_PHANTOM);
	p_setw(perm, PCI_EXP_DEVCTL2, NO_VIRT, ~PCI_EXP_DEVCTL2_ARI);

	return 0;
}
/*
 * Advanced Features capability write handler.  Virtualizes the AF FLR
 * bit, issuing a function reset when the device advertises support.
 * Returns the byte count consumed, or a negative errno.
 */
static int vfio_af_config_write(struct vfio_pci_core_device *vdev, int pos,
				int count, struct perm_bits *perm,
				int offset, __le32 val)
{
	u8 *ctrl = vdev->vconfig + pos - offset + PCI_AF_CTRL;
	count = vfio_default_config_write(vdev, pos, count, perm, offset, val);
	if (count < 0)
		return count;
	/*
	 * The FLR bit is virtualized, if set and the device supports AF
	 * FLR, issue a reset_function.  Regardless, clear the bit, the spec
	 * requires it to be always read as zero.  NB, reset_function might
	 * not use an AF FLR, we don't have that level of granularity.
	 */
	if (*ctrl & PCI_AF_CTRL_FLR) {
		u8 cap;
		int ret;
		*ctrl &= ~PCI_AF_CTRL_FLR;
		ret = pci_user_read_config_byte(vdev->pdev,
						pos - offset + PCI_AF_CAP,
						&cap);
		/* Both FLR and Transaction Pending support are required */
		if (!ret && (cap & PCI_AF_CAP_FLR) && (cap & PCI_AF_CAP_TP)) {
			/* Zap mmaps before reset to avoid user access races */
			vfio_pci_zap_and_down_write_memory_lock(vdev);
			pci_try_reset_function(vdev->pdev);
			up_write(&vdev->memory_lock);
		}
	}
	return count;
}
/* Permissions for Advanced Function capability */
static int __init init_pci_cap_af_perm(struct perm_bits *perm)
{
	int ret;

	ret = alloc_perm_bits(perm, pci_cap_length[PCI_CAP_ID_AF]);
	if (ret)
		return -ENOMEM;

	perm->writefn = vfio_af_config_write;

	/* Virtual next pointer, read-only to the user */
	p_setb(perm, PCI_CAP_LIST_NEXT, (u8)ALL_VIRT, NO_WRITE);
	/* FLR bit is both virtualized and writable; handled by writefn */
	p_setb(perm, PCI_AF_CTRL, PCI_AF_CTRL_FLR, PCI_AF_CTRL_FLR);

	return 0;
}
/* Permissions for Advanced Error Reporting extended capability */
static int __init init_pci_ext_cap_err_perm(struct perm_bits *perm)
{
	u32 mask;
	if (alloc_perm_bits(perm, pci_ext_cap_length[PCI_EXT_CAP_ID_ERR]))
		return -ENOMEM;
	/*
	 * Virtualize the first dword of all express capabilities
	 * because it includes the next pointer.  This lets us later
	 * remove capabilities from the chain if we need to.
	 */
	p_setd(perm, 0, ALL_VIRT, NO_WRITE);
	/* Writable bits mask for the uncorrectable error registers */
	mask =	PCI_ERR_UNC_UND |	/* Undefined */
		PCI_ERR_UNC_DLP |	/* Data Link Protocol */
		PCI_ERR_UNC_SURPDN |	/* Surprise Down */
		PCI_ERR_UNC_POISON_TLP |	/* Poisoned TLP */
		PCI_ERR_UNC_FCP |	/* Flow Control Protocol */
		PCI_ERR_UNC_COMP_TIME |	/* Completion Timeout */
		PCI_ERR_UNC_COMP_ABORT |	/* Completer Abort */
		PCI_ERR_UNC_UNX_COMP |	/* Unexpected Completion */
		PCI_ERR_UNC_RX_OVER |	/* Receiver Overflow */
		PCI_ERR_UNC_MALF_TLP |	/* Malformed TLP */
		PCI_ERR_UNC_ECRC |	/* ECRC Error Status */
		PCI_ERR_UNC_UNSUP |	/* Unsupported Request */
		PCI_ERR_UNC_ACSV |	/* ACS Violation */
		PCI_ERR_UNC_INTN |	/* internal error */
		PCI_ERR_UNC_MCBTLP |	/* MC blocked TLP */
		PCI_ERR_UNC_ATOMEG |	/* Atomic egress blocked */
		PCI_ERR_UNC_TLPPRE;	/* TLP prefix blocked */
	p_setd(perm, PCI_ERR_UNCOR_STATUS, NO_VIRT, mask);
	p_setd(perm, PCI_ERR_UNCOR_MASK, NO_VIRT, mask);
	p_setd(perm, PCI_ERR_UNCOR_SEVER, NO_VIRT, mask);
	/* Writable bits mask for the correctable error registers */
	mask =	PCI_ERR_COR_RCVR |	/* Receiver Error Status */
		PCI_ERR_COR_BAD_TLP |	/* Bad TLP Status */
		PCI_ERR_COR_BAD_DLLP |	/* Bad DLLP Status */
		PCI_ERR_COR_REP_ROLL |	/* REPLAY_NUM Rollover */
		PCI_ERR_COR_REP_TIMER |	/* Replay Timer Timeout */
		PCI_ERR_COR_ADV_NFAT |	/* Advisory Non-Fatal */
		PCI_ERR_COR_INTERNAL |	/* Corrected Internal */
		PCI_ERR_COR_LOG_OVER;	/* Header Log Overflow */
	p_setd(perm, PCI_ERR_COR_STATUS, NO_VIRT, mask);
	p_setd(perm, PCI_ERR_COR_MASK, NO_VIRT, mask);
	/* ECRC generate/check enables in the AER capability register */
	mask =	PCI_ERR_CAP_ECRC_GENE |	/* ECRC Generation Enable */
		PCI_ERR_CAP_ECRC_CHKE;	/* ECRC Check Enable */
	p_setd(perm, PCI_ERR_CAP, NO_VIRT, mask);
	return 0;
}
/* Permissions for Power Budgeting extended capability */
static int __init init_pci_ext_cap_pwr_perm(struct perm_bits *perm)
{
	int ret;

	ret = alloc_perm_bits(perm, pci_ext_cap_length[PCI_EXT_CAP_ID_PWR]);
	if (ret)
		return -ENOMEM;

	/* Virtualize the header dword, which contains the next pointer */
	p_setd(perm, 0, ALL_VIRT, NO_WRITE);

	/* Writing the data selector is OK, the info is still read-only */
	p_setb(perm, PCI_PWR_DATA, NO_VIRT, (u8)ALL_WRITE);

	return 0;
}
/*
* Initialize the shared permission tables
*/
void vfio_pci_uninit_perm_bits(void)
{
free_perm_bits(&cap_perms[PCI_CAP_ID_BASIC]);
free_perm_bits(&cap_perms[PCI_CAP_ID_PM]);
free_perm_bits(&cap_perms[PCI_CAP_ID_VPD]);
free_perm_bits(&cap_perms[PCI_CAP_ID_PCIX]);
free_perm_bits(&cap_perms[PCI_CAP_ID_EXP]);
free_perm_bits(&cap_perms[PCI_CAP_ID_AF]);
free_perm_bits(&ecap_perms[PCI_EXT_CAP_ID_ERR]);
free_perm_bits(&ecap_perms[PCI_EXT_CAP_ID_PWR]);
}
/*
 * Build all shared permission tables at module init.  Errors are
 * OR-accumulated so every table is attempted; on any failure all
 * tables are torn down and the combined error is returned.
 */
int __init vfio_pci_init_perm_bits(void)
{
	int ret;
	/* Basic config space */
	ret = init_pci_cap_basic_perm(&cap_perms[PCI_CAP_ID_BASIC]);
	/* Capabilities */
	ret |= init_pci_cap_pm_perm(&cap_perms[PCI_CAP_ID_PM]);
	ret |= init_pci_cap_vpd_perm(&cap_perms[PCI_CAP_ID_VPD]);
	ret |= init_pci_cap_pcix_perm(&cap_perms[PCI_CAP_ID_PCIX]);
	/* Vendor capabilities pass writes straight through */
	cap_perms[PCI_CAP_ID_VNDR].writefn = vfio_raw_config_write;
	ret |= init_pci_cap_exp_perm(&cap_perms[PCI_CAP_ID_EXP]);
	ret |= init_pci_cap_af_perm(&cap_perms[PCI_CAP_ID_AF]);
	/* Extended capabilities */
	ret |= init_pci_ext_cap_err_perm(&ecap_perms[PCI_EXT_CAP_ID_ERR]);
	ret |= init_pci_ext_cap_pwr_perm(&ecap_perms[PCI_EXT_CAP_ID_PWR]);
	/* Vendor-specific and DVSEC ecaps also pass writes through */
	ecap_perms[PCI_EXT_CAP_ID_VNDR].writefn = vfio_raw_config_write;
	ecap_perms[PCI_EXT_CAP_ID_DVSEC].writefn = vfio_raw_config_write;
	if (ret)
		vfio_pci_uninit_perm_bits();
	return ret;
}
/*
 * Find the starting offset of the capability that covers config space
 * offset @pos by walking the per-byte capability map backwards until
 * the ID changes.  Returns 0 for the basic (header) region.
 */
static int vfio_find_cap_start(struct vfio_pci_core_device *vdev, int pos)
{
	u8 cap = vdev->pci_config_map[pos];
	int base;

	if (cap == PCI_CAP_ID_BASIC)
		return 0;

	/* Extended capabilities never start below 0x100 */
	base = (pos >= PCI_CFG_SPACE_SIZE) ? PCI_CFG_SPACE_SIZE :
					     PCI_STD_HEADER_SIZEOF;

	/* XXX Can we have to abutting capabilities of the same type? */
	while (pos - 1 >= base && vdev->pci_config_map[pos - 1] == cap)
		pos--;

	return pos;
}
/*
 * MSI capability read handler.  Before the generic read, refresh the
 * virtual Multiple Message Capable field from vdev->msi_qmax so the
 * user sees the queue size we're willing to support.
 */
static int vfio_msi_config_read(struct vfio_pci_core_device *vdev, int pos,
				int count, struct perm_bits *perm,
				int offset, __le32 *val)
{
	/* Update max available queue size from msi_qmax */
	if (offset <= PCI_MSI_FLAGS && offset + count >= PCI_MSI_FLAGS) {
		__le16 *flags;
		int start;
		start = vfio_find_cap_start(vdev, pos);
		flags = (__le16 *)&vdev->vconfig[start];
		/* QMASK field is bits 3:1, hence the shift by 1 */
		*flags &= cpu_to_le16(~PCI_MSI_FLAGS_QMASK);
		*flags |= cpu_to_le16(vdev->msi_qmax << 1);
	}
	return vfio_default_config_read(vdev, pos, count, perm, offset, val);
}
/*
 * MSI capability write handler.  After the generic write, sanitize the
 * flags word (MSI enable is only controlled via the irq ioctl; queue
 * size is clamped to msi_qmax) and propagate it to hardware.
 */
static int vfio_msi_config_write(struct vfio_pci_core_device *vdev, int pos,
				 int count, struct perm_bits *perm,
				 int offset, __le32 val)
{
	count = vfio_default_config_write(vdev, pos, count, perm, offset, val);
	if (count < 0)
		return count;
	/* Fixup and write configured queue size and enable to hardware */
	if (offset <= PCI_MSI_FLAGS && offset + count >= PCI_MSI_FLAGS) {
		__le16 *pflags;
		u16 flags;
		int start, ret;
		start = vfio_find_cap_start(vdev, pos);
		pflags = (__le16 *)&vdev->vconfig[start + PCI_MSI_FLAGS];
		flags = le16_to_cpu(*pflags);
		/* MSI is enabled via ioctl */
		if (vdev->irq_type != VFIO_PCI_MSI_IRQ_INDEX)
			flags &= ~PCI_MSI_FLAGS_ENABLE;
		/* Check queue size */
		if ((flags & PCI_MSI_FLAGS_QSIZE) >> 4 > vdev->msi_qmax) {
			flags &= ~PCI_MSI_FLAGS_QSIZE;
			flags |= vdev->msi_qmax << 4;
		}
		/* Write back to virt and to hardware */
		*pflags = cpu_to_le16(flags);
		ret = pci_user_write_config_word(vdev->pdev,
						 start + PCI_MSI_FLAGS,
						 flags);
		if (ret)
			return ret;
	}
	return count;
}
/*
 * MSI determination is per-device, so this routine gets used beyond
 * initialization time.  Don't add __init
 */
static int init_pci_cap_msi_perm(struct perm_bits *perm, int len, u16 flags)
{
	if (alloc_perm_bits(perm, len))
		return -ENOMEM;
	perm->readfn = vfio_msi_config_read;
	perm->writefn = vfio_msi_config_write;
	/* Virtual next pointer, read-only to the user */
	p_setb(perm, PCI_CAP_LIST_NEXT, (u8)ALL_VIRT, NO_WRITE);
	/*
	 * The upper byte of the control register is reserved,
	 * just setup the lower byte.
	 */
	p_setb(perm, PCI_MSI_FLAGS, (u8)ALL_VIRT, (u8)ALL_WRITE);
	p_setd(perm, PCI_MSI_ADDRESS_LO, ALL_VIRT, ALL_WRITE);
	/* Register layout depends on 64-bit address and per-vector masking */
	if (flags & PCI_MSI_FLAGS_64BIT) {
		p_setd(perm, PCI_MSI_ADDRESS_HI, ALL_VIRT, ALL_WRITE);
		p_setw(perm, PCI_MSI_DATA_64, (u16)ALL_VIRT, (u16)ALL_WRITE);
		if (flags & PCI_MSI_FLAGS_MASKBIT) {
			p_setd(perm, PCI_MSI_MASK_64, NO_VIRT, ALL_WRITE);
			p_setd(perm, PCI_MSI_PENDING_64, NO_VIRT, ALL_WRITE);
		}
	} else {
		p_setw(perm, PCI_MSI_DATA_32, (u16)ALL_VIRT, (u16)ALL_WRITE);
		if (flags & PCI_MSI_FLAGS_MASKBIT) {
			p_setd(perm, PCI_MSI_MASK_32, NO_VIRT, ALL_WRITE);
			p_setd(perm, PCI_MSI_PENDING_32, NO_VIRT, ALL_WRITE);
		}
	}
	return 0;
}
/*
 * Determine MSI CAP field length; initialize msi_perms on 1st call per vdev.
 * Returns the capability length in bytes or a negative errno.
 */
static int vfio_msi_cap_len(struct vfio_pci_core_device *vdev, u8 pos)
{
	struct pci_dev *pdev = vdev->pdev;
	int len, ret;
	u16 flags;

	ret = pci_read_config_word(pdev, pos + PCI_MSI_FLAGS, &flags);
	if (ret)
		return pcibios_err_to_errno(ret);

	len = 10;		/* Minimum size */
	if (flags & PCI_MSI_FLAGS_64BIT)
		len += 4;	/* 64-bit address register */
	if (flags & PCI_MSI_FLAGS_MASKBIT)
		len += 10;	/* Mask and pending registers */

	/* Per-device MSI perms already allocated on a previous pass */
	if (vdev->msi_perm)
		return len;

	vdev->msi_perm = kmalloc(sizeof(struct perm_bits), GFP_KERNEL_ACCOUNT);
	if (!vdev->msi_perm)
		return -ENOMEM;

	ret = init_pci_cap_msi_perm(vdev->msi_perm, len, flags);
	if (ret) {
		kfree(vdev->msi_perm);
		/*
		 * Clear the stale pointer, otherwise vfio_config_free()
		 * would see it non-NULL and free it a second time.
		 */
		vdev->msi_perm = NULL;
		return ret;
	}

	return len;
}
/* Determine extended capability length for VC (2 & 9) and MFVC */
static int vfio_vc_cap_len(struct vfio_pci_core_device *vdev, u16 pos)
{
	struct pci_dev *pdev = vdev->pdev;
	u32 tmp;
	int ret, evcc, phases, vc_arb;
	int len = PCI_CAP_VC_BASE_SIZEOF;
	ret = pci_read_config_dword(pdev, pos + PCI_VC_PORT_CAP1, &tmp);
	if (ret)
		return pcibios_err_to_errno(ret);
	evcc = tmp & PCI_VC_CAP1_EVCC;	/* extended vc count */
	ret = pci_read_config_dword(pdev, pos + PCI_VC_PORT_CAP2, &tmp);
	if (ret)
		return pcibios_err_to_errno(ret);
	/* Number of VC arbitration phases determines the table size */
	if (tmp & PCI_VC_CAP2_128_PHASE)
		phases = 128;
	else if (tmp & PCI_VC_CAP2_64_PHASE)
		phases = 64;
	else if (tmp & PCI_VC_CAP2_32_PHASE)
		phases = 32;
	else
		phases = 0;
	/* 4 bits per phase in the VC arbitration table */
	vc_arb = phases * 4;
	/*
	 * Port arbitration tables are root & switch only;
	 * function arbitration tables are function 0 only.
	 * In either case, we'll never let user write them so
	 * we don't care how big they are
	 */
	len += (1 + evcc) * PCI_CAP_VC_PER_VC_SIZEOF;
	if (vc_arb) {
		/* Arbitration table is 16-byte aligned, sized in bits */
		len = round_up(len, 16);
		len += vc_arb / 8;
	}
	return len;
}
/*
 * Compute the byte length of a variable-length standard capability at
 * @pos.  As a side effect, probing PCI-X/Express also records whether
 * extended config space is populated (vdev->extended_caps).  Returns
 * the length, 0 to hide an unknown capability, or a negative errno.
 */
static int vfio_cap_len(struct vfio_pci_core_device *vdev, u8 cap, u8 pos)
{
	struct pci_dev *pdev = vdev->pdev;
	u32 dword;
	u16 word;
	u8 byte;
	int ret;
	switch (cap) {
	case PCI_CAP_ID_MSI:
		return vfio_msi_cap_len(vdev, pos);
	case PCI_CAP_ID_PCIX:
		ret = pci_read_config_word(pdev, pos + PCI_X_CMD, &word);
		if (ret)
			return pcibios_err_to_errno(ret);
		if (PCI_X_CMD_VERSION(word)) {
			if (pdev->cfg_size > PCI_CFG_SPACE_SIZE) {
				/* Test for extended capabilities */
				pci_read_config_dword(pdev, PCI_CFG_SPACE_SIZE,
						      &dword);
				vdev->extended_caps = (dword != 0);
			}
			return PCI_CAP_PCIX_SIZEOF_V2;
		} else
			return PCI_CAP_PCIX_SIZEOF_V0;
	case PCI_CAP_ID_VNDR:
		/* length follows next field */
		ret = pci_read_config_byte(pdev, pos + PCI_CAP_FLAGS, &byte);
		if (ret)
			return pcibios_err_to_errno(ret);
		return byte;
	case PCI_CAP_ID_EXP:
		if (pdev->cfg_size > PCI_CFG_SPACE_SIZE) {
			/* Test for extended capabilities */
			pci_read_config_dword(pdev, PCI_CFG_SPACE_SIZE, &dword);
			vdev->extended_caps = (dword != 0);
		}
		/* length based on version and type */
		if ((pcie_caps_reg(pdev) & PCI_EXP_FLAGS_VERS) == 1) {
			if (pci_pcie_type(pdev) == PCI_EXP_TYPE_RC_END)
				return 0xc; /* "All Devices" only, no link */
			return PCI_CAP_EXP_ENDPOINT_SIZEOF_V1;
		} else {
			if (pci_pcie_type(pdev) == PCI_EXP_TYPE_RC_END)
				return 0x2c; /* No link */
			return PCI_CAP_EXP_ENDPOINT_SIZEOF_V2;
		}
	case PCI_CAP_ID_HT:
		/* HyperTransport length depends on the 3-bit cap type */
		ret = pci_read_config_byte(pdev, pos + 3, &byte);
		if (ret)
			return pcibios_err_to_errno(ret);
		return (byte & HT_3BIT_CAP_MASK) ?
			HT_CAP_SIZEOF_SHORT : HT_CAP_SIZEOF_LONG;
	case PCI_CAP_ID_SATA:
		/* SATA capability is longer when the BAR is inline */
		ret = pci_read_config_byte(pdev, pos + PCI_SATA_REGS, &byte);
		if (ret)
			return pcibios_err_to_errno(ret);
		byte &= PCI_SATA_REGS_MASK;
		if (byte == PCI_SATA_REGS_INLINE)
			return PCI_SATA_SIZEOF_LONG;
		else
			return PCI_SATA_SIZEOF_SHORT;
	default:
		pci_warn(pdev, "%s: unknown length for PCI cap %#x@%#x\n",
			 __func__, cap, pos);
	}
	/* Unknown capability: report zero length so it gets hidden */
	return 0;
}
/*
 * Compute the byte length of a variable-length extended capability at
 * @epos.  Returns the length, 0 to hide an unknown ecap, or a negative
 * errno.
 */
static int vfio_ext_cap_len(struct vfio_pci_core_device *vdev, u16 ecap, u16 epos)
{
	struct pci_dev *pdev = vdev->pdev;
	u8 byte;
	u32 dword;
	int ret;
	switch (ecap) {
	case PCI_EXT_CAP_ID_VNDR:
		/* Vendor-specific header encodes its own length */
		ret = pci_read_config_dword(pdev, epos + PCI_VSEC_HDR, &dword);
		if (ret)
			return pcibios_err_to_errno(ret);
		return dword >> PCI_VSEC_HDR_LEN_SHIFT;
	case PCI_EXT_CAP_ID_VC:
	case PCI_EXT_CAP_ID_VC9:
	case PCI_EXT_CAP_ID_MFVC:
		return vfio_vc_cap_len(vdev, epos);
	case PCI_EXT_CAP_ID_ACS:
		ret = pci_read_config_byte(pdev, epos + PCI_ACS_CAP, &byte);
		if (ret)
			return pcibios_err_to_errno(ret);
		if (byte & PCI_ACS_EC) {
			int bits;
			/* Egress control vector adds a variable tail */
			ret = pci_read_config_byte(pdev,
						   epos + PCI_ACS_EGRESS_BITS,
						   &byte);
			if (ret)
				return pcibios_err_to_errno(ret);
			/* A zero bit count means the maximum, 256 */
			bits = byte ? round_up(byte, 32) : 256;
			return 8 + (bits / 8);
		}
		return 8;
	case PCI_EXT_CAP_ID_REBAR:
		/* 8 bytes of registers per resizable BAR */
		ret = pci_read_config_byte(pdev, epos + PCI_REBAR_CTRL, &byte);
		if (ret)
			return pcibios_err_to_errno(ret);
		byte &= PCI_REBAR_CTRL_NBAR_MASK;
		byte >>= PCI_REBAR_CTRL_NBAR_SHIFT;
		return 4 + (byte * 8);
	case PCI_EXT_CAP_ID_DPA:
		/* One byte per substate past the base registers */
		ret = pci_read_config_byte(pdev, epos + PCI_DPA_CAP, &byte);
		if (ret)
			return pcibios_err_to_errno(ret);
		byte &= PCI_DPA_CAP_SUBSTATE_MASK;
		return PCI_DPA_BASE_SIZEOF + byte + 1;
	case PCI_EXT_CAP_ID_TPH:
		ret = pci_read_config_dword(pdev, epos + PCI_TPH_CAP, &dword);
		if (ret)
			return pcibios_err_to_errno(ret);
		if ((dword & PCI_TPH_CAP_LOC_MASK) == PCI_TPH_LOC_CAP) {
			int sts;
			/* Steering table in cap: 2 bytes per entry */
			sts = dword & PCI_TPH_CAP_ST_MASK;
			sts >>= PCI_TPH_CAP_ST_SHIFT;
			return PCI_TPH_BASE_SIZEOF + (sts * 2) + 2;
		}
		return PCI_TPH_BASE_SIZEOF;
	case PCI_EXT_CAP_ID_DVSEC:
		/* DVSEC header 1 encodes the capability length */
		ret = pci_read_config_dword(pdev, epos + PCI_DVSEC_HEADER1, &dword);
		if (ret)
			return pcibios_err_to_errno(ret);
		return PCI_DVSEC_HEADER1_LEN(dword);
	default:
		pci_warn(pdev, "%s: unknown length for PCI ecap %#x@%#x\n",
			 __func__, ecap, epos);
	}
	/* Unknown ecap: report zero length so it gets hidden */
	return 0;
}
static void vfio_update_pm_vconfig_bytes(struct vfio_pci_core_device *vdev,
int offset)
{
__le16 *pmc = (__le16 *)&vdev->vconfig[offset + PCI_PM_PMC];
__le16 *ctrl = (__le16 *)&vdev->vconfig[offset + PCI_PM_CTRL];
/* Clear vconfig PME_Support, PME_Status, and PME_En bits */
*pmc &= ~cpu_to_le16(PCI_PM_CAP_PME_MASK);
*ctrl &= ~cpu_to_le16(PCI_PM_CTRL_PME_ENABLE | PCI_PM_CTRL_PME_STATUS);
}
/*
 * Populate [offset, offset + size) of the virtual config space from
 * the physical device, using the widest naturally-aligned accesses
 * available.  Returns 0 on success or the pci_read_config_* error.
 */
static int vfio_fill_vconfig_bytes(struct vfio_pci_core_device *vdev,
				   int offset, int size)
{
	struct pci_dev *pdev = vdev->pdev;
	int ret = 0;
	/*
	 * We try to read physical config space in the largest chunks
	 * we can, assuming that all of the fields support dword access.
	 * pci_save_state() makes this same assumption and seems to do ok.
	 */
	while (size) {
		int filled;
		if (size >= 4 && !(offset % 4)) {
			__le32 *dwordp = (__le32 *)&vdev->vconfig[offset];
			u32 dword;
			ret = pci_read_config_dword(pdev, offset, &dword);
			if (ret)
				return ret;
			*dwordp = cpu_to_le32(dword);
			filled = 4;
		} else if (size >= 2 && !(offset % 2)) {
			__le16 *wordp = (__le16 *)&vdev->vconfig[offset];
			u16 word;
			ret = pci_read_config_word(pdev, offset, &word);
			if (ret)
				return ret;
			*wordp = cpu_to_le16(word);
			filled = 2;
		} else {
			/* Unaligned head/tail falls back to byte reads */
			u8 *byte = &vdev->vconfig[offset];
			ret = pci_read_config_byte(pdev, offset, byte);
			if (ret)
				return ret;
			filled = 1;
		}
		offset += filled;
		size -= filled;
	}
	return ret;
}
/*
 * Walk the device's standard capability list, filling in the per-byte
 * capability map and the virtual config space.  Capabilities with an
 * unknown length are hidden by splicing them out of the virtual chain.
 * Returns 0 on success or a negative errno.
 */
static int vfio_cap_init(struct vfio_pci_core_device *vdev)
{
	struct pci_dev *pdev = vdev->pdev;
	u8 *map = vdev->pci_config_map;
	u16 status;
	u8 pos, *prev, cap;
	int loops, ret, caps = 0;
	/* Any capabilities? */
	ret = pci_read_config_word(pdev, PCI_STATUS, &status);
	if (ret)
		return ret;
	if (!(status & PCI_STATUS_CAP_LIST))
		return 0; /* Done */
	ret = pci_read_config_byte(pdev, PCI_CAPABILITY_LIST, &pos);
	if (ret)
		return ret;
	/* Mark the previous position in case we want to skip a capability */
	prev = &vdev->vconfig[PCI_CAPABILITY_LIST];
	/* We can bound our loop, capabilities are dword aligned */
	loops = (PCI_CFG_SPACE_SIZE - PCI_STD_HEADER_SIZEOF) / PCI_CAP_SIZEOF;
	while (pos && loops--) {
		u8 next;
		int i, len = 0;
		ret = pci_read_config_byte(pdev, pos, &cap);
		if (ret)
			return ret;
		ret = pci_read_config_byte(pdev,
					   pos + PCI_CAP_LIST_NEXT, &next);
		if (ret)
			return ret;
		/*
		 * ID 0 is a NULL capability, conflicting with our fake
		 * PCI_CAP_ID_BASIC.  As it has no content, consider it
		 * hidden for now.
		 */
		if (cap && cap <= PCI_CAP_ID_MAX) {
			len = pci_cap_length[cap];
			if (len == 0xFF) { /* Variable length */
				len = vfio_cap_len(vdev, cap, pos);
				if (len < 0)
					return len;
			}
		}
		if (!len) {
			/* Unknown/NULL capability: unlink it from the chain */
			pci_dbg(pdev, "%s: hiding cap %#x@%#x\n", __func__,
				cap, pos);
			*prev = next;
			pos = next;
			continue;
		}
		/* Sanity check, do we overlap other capabilities? */
		for (i = 0; i < len; i++) {
			if (likely(map[pos + i] == PCI_CAP_ID_INVALID))
				continue;
			pci_warn(pdev, "%s: PCI config conflict @%#x, was cap %#x now cap %#x\n",
				 __func__, pos + i, map[pos + i], cap);
		}
		BUILD_BUG_ON(PCI_CAP_ID_MAX >= PCI_CAP_ID_INVALID_VIRT);
		memset(map + pos, cap, len);
		ret = vfio_fill_vconfig_bytes(vdev, pos, len);
		if (ret)
			return ret;
		/* PM capability needs its PME bits scrubbed from virt */
		if (cap == PCI_CAP_ID_PM)
			vfio_update_pm_vconfig_bytes(vdev, pos);
		prev = &vdev->vconfig[pos + PCI_CAP_LIST_NEXT];
		pos = next;
		caps++;
	}
	/* If we didn't fill any capabilities, clear the status flag */
	if (!caps) {
		__le16 *vstatus = (__le16 *)&vdev->vconfig[PCI_STATUS];
		*vstatus &= ~cpu_to_le16(PCI_STATUS_CAP_LIST);
	}
	return 0;
}
/*
 * Walk the device's extended capability chain, filling in the per-byte
 * capability map and the virtual config space.  Unknown ecaps are
 * spliced out of the virtual chain (or, if first, replaced by a blank
 * placeholder that anchors the list).  Returns 0 or a negative errno.
 */
static int vfio_ecap_init(struct vfio_pci_core_device *vdev)
{
	struct pci_dev *pdev = vdev->pdev;
	u8 *map = vdev->pci_config_map;
	u16 epos;
	__le32 *prev = NULL;
	int loops, ret, ecaps = 0;
	if (!vdev->extended_caps)
		return 0;
	epos = PCI_CFG_SPACE_SIZE;
	/* Bound the walk; ecaps are dword aligned */
	loops = (pdev->cfg_size - PCI_CFG_SPACE_SIZE) / PCI_CAP_SIZEOF;
	while (loops-- && epos >= PCI_CFG_SPACE_SIZE) {
		u32 header;
		u16 ecap;
		int i, len = 0;
		bool hidden = false;
		ret = pci_read_config_dword(pdev, epos, &header);
		if (ret)
			return ret;
		ecap = PCI_EXT_CAP_ID(header);
		if (ecap <= PCI_EXT_CAP_ID_MAX) {
			len = pci_ext_cap_length[ecap];
			if (len == 0xFF) { /* Variable length */
				len = vfio_ext_cap_len(vdev, ecap, epos);
				if (len < 0)
					return len;
			}
		}
		if (!len) {
			pci_dbg(pdev, "%s: hiding ecap %#x@%#x\n",
				__func__, ecap, epos);
			/* If not the first in the chain, we can skip over it */
			if (prev) {
				/* Patch our next offset into prev's header */
				u32 val = epos = PCI_EXT_CAP_NEXT(header);
				*prev &= cpu_to_le32(~(0xffcU << 20));
				*prev |= cpu_to_le32(val << 20);
				continue;
			}
			/*
			 * Otherwise, fill in a placeholder, the direct
			 * readfn will virtualize this automatically
			 */
			len = PCI_CAP_SIZEOF;
			hidden = true;
		}
		/* Sanity check, do we overlap other capabilities? */
		for (i = 0; i < len; i++) {
			if (likely(map[epos + i] == PCI_CAP_ID_INVALID))
				continue;
			pci_warn(pdev, "%s: PCI config conflict @%#x, was ecap %#x now ecap %#x\n",
				 __func__, epos + i, map[epos + i], ecap);
		}
		/*
		 * Even though ecap is 2 bytes, we're currently a long way
		 * from exceeding 1 byte capabilities.  If we ever make it
		 * up to 0xFE we'll need to up this to a two-byte, byte map.
		 */
		BUILD_BUG_ON(PCI_EXT_CAP_ID_MAX >= PCI_CAP_ID_INVALID_VIRT);
		memset(map + epos, ecap, len);
		ret = vfio_fill_vconfig_bytes(vdev, epos, len);
		if (ret)
			return ret;
		/*
		 * If we're just using this capability to anchor the list,
		 * hide the real ID.  Only count real ecaps.  XXX PCI spec
		 * indicates to use cap id = 0, version = 0, next = 0 if
		 * ecaps are absent, hope users check all the way to next.
		 */
		if (hidden)
			*(__le32 *)&vdev->vconfig[epos] &=
				cpu_to_le32((0xffcU << 20));
		else
			ecaps++;
		prev = (__le32 *)&vdev->vconfig[epos];
		epos = PCI_EXT_CAP_NEXT(header);
	}
	/* No real ecaps found: zero the anchor so users see an empty list */
	if (!ecaps)
		*(u32 *)&vdev->vconfig[PCI_CFG_SPACE_SIZE] = 0;
	return 0;
}
/*
 * Nag about hardware bugs, hopefully to have vendors fix them, but at least
 * to collect a list of dependencies for the VF INTx pin quirk below.
 */
static const struct pci_device_id known_bogus_vf_intx_pin[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x270c) },
	{} /* terminating entry */
};
/*
 * For each device we allocate a pci_config_map that indicates the
 * capability occupying each dword and thus the struct perm_bits we
 * use for read and write.  We also allocate a virtualized config
 * space which tracks reads and writes to bits that we emulate for
 * the user.  Initial values filled from device.
 *
 * Using shared struct perm_bits between all vfio-pci devices saves
 * us from allocating cfg_size buffers for virt and write for every
 * device.  We could remove vconfig and allocate individual buffers
 * for each area requiring emulated bits, but the array of pointers
 * would be comparable in size (at least for standard config space).
 */
int vfio_config_init(struct vfio_pci_core_device *vdev)
{
	struct pci_dev *pdev = vdev->pdev;
	u8 *map, *vconfig;
	int ret;
	/*
	 * Config space, caps and ecaps are all dword aligned, so we could
	 * use one byte per dword to record the type.  However, there are
	 * no requirements on the length of a capability, so the gap between
	 * capabilities needs byte granularity.
	 */
	map = kmalloc(pdev->cfg_size, GFP_KERNEL_ACCOUNT);
	if (!map)
		return -ENOMEM;
	vconfig = kmalloc(pdev->cfg_size, GFP_KERNEL_ACCOUNT);
	if (!vconfig) {
		kfree(map);
		return -ENOMEM;
	}
	vdev->pci_config_map = map;
	vdev->vconfig = vconfig;
	/* Header region is "basic"; everything else starts unassigned */
	memset(map, PCI_CAP_ID_BASIC, PCI_STD_HEADER_SIZEOF);
	memset(map + PCI_STD_HEADER_SIZEOF, PCI_CAP_ID_INVALID,
	       pdev->cfg_size - PCI_STD_HEADER_SIZEOF);
	ret = vfio_fill_vconfig_bytes(vdev, 0, PCI_STD_HEADER_SIZEOF);
	if (ret)
		goto out;
	vdev->bardirty = true;
	/*
	 * XXX can we just pci_load_saved_state/pci_restore_state?
	 * may need to rebuild vconfig after that
	 */
	/* For restore after reset */
	vdev->rbar[0] = le32_to_cpu(*(__le32 *)&vconfig[PCI_BASE_ADDRESS_0]);
	vdev->rbar[1] = le32_to_cpu(*(__le32 *)&vconfig[PCI_BASE_ADDRESS_1]);
	vdev->rbar[2] = le32_to_cpu(*(__le32 *)&vconfig[PCI_BASE_ADDRESS_2]);
	vdev->rbar[3] = le32_to_cpu(*(__le32 *)&vconfig[PCI_BASE_ADDRESS_3]);
	vdev->rbar[4] = le32_to_cpu(*(__le32 *)&vconfig[PCI_BASE_ADDRESS_4]);
	vdev->rbar[5] = le32_to_cpu(*(__le32 *)&vconfig[PCI_BASE_ADDRESS_5]);
	vdev->rbar[6] = le32_to_cpu(*(__le32 *)&vconfig[PCI_ROM_ADDRESS]);
	if (pdev->is_virtfn) {
		/* VFs read 0xffff for vendor/device ID; supply real values */
		*(__le16 *)&vconfig[PCI_VENDOR_ID] = cpu_to_le16(pdev->vendor);
		*(__le16 *)&vconfig[PCI_DEVICE_ID] = cpu_to_le16(pdev->device);
		/*
		 * Per SR-IOV spec rev 1.1, 3.4.1.18 the interrupt pin register
		 * does not apply to VFs and VFs must implement this register
		 * as read-only with value zero.  Userspace is not readily able
		 * to identify whether a device is a VF and thus that the pin
		 * definition on the device is bogus should it violate this
		 * requirement.  We already virtualize the pin register for
		 * other purposes, so we simply need to replace the bogus value
		 * and consider VFs when we determine INTx IRQ count.
		 */
		if (vconfig[PCI_INTERRUPT_PIN] &&
		    !pci_match_id(known_bogus_vf_intx_pin, pdev))
			pci_warn(pdev,
				 "Hardware bug: VF reports bogus INTx pin %d\n",
				 vconfig[PCI_INTERRUPT_PIN]);
		vconfig[PCI_INTERRUPT_PIN] = 0; /* Gratuitous for good VFs */
	}
	if (pdev->no_command_memory) {
		/*
		 * VFs and devices that set pdev->no_command_memory do not
		 * implement the memory enable bit of the COMMAND register
		 * therefore we'll not have it set in our initial copy of
		 * config space after pci_enable_device().  For consistency
		 * with PFs, set the virtual enable bit here.
		 */
		*(__le16 *)&vconfig[PCI_COMMAND] |=
			cpu_to_le16(PCI_COMMAND_MEMORY);
	}
	/* Hide the pin register entirely when INTx is unavailable */
	if (!IS_ENABLED(CONFIG_VFIO_PCI_INTX) || vdev->nointx)
		vconfig[PCI_INTERRUPT_PIN] = 0;
	ret = vfio_cap_init(vdev);
	if (ret)
		goto out;
	ret = vfio_ecap_init(vdev);
	if (ret)
		goto out;
	return 0;
out:
	kfree(map);
	vdev->pci_config_map = NULL;
	kfree(vconfig);
	vdev->vconfig = NULL;
	return pcibios_err_to_errno(ret);
}
/*
 * Release everything vfio_config_init() allocated, including the
 * per-device MSI permission table built lazily by vfio_msi_cap_len().
 */
void vfio_config_free(struct vfio_pci_core_device *vdev)
{
	kfree(vdev->pci_config_map);
	vdev->pci_config_map = NULL;

	kfree(vdev->vconfig);
	vdev->vconfig = NULL;

	if (vdev->msi_perm) {
		free_perm_bits(vdev->msi_perm);
		kfree(vdev->msi_perm);
		vdev->msi_perm = NULL;
	}
}
/*
 * Find the remaining number of bytes in a dword that match the given
 * position.  Stop at either the end of the capability or the dword boundary.
 */
static size_t vfio_pci_cap_remaining_dword(struct vfio_pci_core_device *vdev,
					   loff_t pos)
{
	u8 id = vdev->pci_config_map[pos];
	size_t n = 1;

	while ((pos + n) % 4 && vdev->pci_config_map[pos + n] == id)
		n++;

	return n;
}
/*
 * Perform one naturally-aligned access of at most 4 bytes, dispatching
 * to the permission table of whichever capability covers *ppos.
 * Returns the byte count handled (possibly less than @count; the
 * caller loops) or a negative errno.
 */
static ssize_t vfio_config_do_rw(struct vfio_pci_core_device *vdev, char __user *buf,
				 size_t count, loff_t *ppos, bool iswrite)
{
	struct pci_dev *pdev = vdev->pdev;
	struct perm_bits *perm;
	__le32 val = 0;
	int cap_start = 0, offset;
	u8 cap_id;
	ssize_t ret;
	if (*ppos < 0 || *ppos >= pdev->cfg_size ||
	    *ppos + count > pdev->cfg_size)
		return -EFAULT;
	/*
	 * Chop accesses into aligned chunks containing no more than a
	 * single capability.  Caller increments to the next chunk.
	 */
	count = min(count, vfio_pci_cap_remaining_dword(vdev, *ppos));
	if (count >= 4 && !(*ppos % 4))
		count = 4;
	else if (count >= 2 && !(*ppos % 2))
		count = 2;
	else
		count = 1;
	ret = count;
	/* Select the perm_bits for the capability covering this offset */
	cap_id = vdev->pci_config_map[*ppos];
	if (cap_id == PCI_CAP_ID_INVALID) {
		perm = &unassigned_perms;
		cap_start = *ppos;
	} else if (cap_id == PCI_CAP_ID_INVALID_VIRT) {
		perm = &virt_perms;
		cap_start = *ppos;
	} else {
		if (*ppos >= PCI_CFG_SPACE_SIZE) {
			WARN_ON(cap_id > PCI_EXT_CAP_ID_MAX);
			perm = &ecap_perms[cap_id];
			cap_start = vfio_find_cap_start(vdev, *ppos);
		} else {
			WARN_ON(cap_id > PCI_CAP_ID_MAX);
			perm = &cap_perms[cap_id];
			/* MSI perms are per-device, not shared */
			if (cap_id == PCI_CAP_ID_MSI)
				perm = vdev->msi_perm;
			if (cap_id > PCI_CAP_ID_BASIC)
				cap_start = vfio_find_cap_start(vdev, *ppos);
		}
	}
	WARN_ON(!cap_start && cap_id != PCI_CAP_ID_BASIC);
	WARN_ON(cap_start > *ppos);
	offset = *ppos - cap_start;
	if (iswrite) {
		/* No writefn means the write is silently dropped */
		if (!perm->writefn)
			return ret;
		if (copy_from_user(&val, buf, count))
			return -EFAULT;
		ret = perm->writefn(vdev, *ppos, count, perm, offset, val);
	} else {
		if (perm->readfn) {
			ret = perm->readfn(vdev, *ppos, count,
					   perm, offset, &val);
			if (ret < 0)
				return ret;
		}
		if (copy_to_user(buf, &val, count))
			return -EFAULT;
	}
	return ret;
}
/*
 * User-facing config space read/write entry point.  Splits the request
 * into aligned per-capability chunks via vfio_config_do_rw() and
 * advances *ppos by the total handled.  Returns bytes transferred or a
 * negative errno from the first failing chunk.
 */
ssize_t vfio_pci_config_rw(struct vfio_pci_core_device *vdev, char __user *buf,
			   size_t count, loff_t *ppos, bool iswrite)
{
	loff_t pos = *ppos & VFIO_PCI_OFFSET_MASK;
	size_t done = 0;

	while (count) {
		ssize_t ret = vfio_config_do_rw(vdev, buf, count, &pos,
						iswrite);

		if (ret < 0)
			return ret;

		count -= ret;
		done += ret;
		buf += ret;
		pos += ret;
	}

	*ppos += done;

	return done;
}
| linux-master | drivers/vfio/pci/vfio_pci_config.c |
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
* Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved
*/
#include "cmd.h"
enum { CQ_OK = 0, CQ_EMPTY = -1, CQ_POLL_ERR = -2 };
/*
 * Query HCA CAP2 of the given function and check the 'migratable' bit.
 * Returns 0 when migration is supported, -EOPNOTSUPP when not, or a
 * negative errno from the query itself.
 */
static int mlx5vf_is_migratable(struct mlx5_core_dev *mdev, u16 func_id)
{
	int query_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out);
	void *query_cap, *cap;
	int ret;

	query_cap = kzalloc(query_sz, GFP_KERNEL);
	if (!query_cap)
		return -ENOMEM;

	ret = mlx5_vport_get_other_func_cap(mdev, func_id, query_cap,
					    MLX5_CAP_GENERAL_2);
	if (!ret) {
		cap = MLX5_ADDR_OF(query_hca_cap_out, query_cap, capability);
		if (!MLX5_GET(cmd_hca_cap_2, cap, migratable))
			ret = -EOPNOTSUPP;
	}

	kfree(query_cap);
	return ret;
}
static int mlx5vf_cmd_get_vhca_id(struct mlx5_core_dev *mdev, u16 function_id,
u16 *vhca_id);
static void
_mlx5vf_free_page_tracker_resources(struct mlx5vf_pci_core_device *mvdev);
/*
 * Issue SUSPEND_VHCA for this VF.  Must be called with state_mutex
 * held.  Serializes against an in-flight save command via the migf
 * save_comp completion.  Returns 0 or a negative errno.
 */
int mlx5vf_cmd_suspend_vhca(struct mlx5vf_pci_core_device *mvdev, u16 op_mod)
{
	struct mlx5_vf_migration_file *migf = mvdev->saving_migf;
	u32 out[MLX5_ST_SZ_DW(suspend_vhca_out)] = {};
	u32 in[MLX5_ST_SZ_DW(suspend_vhca_in)] = {};
	int err;
	lockdep_assert_held(&mvdev->state_mutex);
	if (mvdev->mdev_detach)
		return -ENOTCONN;
	/*
	 * In case PRE_COPY is used, saving_migf is exposed while the device is
	 * running.  Make sure to run only once there is no active save command.
	 * Running both in parallel, might end-up with a failure in the save
	 * command once it will try to turn on 'tracking' on a suspended device.
	 */
	if (migf) {
		err = wait_for_completion_interruptible(&migf->save_comp);
		if (err)
			return err;
	}
	MLX5_SET(suspend_vhca_in, in, opcode, MLX5_CMD_OP_SUSPEND_VHCA);
	MLX5_SET(suspend_vhca_in, in, vhca_id, mvdev->vhca_id);
	MLX5_SET(suspend_vhca_in, in, op_mod, op_mod);
	err = mlx5_cmd_exec_inout(mvdev->mdev, suspend_vhca, in, out);
	/* Re-arm the completion so the next save/suspend can proceed */
	if (migf)
		complete(&migf->save_comp);
	return err;
}
/*
 * Issue RESUME_VHCA for this VF.  Must be called with state_mutex
 * held.  Returns 0 or a negative errno.
 */
int mlx5vf_cmd_resume_vhca(struct mlx5vf_pci_core_device *mvdev, u16 op_mod)
{
	u32 cmd_in[MLX5_ST_SZ_DW(resume_vhca_in)] = {};
	u32 cmd_out[MLX5_ST_SZ_DW(resume_vhca_out)] = {};

	lockdep_assert_held(&mvdev->state_mutex);
	if (mvdev->mdev_detach)
		return -ENOTCONN;

	MLX5_SET(resume_vhca_in, cmd_in, opcode, MLX5_CMD_OP_RESUME_VHCA);
	MLX5_SET(resume_vhca_in, cmd_in, vhca_id, mvdev->vhca_id);
	MLX5_SET(resume_vhca_in, cmd_in, op_mod, op_mod);

	return mlx5_cmd_exec_inout(mvdev->mdev, resume_vhca, cmd_in, cmd_out);
}
/*
 * Query the size of the migration state image for this VF.  Must be
 * called with state_mutex held.  With MLX5VF_QUERY_INC the incremental
 * (dirty) size is requested; after a PRE_COPY error only the final
 * full image may be queried.  On success *state_size is filled in.
 */
int mlx5vf_cmd_query_vhca_migration_state(struct mlx5vf_pci_core_device *mvdev,
					  size_t *state_size, u8 query_flags)
{
	u32 out[MLX5_ST_SZ_DW(query_vhca_migration_state_out)] = {};
	u32 in[MLX5_ST_SZ_DW(query_vhca_migration_state_in)] = {};
	bool inc = query_flags & MLX5VF_QUERY_INC;
	int ret;
	lockdep_assert_held(&mvdev->state_mutex);
	if (mvdev->mdev_detach)
		return -ENOTCONN;
	/*
	 * In case PRE_COPY is used, saving_migf is exposed while device is
	 * running.  Make sure to run only once there is no active save command.
	 * Running both in parallel, might end-up with a failure in the
	 * incremental query command on un-tracked vhca.
	 */
	if (inc) {
		ret = wait_for_completion_interruptible(&mvdev->saving_migf->save_comp);
		if (ret)
			return ret;
		if (mvdev->saving_migf->state ==
		    MLX5_MIGF_STATE_PRE_COPY_ERROR) {
			/*
			 * In case we had a PRE_COPY error, only query full
			 * image for final image
			 */
			if (!(query_flags & MLX5VF_QUERY_FINAL)) {
				*state_size = 0;
				complete(&mvdev->saving_migf->save_comp);
				return 0;
			}
			query_flags &= ~MLX5VF_QUERY_INC;
		}
	}
	MLX5_SET(query_vhca_migration_state_in, in, opcode,
		 MLX5_CMD_OP_QUERY_VHCA_MIGRATION_STATE);
	MLX5_SET(query_vhca_migration_state_in, in, vhca_id, mvdev->vhca_id);
	MLX5_SET(query_vhca_migration_state_in, in, op_mod, 0);
	MLX5_SET(query_vhca_migration_state_in, in, incremental,
		 query_flags & MLX5VF_QUERY_INC);
	ret = mlx5_cmd_exec_inout(mvdev->mdev, query_vhca_migration_state, in,
				  out);
	/* Re-arm the completion so the next save/query can proceed */
	if (inc)
		complete(&mvdev->saving_migf->save_comp);
	if (ret)
		return ret;
	*state_size = MLX5_GET(query_vhca_migration_state_out, out,
			       required_umem_size);
	return 0;
}
/* Put the dirty-page tracker into its error state and kick any waiter. */
static void set_tracker_error(struct mlx5vf_pci_core_device *mvdev)
{
	/* Mark the tracker under an error and wake it up if it's running */
	mvdev->tracker.is_err = true;
	complete(&mvdev->tracker_comp);
}
/*
 * SR-IOV blocking-notifier callback: track whether the VF's parent mdev
 * is usable. On VF disable, tear down any in-flight migration state
 * first (outside state_mutex), then flag the device as detached.
 */
static int mlx5fv_vf_event(struct notifier_block *nb,
			   unsigned long event, void *data)
{
	struct mlx5vf_pci_core_device *mvdev =
		container_of(nb, struct mlx5vf_pci_core_device, nb);

	if (event == MLX5_PF_NOTIFY_ENABLE_VF) {
		mutex_lock(&mvdev->state_mutex);
		mvdev->mdev_detach = false;
		mlx5vf_state_mutex_unlock(mvdev);
	} else if (event == MLX5_PF_NOTIFY_DISABLE_VF) {
		mlx5vf_cmd_close_migratable(mvdev);
		mutex_lock(&mvdev->state_mutex);
		mvdev->mdev_detach = true;
		mlx5vf_state_mutex_unlock(mvdev);
	}

	return 0;
}
/*
 * Quiesce migration machinery on a migratable device: fail the page
 * tracker, close any open migration FDs and free tracker resources.
 * No-op when the device never exposed the migration capability.
 */
void mlx5vf_cmd_close_migratable(struct mlx5vf_pci_core_device *mvdev)
{
	if (!mvdev->migrate_cap)
		return;
	/* Must be done outside the lock to let it progress */
	set_tracker_error(mvdev);
	mutex_lock(&mvdev->state_mutex);
	mlx5vf_disable_fds(mvdev);
	_mlx5vf_free_page_tracker_resources(mvdev);
	mlx5vf_state_mutex_unlock(mvdev);
}
/*
 * Undo mlx5vf_cmd_set_migratable(): drop the SR-IOV notifier and the
 * callback workqueue. Safe to call on non-migratable devices.
 */
void mlx5vf_cmd_remove_migratable(struct mlx5vf_pci_core_device *mvdev)
{
	if (!mvdev->migrate_cap)
		return;
	mlx5_sriov_blocking_notifier_unregister(mvdev->mdev, mvdev->vf_id,
						&mvdev->nb);
	destroy_workqueue(mvdev->cb_wq);
}
/*
 * Probe whether this VF supports mlx5 live migration and, if so, wire up
 * the vfio migration (and optionally dirty-logging) ops.
 *
 * Any probe failure silently leaves the device non-migratable (best
 * effort: migration is an optional capability). On every path the core
 * device reference taken here is dropped at "end"; migration commands
 * presumably re-acquire it elsewhere -- NOTE(review): confirm against
 * the rest of the driver.
 */
void mlx5vf_cmd_set_migratable(struct mlx5vf_pci_core_device *mvdev,
			       const struct vfio_migration_ops *mig_ops,
			       const struct vfio_log_ops *log_ops)
{
	struct pci_dev *pdev = mvdev->core_device.pdev;
	int ret;

	if (!pdev->is_virtfn)
		return;
	mvdev->mdev = mlx5_vf_get_core_dev(pdev);
	if (!mvdev->mdev)
		return;
	if (!MLX5_CAP_GEN(mvdev->mdev, migration))
		goto end;
	mvdev->vf_id = pci_iov_vf_id(pdev);
	if (mvdev->vf_id < 0)
		goto end;
	/* function_id is vf_id + 1 (0 is the PF itself) */
	ret = mlx5vf_is_migratable(mvdev->mdev, mvdev->vf_id + 1);
	if (ret)
		goto end;
	if (mlx5vf_cmd_get_vhca_id(mvdev->mdev, mvdev->vf_id + 1,
				   &mvdev->vhca_id))
		goto end;
	/* ordered wq: migration callbacks must not run concurrently */
	mvdev->cb_wq = alloc_ordered_workqueue("mlx5vf_wq", 0);
	if (!mvdev->cb_wq)
		goto end;
	mutex_init(&mvdev->state_mutex);
	spin_lock_init(&mvdev->reset_lock);
	mvdev->nb.notifier_call = mlx5fv_vf_event;
	ret = mlx5_sriov_blocking_notifier_register(mvdev->mdev, mvdev->vf_id,
						    &mvdev->nb);
	if (ret) {
		destroy_workqueue(mvdev->cb_wq);
		goto end;
	}
	mvdev->migrate_cap = 1;
	mvdev->core_device.vdev.migration_flags =
		VFIO_MIGRATION_STOP_COPY |
		VFIO_MIGRATION_P2P;
	mvdev->core_device.vdev.mig_ops = mig_ops;
	init_completion(&mvdev->tracker_comp);
	/* dirty-page tracking needs the adv_virtualization capability */
	if (MLX5_CAP_GEN(mvdev->mdev, adv_virtualization))
		mvdev->core_device.vdev.log_ops = log_ops;
	if (MLX5_CAP_GEN_2(mvdev->mdev, migration_multi_load) &&
	    MLX5_CAP_GEN_2(mvdev->mdev, migration_tracking_state))
		mvdev->core_device.vdev.migration_flags |=
			VFIO_MIGRATION_PRE_COPY;
end:
	mlx5_vf_put_core_dev(mvdev->mdev);
}
/*
 * Read the vhca_id of another function via QUERY_HCA_CAP
 * (other_function=1). The output mailbox is heap-allocated because the
 * full query_hca_cap_out layout is large.
 */
static int mlx5vf_cmd_get_vhca_id(struct mlx5_core_dev *mdev, u16 function_id,
				  u16 *vhca_id)
{
	u32 in[MLX5_ST_SZ_DW(query_hca_cap_in)] = {};
	int query_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out);
	void *query_out;
	int err;

	query_out = kzalloc(query_sz, GFP_KERNEL);
	if (!query_out)
		return -ENOMEM;

	MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP);
	MLX5_SET(query_hca_cap_in, in, other_function, 1);
	MLX5_SET(query_hca_cap_in, in, function_id, function_id);
	MLX5_SET(query_hca_cap_in, in, op_mod,
		 MLX5_SET_HCA_CAP_OP_MOD_GENERAL_DEVICE << 1 |
		 HCA_CAP_OPMOD_GET_CUR);

	err = mlx5_cmd_exec_inout(mdev, query_hca_cap, in, query_out);
	if (!err)
		*vhca_id = MLX5_GET(query_hca_cap_out, query_out,
				    capability.cmd_hca_cap.vhca_id);

	kfree(query_out);
	return err;
}
/*
 * Create an MTT-based mkey covering either a migration data buffer
 * (@buf, page list taken from its DMA-mapped sgtable) or a tracker
 * receive buffer (@recv_buf, page list from its dma_addrs array).
 * Exactly one of @buf / @recv_buf is expected to be non-NULL.
 */
static int _create_mkey(struct mlx5_core_dev *mdev, u32 pdn,
			struct mlx5_vhca_data_buffer *buf,
			struct mlx5_vhca_recv_buf *recv_buf,
			u32 *mkey)
{
	size_t npages = buf ? DIV_ROUND_UP(buf->allocated_length, PAGE_SIZE) :
				recv_buf->npages;
	int err = 0, inlen;
	__be64 *mtt;
	void *mkc;
	u32 *in;

	/* MTT entries are packed in pairs, hence the round_up to 2 */
	inlen = MLX5_ST_SZ_BYTES(create_mkey_in) +
		sizeof(*mtt) * round_up(npages, 2);
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(create_mkey_in, in, translations_octword_actual_size,
		 DIV_ROUND_UP(npages, 2));
	mtt = (__be64 *)MLX5_ADDR_OF(create_mkey_in, in, klm_pas_mtt);

	if (buf) {
		struct sg_dma_page_iter dma_iter;

		for_each_sgtable_dma_page(&buf->table.sgt, &dma_iter, 0)
			*mtt++ = cpu_to_be64(sg_page_iter_dma_address(&dma_iter));
	} else {
		int i;

		for (i = 0; i < npages; i++)
			*mtt++ = cpu_to_be64(recv_buf->dma_addrs[i]);
	}

	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
	MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_MTT);
	/* local read/write and remote read/write access */
	MLX5_SET(mkc, mkc, lr, 1);
	MLX5_SET(mkc, mkc, lw, 1);
	MLX5_SET(mkc, mkc, rr, 1);
	MLX5_SET(mkc, mkc, rw, 1);
	MLX5_SET(mkc, mkc, pd, pdn);
	MLX5_SET(mkc, mkc, bsf_octword_size, 0);
	MLX5_SET(mkc, mkc, qpn, 0xffffff);
	MLX5_SET(mkc, mkc, log_page_size, PAGE_SHIFT);
	MLX5_SET(mkc, mkc, translations_octword_size, DIV_ROUND_UP(npages, 2));
	MLX5_SET64(mkc, mkc, len, npages * PAGE_SIZE);
	err = mlx5_core_create_mkey(mdev, mkey, in, inlen);
	kvfree(in);
	return err;
}
/*
 * DMA-map a migration data buffer and create its mkey. Sets buf->dmaed
 * on success; -EINVAL if already mapped or empty. Caller must hold
 * state_mutex; fails with -ENOTCONN after mdev detach.
 */
static int mlx5vf_dma_data_buffer(struct mlx5_vhca_data_buffer *buf)
{
	struct mlx5vf_pci_core_device *mvdev = buf->migf->mvdev;
	struct mlx5_core_dev *mdev = mvdev->mdev;
	int ret;

	lockdep_assert_held(&mvdev->state_mutex);
	if (mvdev->mdev_detach)
		return -ENOTCONN;
	if (buf->dmaed || !buf->allocated_length)
		return -EINVAL;

	ret = dma_map_sgtable(mdev->device, &buf->table.sgt, buf->dma_dir, 0);
	if (ret)
		return ret;

	ret = _create_mkey(mdev, buf->migf->pdn, buf, NULL, &buf->mkey);
	if (ret)
		goto err;

	buf->dmaed = true;
	return 0;
err:
	dma_unmap_sgtable(mdev->device, &buf->table.sgt, buf->dma_dir, 0);
	return ret;
}
/*
 * Release a migration data buffer: destroy its mkey / DMA mapping (if
 * it was mapped), free all backing pages and the buffer itself.
 * May sleep; must not be called under migf->list_lock.
 */
void mlx5vf_free_data_buffer(struct mlx5_vhca_data_buffer *buf)
{
	struct mlx5_vf_migration_file *migf = buf->migf;
	struct sg_page_iter sg_iter;

	lockdep_assert_held(&migf->mvdev->state_mutex);
	WARN_ON(migf->mvdev->mdev_detach);

	if (buf->dmaed) {
		mlx5_core_destroy_mkey(migf->mvdev->mdev, buf->mkey);
		dma_unmap_sgtable(migf->mvdev->mdev->device, &buf->table.sgt,
				  buf->dma_dir, 0);
	}

	/* Undo alloc_pages_bulk_array() */
	for_each_sgtable_page(&buf->table.sgt, &sg_iter, 0)
		__free_page(sg_page_iter_page(&sg_iter));
	sg_free_append_table(&buf->table);
	kfree(buf);
}
/*
 * Allocate a fresh migration data buffer of at least @length bytes.
 * A DMA_NONE buffer is never DMA-mapped (e.g. header-only buffers).
 * Returns the buffer or an ERR_PTR; never NULL.
 */
struct mlx5_vhca_data_buffer *
mlx5vf_alloc_data_buffer(struct mlx5_vf_migration_file *migf,
			 size_t length,
			 enum dma_data_direction dma_dir)
{
	struct mlx5_vhca_data_buffer *buf;
	int ret;

	buf = kzalloc(sizeof(*buf), GFP_KERNEL_ACCOUNT);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	buf->dma_dir = dma_dir;
	buf->migf = migf;
	if (length) {
		ret = mlx5vf_add_migration_pages(buf,
				DIV_ROUND_UP_ULL(length, PAGE_SIZE));
		if (ret)
			goto end;

		if (dma_dir != DMA_NONE) {
			ret = mlx5vf_dma_data_buffer(buf);
			if (ret)
				goto end;
		}
	}

	return buf;
end:
	/* frees whatever pages were added plus buf itself */
	mlx5vf_free_data_buffer(buf);
	return ERR_PTR(ret);
}
void mlx5vf_put_data_buffer(struct mlx5_vhca_data_buffer *buf)
{
spin_lock_irq(&buf->migf->list_lock);
list_add_tail(&buf->buf_elm, &buf->migf->avail_list);
spin_unlock_irq(&buf->migf->list_lock);
}
/*
 * Get a data buffer of at least @length bytes with the requested DMA
 * direction, preferring to recycle one from migf->avail_list. Too-small
 * buffers of the same direction encountered during the scan are freed
 * (outside the spinlock). Returns a buffer or ERR_PTR.
 */
struct mlx5_vhca_data_buffer *
mlx5vf_get_data_buffer(struct mlx5_vf_migration_file *migf,
		       size_t length, enum dma_data_direction dma_dir)
{
	struct mlx5_vhca_data_buffer *buf, *temp_buf;
	struct list_head free_list;

	lockdep_assert_held(&migf->mvdev->state_mutex);
	if (migf->mvdev->mdev_detach)
		return ERR_PTR(-ENOTCONN);

	INIT_LIST_HEAD(&free_list);

	spin_lock_irq(&migf->list_lock);
	list_for_each_entry_safe(buf, temp_buf, &migf->avail_list, buf_elm) {
		if (buf->dma_dir == dma_dir) {
			list_del_init(&buf->buf_elm);
			if (buf->allocated_length >= length) {
				spin_unlock_irq(&migf->list_lock);
				goto found;
			}
			/*
			 * Prevent holding redundant buffers. Put in a free
			 * list and call at the end not under the spin lock
			 * (&migf->list_lock) to mlx5vf_free_data_buffer which
			 * might sleep.
			 */
			list_add(&buf->buf_elm, &free_list);
		}
	}
	spin_unlock_irq(&migf->list_lock);
	/* nothing reusable: allocate a new one (may return ERR_PTR) */
	buf = mlx5vf_alloc_data_buffer(migf, length, dma_dir);

found:
	/* drain the rejects collected above, now that we can sleep */
	while ((temp_buf = list_first_entry_or_null(&free_list,
				struct mlx5_vhca_data_buffer, buf_elm))) {
		list_del(&temp_buf->buf_elm);
		mlx5vf_free_data_buffer(temp_buf);
	}

	return buf;
}
/*
 * Workqueue handler run after the async SAVE callback. On SAVE failure
 * it recycles the data/header buffers and moves the migration file to
 * an error state (PRE_COPY_ERROR for BAD_RES_STATE so a final full
 * image can still be taken). Always releases the save_comp gate and
 * the file reference taken before the command was issued.
 */
void mlx5vf_mig_file_cleanup_cb(struct work_struct *_work)
{
	struct mlx5vf_async_data *async_data = container_of(_work,
		struct mlx5vf_async_data, work);
	struct mlx5_vf_migration_file *migf = container_of(async_data,
		struct mlx5_vf_migration_file, async_data);

	mutex_lock(&migf->lock);
	if (async_data->status) {
		mlx5vf_put_data_buffer(async_data->buf);
		if (async_data->header_buf)
			mlx5vf_put_data_buffer(async_data->header_buf);
		if (async_data->status == MLX5_CMD_STAT_BAD_RES_STATE_ERR)
			migf->state = MLX5_MIGF_STATE_PRE_COPY_ERROR;
		else
			migf->state = MLX5_MIGF_STATE_ERROR;
		/* readers polling the migration fd must see the new state */
		wake_up_interruptible(&migf->poll_wait);
	}
	mutex_unlock(&migf->lock);
	kvfree(async_data->out);
	complete(&migf->save_comp);
	fput(migf->filp);
}
/*
 * Fill @header_buf with a mlx5_vf_migration_header describing an image
 * of @image_size bytes and queue it on the migration file's buf_list so
 * userspace reads it ahead of the image data. Updates migf->max_pos and,
 * for the initial pre-copy image, pre_copy_initial_bytes accounting.
 */
static int add_buf_header(struct mlx5_vhca_data_buffer *header_buf,
			  size_t image_size, bool initial_pre_copy)
{
	struct mlx5_vf_migration_file *migf = header_buf->migf;
	struct mlx5_vf_migration_header header = {};
	unsigned long flags;
	struct page *page;
	u8 *to_buff;

	header.record_size = cpu_to_le64(image_size);
	header.flags = cpu_to_le32(MLX5_MIGF_HEADER_FLAGS_TAG_MANDATORY);
	header.tag = cpu_to_le32(MLX5_MIGF_HEADER_TAG_FW_DATA);
	/* header always fits in the buffer's first page */
	page = mlx5vf_get_migration_page(header_buf, 0);
	if (!page)
		return -EINVAL;
	to_buff = kmap_local_page(page);
	memcpy(to_buff, &header, sizeof(header));
	kunmap_local(to_buff);
	header_buf->length = sizeof(header);
	header_buf->start_pos = header_buf->migf->max_pos;
	migf->max_pos += header_buf->length;
	spin_lock_irqsave(&migf->list_lock, flags);
	list_add_tail(&header_buf->buf_elm, &migf->buf_list);
	spin_unlock_irqrestore(&migf->list_lock, flags);
	if (initial_pre_copy)
		migf->pre_copy_initial_bytes += sizeof(header);
	return 0;
}
/*
 * Completion callback of the async SAVE_VHCA_STATE command (may run in
 * interrupt context). On success it publishes the header (optional) and
 * image buffers on migf->buf_list and advances the migf state; the
 * heavier error/cleanup path is always deferred to cb_wq via
 * async_data->work (see mlx5vf_mig_file_cleanup_cb).
 */
static void mlx5vf_save_callback(int status, struct mlx5_async_work *context)
{
	struct mlx5vf_async_data *async_data = container_of(context,
			struct mlx5vf_async_data, cb_work);
	struct mlx5_vf_migration_file *migf = container_of(async_data,
			struct mlx5_vf_migration_file, async_data);

	if (!status) {
		size_t image_size;
		unsigned long flags;
		/* first pre-copy image counts towards initial_bytes */
		bool initial_pre_copy = migf->state != MLX5_MIGF_STATE_PRE_COPY &&
				!async_data->last_chunk;

		image_size = MLX5_GET(save_vhca_state_out, async_data->out,
				      actual_image_size);
		if (async_data->header_buf) {
			status = add_buf_header(async_data->header_buf, image_size,
						initial_pre_copy);
			if (status)
				goto err;
		}
		async_data->buf->length = image_size;
		async_data->buf->start_pos = migf->max_pos;
		migf->max_pos += async_data->buf->length;
		spin_lock_irqsave(&migf->list_lock, flags);
		list_add_tail(&async_data->buf->buf_elm, &migf->buf_list);
		spin_unlock_irqrestore(&migf->list_lock, flags);
		if (initial_pre_copy)
			migf->pre_copy_initial_bytes += image_size;
		migf->state = async_data->last_chunk ?
			MLX5_MIGF_STATE_COMPLETE : MLX5_MIGF_STATE_PRE_COPY;
		wake_up_interruptible(&migf->poll_wait);
	}

err:
	/*
	 * The error and the cleanup flows can't run from an
	 * interrupt context
	 */
	if (status == -EREMOTEIO)
		/* translate transport error into the FW command status */
		status = MLX5_GET(save_vhca_state_out, async_data->out, status);
	async_data->status = status;
	queue_work(migf->mvdev->cb_wq, &async_data->work);
}
/*
 * Kick off an asynchronous SAVE_VHCA_STATE into @buf.
 *
 * @inc:   request an incremental (dirty) image; forced off after a
 *         PRE_COPY error so the final image is a full one.
 * @track: keep tracking after the save (i.e. not the last chunk).
 *
 * Takes the migf->save_comp gate and a reference on migf->filp; both
 * are released by the completion/cleanup path on success, or by the
 * unwind labels here on submission failure. Caller holds state_mutex.
 */
int mlx5vf_cmd_save_vhca_state(struct mlx5vf_pci_core_device *mvdev,
			       struct mlx5_vf_migration_file *migf,
			       struct mlx5_vhca_data_buffer *buf, bool inc,
			       bool track)
{
	u32 out_size = MLX5_ST_SZ_BYTES(save_vhca_state_out);
	u32 in[MLX5_ST_SZ_DW(save_vhca_state_in)] = {};
	struct mlx5_vhca_data_buffer *header_buf = NULL;
	struct mlx5vf_async_data *async_data;
	int err;

	lockdep_assert_held(&mvdev->state_mutex);
	if (mvdev->mdev_detach)
		return -ENOTCONN;

	/* one SAVE in flight at a time */
	err = wait_for_completion_interruptible(&migf->save_comp);
	if (err)
		return err;

	if (migf->state == MLX5_MIGF_STATE_PRE_COPY_ERROR)
		/*
		 * In case we had a PRE_COPY error, SAVE is triggered only for
		 * the final image, read device full image.
		 */
		inc = false;

	MLX5_SET(save_vhca_state_in, in, opcode,
		 MLX5_CMD_OP_SAVE_VHCA_STATE);
	MLX5_SET(save_vhca_state_in, in, op_mod, 0);
	MLX5_SET(save_vhca_state_in, in, vhca_id, mvdev->vhca_id);
	MLX5_SET(save_vhca_state_in, in, mkey, buf->mkey);
	MLX5_SET(save_vhca_state_in, in, size, buf->allocated_length);
	MLX5_SET(save_vhca_state_in, in, incremental, inc);
	MLX5_SET(save_vhca_state_in, in, set_track, track);

	async_data = &migf->async_data;
	async_data->buf = buf;
	async_data->last_chunk = !track;
	async_data->out = kvzalloc(out_size, GFP_KERNEL);
	if (!async_data->out) {
		err = -ENOMEM;
		goto err_out;
	}

	if (MLX5VF_PRE_COPY_SUPP(mvdev)) {
		/* reuse the pre-allocated header buffer for the final chunk */
		if (async_data->last_chunk && migf->buf_header) {
			header_buf = migf->buf_header;
			migf->buf_header = NULL;
		} else {
			header_buf = mlx5vf_get_data_buffer(migf,
				sizeof(struct mlx5_vf_migration_header), DMA_NONE);
			if (IS_ERR(header_buf)) {
				err = PTR_ERR(header_buf);
				goto err_free;
			}
		}
	}

	if (async_data->last_chunk)
		migf->state = MLX5_MIGF_STATE_SAVE_LAST;

	async_data->header_buf = header_buf;
	/* keep the migration file alive until the async callback runs */
	get_file(migf->filp);
	err = mlx5_cmd_exec_cb(&migf->async_ctx, in, sizeof(in),
			       async_data->out,
			       out_size, mlx5vf_save_callback,
			       &async_data->cb_work);
	if (err)
		goto err_exec;

	return 0;

err_exec:
	if (header_buf)
		mlx5vf_put_data_buffer(header_buf);
	fput(migf->filp);
err_free:
	kvfree(async_data->out);
err_out:
	complete(&migf->save_comp);
	return err;
}
/*
 * Synchronously load a device image from @buf via LOAD_VHCA_STATE,
 * DMA-mapping the buffer first if it was staged without a mapping.
 * Caller holds state_mutex; -ENOTCONN after mdev detach.
 */
int mlx5vf_cmd_load_vhca_state(struct mlx5vf_pci_core_device *mvdev,
			       struct mlx5_vf_migration_file *migf,
			       struct mlx5_vhca_data_buffer *buf)
{
	u32 out[MLX5_ST_SZ_DW(load_vhca_state_out)] = {};
	u32 in[MLX5_ST_SZ_DW(load_vhca_state_in)] = {};
	int err;

	lockdep_assert_held(&mvdev->state_mutex);
	if (mvdev->mdev_detach)
		return -ENOTCONN;

	if (!buf->dmaed) {
		err = mlx5vf_dma_data_buffer(buf);
		if (err)
			return err;
	}

	MLX5_SET(load_vhca_state_in, in, opcode,
		 MLX5_CMD_OP_LOAD_VHCA_STATE);
	MLX5_SET(load_vhca_state_in, in, op_mod, 0);
	MLX5_SET(load_vhca_state_in, in, vhca_id, mvdev->vhca_id);
	MLX5_SET(load_vhca_state_in, in, mkey, buf->mkey);
	/* buf->length is the number of valid image bytes, not the allocation */
	MLX5_SET(load_vhca_state_in, in, size, buf->length);
	return mlx5_cmd_exec_inout(mvdev->mdev, load_vhca_state, in, out);
}
/*
 * Allocate the protection domain used by this migration file's mkeys.
 * Caller holds state_mutex; -ENOTCONN after mdev detach.
 */
int mlx5vf_cmd_alloc_pd(struct mlx5_vf_migration_file *migf)
{
	lockdep_assert_held(&migf->mvdev->state_mutex);
	if (migf->mvdev->mdev_detach)
		return -ENOTCONN;

	return mlx5_core_alloc_pd(migf->mvdev->mdev, &migf->pdn);
}
/*
 * Release the migration file's protection domain. Silently skipped
 * after mdev detach (the PD is gone with the device).
 */
void mlx5vf_cmd_dealloc_pd(struct mlx5_vf_migration_file *migf)
{
	lockdep_assert_held(&migf->mvdev->state_mutex);
	if (migf->mvdev->mdev_detach)
		return;
	mlx5_core_dealloc_pd(migf->mvdev->mdev, migf->pdn);
}
/*
 * Free every buffer owned by a migration file (dedicated buf/header,
 * in-flight buf_list and recycled avail_list entries) and then its PD.
 * Caller holds state_mutex.
 */
void mlx5fv_cmd_clean_migf_resources(struct mlx5_vf_migration_file *migf)
{
	struct mlx5_vhca_data_buffer *entry;

	lockdep_assert_held(&migf->mvdev->state_mutex);
	WARN_ON(migf->mvdev->mdev_detach);

	if (migf->buf) {
		mlx5vf_free_data_buffer(migf->buf);
		migf->buf = NULL;
	}

	if (migf->buf_header) {
		mlx5vf_free_data_buffer(migf->buf_header);
		migf->buf_header = NULL;
	}

	/* fold the recycle list into buf_list and free everything at once */
	list_splice(&migf->avail_list, &migf->buf_list);

	while ((entry = list_first_entry_or_null(&migf->buf_list,
				struct mlx5_vhca_data_buffer, buf_elm))) {
		list_del(&entry->buf_elm);
		mlx5vf_free_data_buffer(entry);
	}

	/* PD last: all mkeys referencing it are destroyed above */
	mlx5vf_cmd_dealloc_pd(migf);
}
/*
 * Create the firmware page-tracker object over the given IOVA ranges.
 * If there are more ranges than the device supports they are first
 * combined down to max_num_range. The total tracked address space must
 * fit the device's min/max log_addr_space limits, otherwise -EOPNOTSUPP.
 * On success tracker->id holds the new object id.
 */
static int mlx5vf_create_tracker(struct mlx5_core_dev *mdev,
				 struct mlx5vf_pci_core_device *mvdev,
				 struct rb_root_cached *ranges, u32 nnodes)
{
	int max_num_range =
		MLX5_CAP_ADV_VIRTUALIZATION(mdev, pg_track_max_num_range);
	struct mlx5_vhca_page_tracker *tracker = &mvdev->tracker;
	int record_size = MLX5_ST_SZ_BYTES(page_track_range);
	u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {};
	struct interval_tree_node *node = NULL;
	u64 total_ranges_len = 0;
	u32 num_ranges = nnodes;
	u8 log_addr_space_size;
	void *range_list_ptr;
	void *obj_context;
	void *cmd_hdr;
	int inlen;
	void *in;
	int err;
	int i;

	/* collapse ranges in-place until the device limit is met */
	if (num_ranges > max_num_range) {
		vfio_combine_iova_ranges(ranges, nnodes, max_num_range);
		num_ranges = max_num_range;
	}

	inlen = MLX5_ST_SZ_BYTES(create_page_track_obj_in) +
			record_size * num_ranges;
	in = kzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	cmd_hdr = MLX5_ADDR_OF(create_page_track_obj_in, in,
			       general_obj_in_cmd_hdr);
	MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, opcode,
		 MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
	MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, obj_type,
		 MLX5_OBJ_TYPE_PAGE_TRACK);
	obj_context = MLX5_ADDR_OF(create_page_track_obj_in, in, obj_context);
	MLX5_SET(page_track, obj_context, vhca_id, mvdev->vhca_id);
	MLX5_SET(page_track, obj_context, track_type, 1);
	MLX5_SET(page_track, obj_context, log_page_size,
		 ilog2(tracker->host_qp->tracked_page_size));
	MLX5_SET(page_track, obj_context, log_msg_size,
		 ilog2(tracker->host_qp->max_msg_size));
	MLX5_SET(page_track, obj_context, reporting_qpn, tracker->fw_qp->qpn);
	MLX5_SET(page_track, obj_context, num_ranges, num_ranges);

	/* serialize the (possibly combined) interval tree into the command */
	range_list_ptr = MLX5_ADDR_OF(page_track, obj_context, track_range);
	node = interval_tree_iter_first(ranges, 0, ULONG_MAX);
	for (i = 0; i < num_ranges; i++) {
		void *addr_range_i_base = range_list_ptr + record_size * i;
		unsigned long length = node->last - node->start + 1;

		MLX5_SET64(page_track_range, addr_range_i_base, start_address,
			   node->start);
		MLX5_SET64(page_track_range, addr_range_i_base, length, length);
		total_ranges_len += length;
		node = interval_tree_iter_next(node, 0, ULONG_MAX);
	}

	/* combining above guarantees the tree holds exactly num_ranges nodes */
	WARN_ON(node);
	log_addr_space_size = ilog2(roundup_pow_of_two(total_ranges_len));
	if (log_addr_space_size <
	    (MLX5_CAP_ADV_VIRTUALIZATION(mdev, pg_track_log_min_addr_space)) ||
	    log_addr_space_size >
	    (MLX5_CAP_ADV_VIRTUALIZATION(mdev, pg_track_log_max_addr_space))) {
		err = -EOPNOTSUPP;
		goto out;
	}

	MLX5_SET(page_track, obj_context, log_addr_space_size,
		 log_addr_space_size);
	err = mlx5_cmd_exec(mdev, in, inlen, out, sizeof(out));
	if (err)
		goto out;

	tracker->id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
out:
	kfree(in);
	return err;
}
/* Destroy the firmware page-tracker general object identified by @tracker_id. */
static int mlx5vf_cmd_destroy_tracker(struct mlx5_core_dev *mdev,
				      u32 tracker_id)
{
	u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {};
	u32 in[MLX5_ST_SZ_DW(general_obj_in_cmd_hdr)] = {};

	MLX5_SET(general_obj_in_cmd_hdr, in, obj_type, MLX5_OBJ_TYPE_PAGE_TRACK);
	MLX5_SET(general_obj_in_cmd_hdr, in, obj_id, tracker_id);
	MLX5_SET(general_obj_in_cmd_hdr, in, opcode,
		 MLX5_CMD_OP_DESTROY_GENERAL_OBJECT);

	return mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
}
/*
 * Modify the page-tracker object: point it at [iova, iova+length) and
 * set its state. modify_field_select=0x3 selects the first two
 * modifiable fields -- presumably state and the range; TODO confirm
 * against the PRM field layout.
 */
static int mlx5vf_cmd_modify_tracker(struct mlx5_core_dev *mdev,
				     u32 tracker_id, unsigned long iova,
				     unsigned long length, u32 tracker_state)
{
	u32 in[MLX5_ST_SZ_DW(modify_page_track_obj_in)] = {};
	u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {};
	void *obj_context;
	void *cmd_hdr;

	cmd_hdr = MLX5_ADDR_OF(modify_page_track_obj_in, in, general_obj_in_cmd_hdr);
	MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, opcode, MLX5_CMD_OP_MODIFY_GENERAL_OBJECT);
	MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, obj_type, MLX5_OBJ_TYPE_PAGE_TRACK);
	MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, obj_id, tracker_id);

	obj_context = MLX5_ADDR_OF(modify_page_track_obj_in, in, obj_context);
	MLX5_SET64(page_track, obj_context, modify_field_select, 0x3);
	MLX5_SET64(page_track, obj_context, range_start_address, iova);
	MLX5_SET64(page_track, obj_context, length, length);
	MLX5_SET(page_track, obj_context, state, tracker_state);

	return mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
}
/*
 * Allocate the fragmented buffer backing a CQ of @nent entries of
 * @cqe_size (64 or 128) bytes and initialize its frag-buf control.
 * log_wq_stride is 6 (64B) or 7 (128B) accordingly.
 */
static int alloc_cq_frag_buf(struct mlx5_core_dev *mdev,
			     struct mlx5_vhca_cq_buf *buf, int nent,
			     int cqe_size)
{
	struct mlx5_frag_buf *frag_buf = &buf->frag_buf;
	u8 log_wq_stride = 6 + (cqe_size == 128 ? 1 : 0);
	u8 log_wq_sz = ilog2(cqe_size);
	int err;

	err = mlx5_frag_buf_alloc_node(mdev, nent * cqe_size, frag_buf,
				       mdev->priv.numa_node);
	if (err)
		return err;

	mlx5_init_fbc(frag_buf->frags, log_wq_stride, log_wq_sz, &buf->fbc);
	buf->cqe_size = cqe_size;
	buf->nent = nent;
	return 0;
}
/*
 * Stamp every CQE in the buffer as invalid (ownership bit pattern) so
 * the consumer never mistakes stale memory for a completed entry. For
 * 128B CQEs the 64B hardware CQE lives in the second half of the slot.
 */
static void init_cq_frag_buf(struct mlx5_vhca_cq_buf *buf)
{
	int slot;

	for (slot = 0; slot < buf->nent; slot++) {
		void *raw = mlx5_frag_buf_get_wqe(&buf->fbc, slot);
		struct mlx5_cqe64 *hw_cqe =
			buf->cqe_size == 64 ? raw : raw + 64;

		hw_cqe->op_own = MLX5_CQE_INVALID << 4;
	}
}
/* Tear down a tracker CQ: HW object first, then its buffer and doorbell. */
static void mlx5vf_destroy_cq(struct mlx5_core_dev *mdev,
			      struct mlx5_vhca_cq *cq)
{
	mlx5_core_destroy_cq(mdev, &cq->mcq);
	mlx5_frag_buf_free(mdev, &cq->buf.frag_buf);
	mlx5_db_free(mdev, &cq->db);
}
/* CQ async-event hook: a CQ error is fatal for dirty tracking. */
static void mlx5vf_cq_event(struct mlx5_core_cq *mcq, enum mlx5_event type)
{
	struct mlx5vf_pci_core_device *mvdev;

	if (type != MLX5_EVENT_TYPE_CQ_ERROR)
		return;

	mvdev = container_of(mcq, struct mlx5vf_pci_core_device,
			     tracker.cq.mcq);
	set_tracker_error(mvdev);
}
/*
 * EQ notifier for the tracker's QPs: any catastrophic/access/invalid-
 * request WQ error on either the host or the firmware QP marks the
 * tracker as failed. Events for other queues are ignored.
 */
static int mlx5vf_event_notifier(struct notifier_block *nb, unsigned long type,
				 void *data)
{
	struct mlx5_vhca_page_tracker *tracker =
		mlx5_nb_cof(nb, struct mlx5_vhca_page_tracker, nb);
	struct mlx5vf_pci_core_device *mvdev = container_of(
		tracker, struct mlx5vf_pci_core_device, tracker);
	struct mlx5_eqe *eqe = data;
	u8 event_type = (u8)type;
	u8 queue_type;
	int qp_num;

	switch (event_type) {
	case MLX5_EVENT_TYPE_WQ_CATAS_ERROR:
	case MLX5_EVENT_TYPE_WQ_ACCESS_ERROR:
	case MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
		queue_type = eqe->data.qp_srq.type;
		if (queue_type != MLX5_EVENT_QUEUE_TYPE_QP)
			break;
		/* QP number occupies the low 24 bits of qp_srq_n */
		qp_num = be32_to_cpu(eqe->data.qp_srq.qp_srq_n) & 0xffffff;
		if (qp_num != tracker->host_qp->qpn &&
		    qp_num != tracker->fw_qp->qpn)
			break;
		set_tracker_error(mvdev);
		break;
	default:
		break;
	}

	return NOTIFY_OK;
}
/* CQ completion hook: wake whoever is waiting for tracker reports. */
static void mlx5vf_cq_complete(struct mlx5_core_cq *mcq,
			       struct mlx5_eqe *eqe)
{
	struct mlx5vf_pci_core_device *mvdev =
		container_of(mcq, struct mlx5vf_pci_core_device,
			     tracker.cq.mcq);

	complete(&mvdev->tracker_comp);
}
/*
 * Create the completion queue used by the page tracker. @ncqe is
 * rounded up to a power of two; the CQE size follows the CPU cache
 * line (128B CQEs on 128B cache-line machines). The CQ is armed before
 * returning so the first report generates a completion event.
 */
static int mlx5vf_create_cq(struct mlx5_core_dev *mdev,
			    struct mlx5_vhca_page_tracker *tracker,
			    size_t ncqe)
{
	int cqe_size = cache_line_size() == 128 ? 128 : 64;
	u32 out[MLX5_ST_SZ_DW(create_cq_out)];
	struct mlx5_vhca_cq *cq;
	int inlen, err, eqn;
	void *cqc, *in;
	__be64 *pas;
	int vector;

	cq = &tracker->cq;
	ncqe = roundup_pow_of_two(ncqe);
	err = mlx5_db_alloc_node(mdev, &cq->db, mdev->priv.numa_node);
	if (err)
		return err;

	cq->ncqe = ncqe;
	cq->mcq.set_ci_db = cq->db.db;
	cq->mcq.arm_db = cq->db.db + 1;
	cq->mcq.cqe_sz = cqe_size;
	err = alloc_cq_frag_buf(mdev, &cq->buf, ncqe, cqe_size);
	if (err)
		goto err_db_free;

	init_cq_frag_buf(&cq->buf);
	inlen = MLX5_ST_SZ_BYTES(create_cq_in) +
		MLX5_FLD_SZ_BYTES(create_cq_in, pas[0]) *
		cq->buf.frag_buf.npages;
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in) {
		err = -ENOMEM;
		goto err_buff;
	}

	/* spread CQs across the available completion vectors */
	vector = raw_smp_processor_id() % mlx5_comp_vectors_max(mdev);
	err = mlx5_comp_eqn_get(mdev, vector, &eqn);
	if (err)
		goto err_vec;

	cqc = MLX5_ADDR_OF(create_cq_in, in, cq_context);
	MLX5_SET(cqc, cqc, log_cq_size, ilog2(ncqe));
	MLX5_SET(cqc, cqc, c_eqn_or_apu_element, eqn);
	MLX5_SET(cqc, cqc, uar_page, tracker->uar->index);
	MLX5_SET(cqc, cqc, log_page_size, cq->buf.frag_buf.page_shift -
		 MLX5_ADAPTER_PAGE_SHIFT);
	MLX5_SET64(cqc, cqc, dbr_addr, cq->db.dma);
	pas = (__be64 *)MLX5_ADDR_OF(create_cq_in, in, pas);
	mlx5_fill_page_frag_array(&cq->buf.frag_buf, pas);
	cq->mcq.comp = mlx5vf_cq_complete;
	cq->mcq.event = mlx5vf_cq_event;
	err = mlx5_core_create_cq(mdev, &cq->mcq, in, inlen, out, sizeof(out));
	if (err)
		goto err_vec;

	/* request a completion event for the next CQE */
	mlx5_cq_arm(&cq->mcq, MLX5_CQ_DB_REQ_NOT, tracker->uar->map,
		    cq->mcq.cons_index);
	kvfree(in);
	return 0;

err_vec:
	kvfree(in);
err_buff:
	mlx5_frag_buf_free(mdev, &cq->buf.frag_buf);
err_db_free:
	mlx5_db_free(mdev, &cq->db);
	return err;
}
/*
 * Create an RC QP for the page tracker. With @max_recv_wr != 0 this is
 * the host-side QP (receive queue backed by a frag buffer, completions
 * to the tracker CQ); with 0 it is the firmware-side QP (zero-length
 * RQ, no SQ in either case). Returns the QP or an ERR_PTR.
 */
static struct mlx5_vhca_qp *
mlx5vf_create_rc_qp(struct mlx5_core_dev *mdev,
		    struct mlx5_vhca_page_tracker *tracker, u32 max_recv_wr)
{
	u32 out[MLX5_ST_SZ_DW(create_qp_out)] = {};
	struct mlx5_vhca_qp *qp;
	u8 log_rq_stride;
	u8 log_rq_sz;
	void *qpc;
	int inlen;
	void *in;
	int err;

	qp = kzalloc(sizeof(*qp), GFP_KERNEL_ACCOUNT);
	if (!qp)
		return ERR_PTR(-ENOMEM);

	err = mlx5_db_alloc_node(mdev, &qp->db, mdev->priv.numa_node);
	if (err)
		goto err_free;

	if (max_recv_wr) {
		qp->rq.wqe_cnt = roundup_pow_of_two(max_recv_wr);
		log_rq_stride = ilog2(MLX5_SEND_WQE_DS);
		log_rq_sz = ilog2(qp->rq.wqe_cnt);
		err = mlx5_frag_buf_alloc_node(mdev,
			wq_get_byte_sz(log_rq_sz, log_rq_stride),
			&qp->buf, mdev->priv.numa_node);
		if (err)
			goto err_db_free;
		mlx5_init_fbc(qp->buf.frags, log_rq_stride, log_rq_sz, &qp->rq.fbc);
	}

	qp->rq.db = &qp->db.db[MLX5_RCV_DBR];
	/* qp->buf.npages is 0 for the zero-length-RQ (firmware) case */
	inlen = MLX5_ST_SZ_BYTES(create_qp_in) +
		MLX5_FLD_SZ_BYTES(create_qp_in, pas[0]) *
		qp->buf.npages;
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in) {
		err = -ENOMEM;
		goto err_in;
	}

	qpc = MLX5_ADDR_OF(create_qp_in, in, qpc);
	MLX5_SET(qpc, qpc, st, MLX5_QP_ST_RC);
	MLX5_SET(qpc, qpc, pm_state, MLX5_QP_PM_MIGRATED);
	MLX5_SET(qpc, qpc, pd, tracker->pdn);
	MLX5_SET(qpc, qpc, uar_page, tracker->uar->index);
	MLX5_SET(qpc, qpc, log_page_size,
		 qp->buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT);
	MLX5_SET(qpc, qpc, ts_format, mlx5_get_qp_default_ts(mdev));
	if (MLX5_CAP_GEN(mdev, cqe_version) == 1)
		MLX5_SET(qpc, qpc, user_index, 0xFFFFFF);
	MLX5_SET(qpc, qpc, no_sq, 1);
	if (max_recv_wr) {
		MLX5_SET(qpc, qpc, cqn_rcv, tracker->cq.mcq.cqn);
		MLX5_SET(qpc, qpc, log_rq_stride, log_rq_stride - 4);
		MLX5_SET(qpc, qpc, log_rq_size, log_rq_sz);
		MLX5_SET(qpc, qpc, rq_type, MLX5_NON_ZERO_RQ);
		MLX5_SET64(qpc, qpc, dbr_addr, qp->db.dma);
		mlx5_fill_page_frag_array(&qp->buf,
					  (__be64 *)MLX5_ADDR_OF(create_qp_in,
								 in, pas));
	} else {
		MLX5_SET(qpc, qpc, rq_type, MLX5_ZERO_LEN_RQ);
	}
	MLX5_SET(create_qp_in, in, opcode, MLX5_CMD_OP_CREATE_QP);
	err = mlx5_cmd_exec(mdev, in, inlen, out, sizeof(out));
	kvfree(in);
	if (err)
		goto err_in;

	qp->qpn = MLX5_GET(create_qp_out, out, qpn);
	return qp;

err_in:
	/* frag buffer exists only when a real RQ was requested */
	if (max_recv_wr)
		mlx5_frag_buf_free(mdev, &qp->buf);
err_db_free:
	mlx5_db_free(mdev, &qp->db);
err_free:
	kfree(qp);
	return ERR_PTR(err);
}
/*
 * Post one receive WQE on the host QP pointing at the next chunk of the
 * receive buffer, then ring the doorbell. Caller is responsible for
 * advancing recv_buf.next_rq_offset between posts.
 */
static void mlx5vf_post_recv(struct mlx5_vhca_qp *qp)
{
	struct mlx5_wqe_data_seg *data;
	unsigned int ix;

	/* RQ must not be over-posted beyond wqe_cnt outstanding entries */
	WARN_ON(qp->rq.pc - qp->rq.cc >= qp->rq.wqe_cnt);
	ix = qp->rq.pc & (qp->rq.wqe_cnt - 1);
	data = mlx5_frag_buf_get_wqe(&qp->rq.fbc, ix);
	data->byte_count = cpu_to_be32(qp->max_msg_size);
	data->lkey = cpu_to_be32(qp->recv_buf.mkey);
	data->addr = cpu_to_be64(qp->recv_buf.next_rq_offset);
	qp->rq.pc++;
	/* Make sure that descriptors are written before doorbell record. */
	dma_wmb();
	*qp->rq.db = cpu_to_be32(qp->rq.pc & 0xffff);
}
/*
 * Walk a tracker QP through RST->INIT->RTR (and, for the firmware-side
 * QP, on to RTS). The host QP stops at RTR (receive-only) after its RQ
 * has been fully posted; the firmware QP continues to RTS so it can
 * send dirty-page reports.
 *
 * Cleanup: the duplicated MLX5_SET(..., qpn, qp->qpn) calls in the
 * INIT2RTR and RTR2RTS inputs were removed -- each input had qpn set
 * twice with the same value, which is redundant.
 */
static int mlx5vf_activate_qp(struct mlx5_core_dev *mdev,
			      struct mlx5_vhca_qp *qp, u32 remote_qpn,
			      bool host_qp)
{
	u32 init_in[MLX5_ST_SZ_DW(rst2init_qp_in)] = {};
	u32 rtr_in[MLX5_ST_SZ_DW(init2rtr_qp_in)] = {};
	u32 rts_in[MLX5_ST_SZ_DW(rtr2rts_qp_in)] = {};
	void *qpc;
	int ret;

	/* Init */
	qpc = MLX5_ADDR_OF(rst2init_qp_in, init_in, qpc);
	MLX5_SET(qpc, qpc, primary_address_path.vhca_port_num, 1);
	MLX5_SET(qpc, qpc, pm_state, MLX5_QPC_PM_STATE_MIGRATED);
	MLX5_SET(qpc, qpc, rre, 1);
	MLX5_SET(qpc, qpc, rwe, 1);
	MLX5_SET(rst2init_qp_in, init_in, opcode, MLX5_CMD_OP_RST2INIT_QP);
	MLX5_SET(rst2init_qp_in, init_in, qpn, qp->qpn);
	ret = mlx5_cmd_exec_in(mdev, rst2init_qp, init_in);
	if (ret)
		return ret;

	if (host_qp) {
		struct mlx5_vhca_recv_buf *recv_buf = &qp->recv_buf;
		int i;

		/* fill the whole RQ before moving to RTR */
		for (i = 0; i < qp->rq.wqe_cnt; i++) {
			mlx5vf_post_recv(qp);
			recv_buf->next_rq_offset += qp->max_msg_size;
		}
	}

	/* RTR */
	qpc = MLX5_ADDR_OF(init2rtr_qp_in, rtr_in, qpc);
	MLX5_SET(qpc, qpc, mtu, IB_MTU_4096);
	MLX5_SET(qpc, qpc, log_msg_max, MLX5_CAP_GEN(mdev, log_max_msg));
	MLX5_SET(qpc, qpc, remote_qpn, remote_qpn);
	MLX5_SET(qpc, qpc, primary_address_path.vhca_port_num, 1);
	/* fl=1: force loopback, both QPs live on the same device */
	MLX5_SET(qpc, qpc, primary_address_path.fl, 1);
	MLX5_SET(qpc, qpc, min_rnr_nak, 1);
	MLX5_SET(init2rtr_qp_in, rtr_in, opcode, MLX5_CMD_OP_INIT2RTR_QP);
	MLX5_SET(init2rtr_qp_in, rtr_in, qpn, qp->qpn);
	ret = mlx5_cmd_exec_in(mdev, init2rtr_qp, rtr_in);
	if (ret || host_qp)
		return ret;

	/* RTS */
	qpc = MLX5_ADDR_OF(rtr2rts_qp_in, rts_in, qpc);
	MLX5_SET(qpc, qpc, retry_count, 7);
	MLX5_SET(qpc, qpc, rnr_retry, 7); /* Infinite retry if RNR NACK */
	MLX5_SET(qpc, qpc, primary_address_path.ack_timeout, 0x8); /* ~1ms */
	MLX5_SET(rtr2rts_qp_in, rts_in, opcode, MLX5_CMD_OP_RTR2RTS_QP);
	MLX5_SET(rtr2rts_qp_in, rts_in, qpn, qp->qpn);
	return mlx5_cmd_exec_in(mdev, rtr2rts_qp, rts_in);
}
/* Destroy a tracker QP and free its buffer, doorbell and structure. */
static void mlx5vf_destroy_qp(struct mlx5_core_dev *mdev,
			      struct mlx5_vhca_qp *qp)
{
	u32 in[MLX5_ST_SZ_DW(destroy_qp_in)] = {};

	MLX5_SET(destroy_qp_in, in, opcode, MLX5_CMD_OP_DESTROY_QP);
	MLX5_SET(destroy_qp_in, in, qpn, qp->qpn);
	mlx5_cmd_exec_in(mdev, destroy_qp, in);
	mlx5_frag_buf_free(mdev, &qp->buf);
	mlx5_db_free(mdev, &qp->db);
	kfree(qp);
}
/* Free all pages of a tracker receive buffer and the page-pointer array. */
static void free_recv_pages(struct mlx5_vhca_recv_buf *recv_buf)
{
	unsigned int idx;

	/* Undo alloc_pages_bulk_array() */
	for (idx = 0; idx < recv_buf->npages; idx++)
		__free_page(recv_buf->page_list[idx]);

	kvfree(recv_buf->page_list);
}
/*
 * Allocate @npages pages for the tracker receive buffer via repeated
 * bulk allocation. On any bulk call returning zero pages, everything
 * allocated so far is released and -ENOMEM returned (the zeroed
 * page_list lets the error loop skip unallocated slots).
 */
static int alloc_recv_pages(struct mlx5_vhca_recv_buf *recv_buf,
			    unsigned int npages)
{
	unsigned int done = 0;
	int i;

	recv_buf->page_list = kvcalloc(npages, sizeof(*recv_buf->page_list),
				       GFP_KERNEL_ACCOUNT);
	if (!recv_buf->page_list)
		return -ENOMEM;

	while (done < npages) {
		unsigned int got;

		got = alloc_pages_bulk_array(GFP_KERNEL_ACCOUNT,
					     npages - done,
					     recv_buf->page_list + done);
		if (!got)
			goto err;
		done += got;
	}

	recv_buf->npages = npages;
	return 0;

err:
	for (i = 0; i < npages; i++) {
		if (recv_buf->page_list[i])
			__free_page(recv_buf->page_list[i]);
	}
	kvfree(recv_buf->page_list);
	return -ENOMEM;
}
/*
 * DMA-map every receive-buffer page for device-to-CPU transfers and
 * record the handles in recv_buf->dma_addrs.
 *
 * Fix: the error path unmapped with dma_unmap_single() although the
 * mappings were created with dma_map_page(); the DMA API requires
 * matching map/unmap variants (CONFIG_DMA_API_DEBUG warns on the
 * mismatch). Use dma_unmap_page() instead.
 */
static int register_dma_recv_pages(struct mlx5_core_dev *mdev,
				   struct mlx5_vhca_recv_buf *recv_buf)
{
	int i, j;

	recv_buf->dma_addrs = kvcalloc(recv_buf->npages,
				       sizeof(*recv_buf->dma_addrs),
				       GFP_KERNEL_ACCOUNT);
	if (!recv_buf->dma_addrs)
		return -ENOMEM;

	for (i = 0; i < recv_buf->npages; i++) {
		recv_buf->dma_addrs[i] = dma_map_page(mdev->device,
						      recv_buf->page_list[i],
						      0, PAGE_SIZE,
						      DMA_FROM_DEVICE);
		if (dma_mapping_error(mdev->device, recv_buf->dma_addrs[i]))
			goto error;
	}
	return 0;

error:
	/* unmap only the pages mapped so far, with the matching variant */
	for (j = 0; j < i; j++)
		dma_unmap_page(mdev->device, recv_buf->dma_addrs[j],
			       PAGE_SIZE, DMA_FROM_DEVICE);

	kvfree(recv_buf->dma_addrs);
	return -ENOMEM;
}
/*
 * Unmap all receive-buffer pages and free the dma_addrs array.
 *
 * Fix: use dma_unmap_page() to match the dma_map_page() calls in
 * register_dma_recv_pages(); dma_unmap_single() on a page mapping is a
 * DMA API pairing violation (flagged by CONFIG_DMA_API_DEBUG).
 */
static void unregister_dma_recv_pages(struct mlx5_core_dev *mdev,
				      struct mlx5_vhca_recv_buf *recv_buf)
{
	int i;

	for (i = 0; i < recv_buf->npages; i++)
		dma_unmap_page(mdev->device, recv_buf->dma_addrs[i],
			       PAGE_SIZE, DMA_FROM_DEVICE);

	kvfree(recv_buf->dma_addrs);
}
/*
 * Release the host QP's receive resources in reverse allocation order:
 * mkey, DMA mappings, then the pages themselves.
 */
static void mlx5vf_free_qp_recv_resources(struct mlx5_core_dev *mdev,
					  struct mlx5_vhca_qp *qp)
{
	struct mlx5_vhca_recv_buf *recv_buf = &qp->recv_buf;

	mlx5_core_destroy_mkey(mdev, recv_buf->mkey);
	unregister_dma_recv_pages(mdev, recv_buf);
	free_recv_pages(&qp->recv_buf);
}
/*
 * Build the host QP's receive buffer of @rq_size bytes: allocate pages,
 * DMA-map them, and create the covering mkey under @pdn. Unwinds in
 * reverse order on failure.
 */
static int mlx5vf_alloc_qp_recv_resources(struct mlx5_core_dev *mdev,
					  struct mlx5_vhca_qp *qp, u32 pdn,
					  u64 rq_size)
{
	unsigned int npages = DIV_ROUND_UP_ULL(rq_size, PAGE_SIZE);
	struct mlx5_vhca_recv_buf *recv_buf = &qp->recv_buf;
	int err;

	err = alloc_recv_pages(recv_buf, npages);
	if (err < 0)
		return err;

	err = register_dma_recv_pages(mdev, recv_buf);
	if (err)
		goto end;

	err = _create_mkey(mdev, pdn, NULL, recv_buf, &recv_buf->mkey);
	if (err)
		goto err_create_mkey;

	return 0;

err_create_mkey:
	unregister_dma_recv_pages(mdev, recv_buf);
end:
	free_recv_pages(recv_buf);
	return err;
}
/*
 * Tear down the whole dirty-page tracker (notifier, FW object, QPs, CQ,
 * PD, UAR) in reverse creation order. No-op unless logging is active.
 * Caller holds state_mutex; must not be called after mdev detach.
 */
static void
_mlx5vf_free_page_tracker_resources(struct mlx5vf_pci_core_device *mvdev)
{
	struct mlx5_vhca_page_tracker *tracker = &mvdev->tracker;
	struct mlx5_core_dev *mdev = mvdev->mdev;

	lockdep_assert_held(&mvdev->state_mutex);

	if (!mvdev->log_active)
		return;

	WARN_ON(mvdev->mdev_detach);

	mlx5_eq_notifier_unregister(mdev, &tracker->nb);
	mlx5vf_cmd_destroy_tracker(mdev, tracker->id);
	mlx5vf_destroy_qp(mdev, tracker->fw_qp);
	mlx5vf_free_qp_recv_resources(mdev, tracker->host_qp);
	mlx5vf_destroy_qp(mdev, tracker->host_qp);
	mlx5vf_destroy_cq(mdev, &tracker->cq);
	mlx5_core_dealloc_pd(mdev, tracker->pdn);
	mlx5_put_uars_page(mdev, tracker->uar);
	mvdev->log_active = false;
}
/*
 * vfio log_ops stop hook: free the tracker if active. Always returns 0
 * (stopping an inactive tracker is not an error).
 */
int mlx5vf_stop_page_tracker(struct vfio_device *vdev)
{
	struct mlx5vf_pci_core_device *mvdev = container_of(
		vdev, struct mlx5vf_pci_core_device, core_device.vdev);

	mutex_lock(&mvdev->state_mutex);
	if (!mvdev->log_active)
		goto end;

	_mlx5vf_free_page_tracker_resources(mvdev);
	mvdev->log_active = false;
end:
	mlx5vf_state_mutex_unlock(mvdev);
	return 0;
}
/*
 * Start dirty page tracking for the VF.  Builds the full resource chain
 * (UAR, PD, CQ, host/fw QPs, recv buffer) and then creates the firmware
 * tracker object over the given IOVA ranges.  On success *page_size is
 * updated to the page size the device will actually track at.
 */
int mlx5vf_start_page_tracker(struct vfio_device *vdev,
			      struct rb_root_cached *ranges, u32 nnodes,
			      u64 *page_size)
{
	struct mlx5vf_pci_core_device *mvdev = container_of(
		vdev, struct mlx5vf_pci_core_device, core_device.vdev);
	struct mlx5_vhca_page_tracker *tracker = &mvdev->tracker;
	u8 log_tracked_page = ilog2(*page_size);
	struct mlx5_vhca_qp *host_qp;
	struct mlx5_vhca_qp *fw_qp;
	struct mlx5_core_dev *mdev;
	u32 max_msg_size = PAGE_SIZE;
	u64 rq_size = SZ_2M;
	u32 max_recv_wr;
	int err;

	mutex_lock(&mvdev->state_mutex);
	if (mvdev->mdev_detach) {
		err = -ENOTCONN;
		goto end;
	}

	/* Only one tracker instance may be active at a time. */
	if (mvdev->log_active) {
		err = -EINVAL;
		goto end;
	}

	mdev = mvdev->mdev;
	memset(tracker, 0, sizeof(*tracker));
	tracker->uar = mlx5_get_uars_page(mdev);
	if (IS_ERR(tracker->uar)) {
		err = PTR_ERR(tracker->uar);
		goto end;
	}

	err = mlx5_core_alloc_pd(mdev, &tracker->pdn);
	if (err)
		goto err_uar;

	/* One receive WR per max-sized message covering the whole RQ. */
	max_recv_wr = DIV_ROUND_UP_ULL(rq_size, max_msg_size);
	err = mlx5vf_create_cq(mdev, tracker, max_recv_wr);
	if (err)
		goto err_dealloc_pd;

	host_qp = mlx5vf_create_rc_qp(mdev, tracker, max_recv_wr);
	if (IS_ERR(host_qp)) {
		err = PTR_ERR(host_qp);
		goto err_cq;
	}

	host_qp->max_msg_size = max_msg_size;
	/* Clamp the requested page size into the device-supported range. */
	if (log_tracked_page < MLX5_CAP_ADV_VIRTUALIZATION(mdev,
				pg_track_log_min_page_size)) {
		log_tracked_page = MLX5_CAP_ADV_VIRTUALIZATION(mdev,
				pg_track_log_min_page_size);
	} else if (log_tracked_page > MLX5_CAP_ADV_VIRTUALIZATION(mdev,
				pg_track_log_max_page_size)) {
		log_tracked_page = MLX5_CAP_ADV_VIRTUALIZATION(mdev,
				pg_track_log_max_page_size);
	}
	host_qp->tracked_page_size = (1ULL << log_tracked_page);
	err = mlx5vf_alloc_qp_recv_resources(mdev, host_qp, tracker->pdn,
					     rq_size);
	if (err)
		goto err_host_qp;

	/* The firmware-side QP posts no receives (zero recv WRs). */
	fw_qp = mlx5vf_create_rc_qp(mdev, tracker, 0);
	if (IS_ERR(fw_qp)) {
		err = PTR_ERR(fw_qp);
		goto err_recv_resources;
	}

	/* Connect the two RC QPs to each other. */
	err = mlx5vf_activate_qp(mdev, host_qp, fw_qp->qpn, true);
	if (err)
		goto err_activate;

	err = mlx5vf_activate_qp(mdev, fw_qp, host_qp->qpn, false);
	if (err)
		goto err_activate;

	tracker->host_qp = host_qp;
	tracker->fw_qp = fw_qp;
	err = mlx5vf_create_tracker(mdev, mvdev, ranges, nnodes);
	if (err)
		goto err_activate;

	MLX5_NB_INIT(&tracker->nb, mlx5vf_event_notifier, NOTIFY_ANY);
	mlx5_eq_notifier_register(mdev, &tracker->nb);
	*page_size = host_qp->tracked_page_size;
	mvdev->log_active = true;
	mlx5vf_state_mutex_unlock(mvdev);
	return 0;

err_activate:
	mlx5vf_destroy_qp(mdev, fw_qp);
err_recv_resources:
	mlx5vf_free_qp_recv_resources(mdev, host_qp);
err_host_qp:
	mlx5vf_destroy_qp(mdev, host_qp);
err_cq:
	mlx5vf_destroy_cq(mdev, &tracker->cq);
err_dealloc_pd:
	mlx5_core_dealloc_pd(mdev, tracker->pdn);
err_uar:
	mlx5_put_uars_page(mdev, tracker->uar);
end:
	mlx5vf_state_mutex_unlock(mvdev);
	return err;
}
/*
 * Decode one page-track report message, received into recv-buffer page
 * @index, and mark every reported address in the @dirty bitmap.
 */
static void
set_report_output(u32 size, int index, struct mlx5_vhca_qp *qp,
		  struct iova_bitmap *dirty)
{
	u32 entry_size = MLX5_ST_SZ_BYTES(page_track_report_entry);
	u32 nent = size / entry_size;
	struct page *page;
	u64 addr;
	u64 *buf;
	int i;

	/* Sanity: page index in range and message fits one max message. */
	if (WARN_ON(index >= qp->recv_buf.npages ||
		    (nent > qp->max_msg_size / entry_size)))
		return;

	page = qp->recv_buf.page_list[index];
	buf = kmap_local_page(page);
	for (i = 0; i < nent; i++) {
		/* Each entry carries a 64-bit dirty IOVA split low/high. */
		addr = MLX5_GET(page_track_report_entry, buf + i,
				dirty_address_low);
		addr |= (u64)MLX5_GET(page_track_report_entry, buf + i,
				      dirty_address_high) << 32;
		iova_bitmap_set(dirty, addr, qp->tracked_page_size);
	}
	kunmap_local(buf);
}
/*
 * Handle one receive completion: extract the tracker status from the
 * immediate data, decode the report payload into @dirty, then repost
 * the receive buffer for the next message.
 */
static void
mlx5vf_rq_cqe(struct mlx5_vhca_qp *qp, struct mlx5_cqe64 *cqe,
	      struct iova_bitmap *dirty, int *tracker_status)
{
	u32 size;
	int ix;

	qp->rq.cc++;
	/* Tracker status is carried in the top nibble of the immediate. */
	*tracker_status = be32_to_cpu(cqe->immediate) >> 28;
	size = be32_to_cpu(cqe->byte_cnt);
	ix = be16_to_cpu(cqe->wqe_counter) & (qp->rq.wqe_cnt - 1);

	/* zero length CQE, no data */
	WARN_ON(!size && *tracker_status == MLX5_PAGE_TRACK_STATE_REPORTING);
	if (size)
		set_report_output(size, ix, qp, dirty);

	qp->recv_buf.next_rq_offset = ix * qp->max_msg_size;
	mlx5vf_post_recv(qp);
}
/* Resolve the n-th CQE slot within the fragmented CQ buffer. */
static void *get_cqe(struct mlx5_vhca_cq *cq, int n)
{
	struct mlx5_frag_buf_ctrl *fbc = &cq->buf.fbc;

	return mlx5_frag_buf_get_wqe(fbc, n);
}
/*
 * Return the CQE at consumer index @n if it is owned by software, or
 * NULL when the CQ has no new entry at that position.
 */
static struct mlx5_cqe64 *get_sw_cqe(struct mlx5_vhca_cq *cq, int n)
{
	void *cqe = get_cqe(cq, n & (cq->ncqe - 1));
	struct mlx5_cqe64 *cqe64;

	/* For CQE sizes larger than 64 the 64-byte CQE is in the upper half. */
	cqe64 = (cq->mcq.cqe_sz == 64) ? cqe : cqe + 64;

	/*
	 * Software owns the CQE when its opcode is valid and its ownership
	 * bit matches the parity of the current pass over the ring.
	 */
	if (likely(get_cqe_opcode(cqe64) != MLX5_CQE_INVALID) &&
	    !((cqe64->op_own & MLX5_CQE_OWNER_MASK) ^ !!(n & (cq->ncqe)))) {
		return cqe64;
	} else {
		return NULL;
	}
}
/*
 * Poll a single completion from @cq.  Returns CQ_EMPTY when nothing is
 * pending, CQ_OK after processing a receive completion, and
 * CQ_POLL_ERR on any unexpected opcode.
 */
static int
mlx5vf_cq_poll_one(struct mlx5_vhca_cq *cq, struct mlx5_vhca_qp *qp,
		   struct iova_bitmap *dirty, int *tracker_status)
{
	struct mlx5_cqe64 *cqe;
	u8 opcode;

	cqe = get_sw_cqe(cq, cq->mcq.cons_index);
	if (!cqe)
		return CQ_EMPTY;

	++cq->mcq.cons_index;
	/*
	 * Make sure we read CQ entry contents after we've checked the
	 * ownership bit.
	 */
	rmb();
	opcode = get_cqe_opcode(cqe);
	switch (opcode) {
	case MLX5_CQE_RESP_SEND_IMM:
		mlx5vf_rq_cqe(qp, cqe, dirty, tracker_status);
		return CQ_OK;
	default:
		return CQ_POLL_ERR;
	}
}
/*
 * Report-and-clear dirty pages for [iova, iova + length) into @dirty.
 * Kicks the tracker into REPORTING state and consumes completions until
 * the device leaves that state or signals an error.
 */
int mlx5vf_tracker_read_and_clear(struct vfio_device *vdev, unsigned long iova,
				  unsigned long length,
				  struct iova_bitmap *dirty)
{
	struct mlx5vf_pci_core_device *mvdev = container_of(
		vdev, struct mlx5vf_pci_core_device, core_device.vdev);
	struct mlx5_vhca_page_tracker *tracker = &mvdev->tracker;
	struct mlx5_vhca_cq *cq = &tracker->cq;
	struct mlx5_core_dev *mdev;
	int poll_err, err;

	mutex_lock(&mvdev->state_mutex);
	if (!mvdev->log_active) {
		err = -EINVAL;
		goto end;
	}

	if (mvdev->mdev_detach) {
		err = -ENOTCONN;
		goto end;
	}

	mdev = mvdev->mdev;
	err = mlx5vf_cmd_modify_tracker(mdev, tracker->id, iova, length,
					MLX5_PAGE_TRACK_STATE_REPORTING);
	if (err)
		goto end;

	tracker->status = MLX5_PAGE_TRACK_STATE_REPORTING;
	while (tracker->status == MLX5_PAGE_TRACK_STATE_REPORTING &&
	       !tracker->is_err) {
		poll_err = mlx5vf_cq_poll_one(cq, tracker->host_qp, dirty,
					      &tracker->status);
		if (poll_err == CQ_EMPTY) {
			/*
			 * Arm the CQ, then poll once more to close the race
			 * between a completion arriving and the arm taking
			 * effect; only then sleep for the event.
			 */
			mlx5_cq_arm(&cq->mcq, MLX5_CQ_DB_REQ_NOT, tracker->uar->map,
				    cq->mcq.cons_index);
			poll_err = mlx5vf_cq_poll_one(cq, tracker->host_qp,
						      dirty, &tracker->status);
			if (poll_err == CQ_EMPTY) {
				wait_for_completion(&mvdev->tracker_comp);
				continue;
			}
		}
		if (poll_err == CQ_POLL_ERR) {
			err = -EIO;
			goto end;
		}
		/* Publish the new consumer index to hardware. */
		mlx5_cq_set_ci(&cq->mcq);
	}

	if (tracker->status == MLX5_PAGE_TRACK_STATE_ERROR)
		tracker->is_err = true;

	/* A tracker error is sticky; report -EIO from here on. */
	if (tracker->is_err)
		err = -EIO;

end:
	mlx5vf_state_mutex_unlock(mvdev);
	return err;
}
| linux-master | drivers/vfio/pci/mlx5/cmd.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved
*/
#include <linux/device.h>
#include <linux/eventfd.h>
#include <linux/file.h>
#include <linux/interrupt.h>
#include <linux/iommu.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/notifier.h>
#include <linux/pci.h>
#include <linux/pm_runtime.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#include <linux/vfio.h>
#include <linux/sched/mm.h>
#include <linux/anon_inodes.h>
#include "cmd.h"
/* Device specification max LOAD size */
#define MAX_LOAD_SIZE (BIT_ULL(__mlx5_bit_sz(load_vhca_state_in, size)) - 1)
/* Map a PCI device back to its mlx5 VFIO wrapper via the core drvdata. */
static struct mlx5vf_pci_core_device *mlx5vf_drvdata(struct pci_dev *pdev)
{
	struct vfio_pci_core_device *core_device;

	core_device = dev_get_drvdata(&pdev->dev);
	return container_of(core_device, struct mlx5vf_pci_core_device,
			    core_device);
}
/*
 * Translate a byte @offset inside the migration data buffer to its
 * backing page.  A cursor (last_offset/last_offset_sg/sg_last_entry)
 * is cached because accesses are sequential; it is rewound when the
 * offset moves backwards.  Returns NULL if @offset is out of range.
 */
struct page *
mlx5vf_get_migration_page(struct mlx5_vhca_data_buffer *buf,
			  unsigned long offset)
{
	unsigned long cur_offset = 0;
	struct scatterlist *sg;
	unsigned int i;

	/* All accesses are sequential */
	if (offset < buf->last_offset || !buf->last_offset_sg) {
		buf->last_offset = 0;
		buf->last_offset_sg = buf->table.sgt.sgl;
		buf->sg_last_entry = 0;
	}

	cur_offset = buf->last_offset;

	for_each_sg(buf->last_offset_sg, sg,
			buf->table.sgt.orig_nents - buf->sg_last_entry, i) {
		if (offset < sg->length + cur_offset) {
			/* Cache the hit position for the next lookup. */
			buf->last_offset_sg = sg;
			buf->sg_last_entry += i;
			buf->last_offset = cur_offset;
			return nth_page(sg_page(sg),
					(offset - cur_offset) / PAGE_SIZE);
		}
		cur_offset += sg->length;
	}
	return NULL;
}
/*
 * Grow the migration data buffer by @npages pages.  Pages are bulk
 * allocated in batches sized to a single page of pointers, then
 * appended to the buffer's sg table.  On failure, previously appended
 * pages remain owned by the buffer (freed at buffer teardown).
 */
int mlx5vf_add_migration_pages(struct mlx5_vhca_data_buffer *buf,
			       unsigned int npages)
{
	unsigned int to_alloc = npages;
	struct page **page_list;
	unsigned long filled;
	unsigned int to_fill;
	int ret;

	/* Batch size: as many page pointers as fit in one page. */
	to_fill = min_t(unsigned int, npages, PAGE_SIZE / sizeof(*page_list));
	page_list = kvzalloc(to_fill * sizeof(*page_list), GFP_KERNEL_ACCOUNT);
	if (!page_list)
		return -ENOMEM;

	do {
		filled = alloc_pages_bulk_array(GFP_KERNEL_ACCOUNT, to_fill,
						page_list);
		if (!filled) {
			ret = -ENOMEM;
			goto err;
		}
		to_alloc -= filled;
		ret = sg_alloc_append_table_from_pages(
			&buf->table, page_list, filled, 0,
			filled << PAGE_SHIFT, UINT_MAX, SG_MAX_SINGLE_ALLOC,
			GFP_KERNEL_ACCOUNT);

		if (ret)
			goto err;
		buf->allocated_length += filled * PAGE_SIZE;
		/* clean input for another bulk allocation */
		memset(page_list, 0, filled * sizeof(*page_list));
		to_fill = min_t(unsigned int, to_alloc,
				PAGE_SIZE / sizeof(*page_list));
	} while (to_alloc > 0);

	kvfree(page_list);
	return 0;

err:
	kvfree(page_list);
	return ret;
}
/* Poison the migration file so subsequent reads/writes observe ERROR. */
static void mlx5vf_disable_fd(struct mlx5_vf_migration_file *migf)
{
	mutex_lock(&migf->lock);
	migf->state = MLX5_MIGF_STATE_ERROR;
	migf->filp->f_pos = 0;
	mutex_unlock(&migf->lock);
}
/* ->release() for both save and resume migration files: final cleanup. */
static int mlx5vf_release_file(struct inode *inode, struct file *filp)
{
	struct mlx5_vf_migration_file *migf = filp->private_data;

	mlx5vf_disable_fd(migf);
	mutex_destroy(&migf->lock);
	kfree(migf);
	return 0;
}
/*
 * Return the data buffer covering stream position @pos, or NULL.
 * *end_of_data is set when the list is empty.  Since this is a stream
 * FD, valid data must always be on the first buffer; anything else
 * marks the file as being in error.
 */
static struct mlx5_vhca_data_buffer *
mlx5vf_get_data_buff_from_pos(struct mlx5_vf_migration_file *migf, loff_t pos,
			      bool *end_of_data)
{
	struct mlx5_vhca_data_buffer *buf;
	bool found = false;

	*end_of_data = false;
	spin_lock_irq(&migf->list_lock);
	if (list_empty(&migf->buf_list)) {
		*end_of_data = true;
		goto end;
	}

	buf = list_first_entry(&migf->buf_list, struct mlx5_vhca_data_buffer,
			       buf_elm);
	if (pos >= buf->start_pos &&
	    pos < buf->start_pos + buf->length) {
		found = true;
		goto end;
	}

	/*
	 * As we use a stream based FD we may expect having the data always
	 * on first chunk
	 */
	migf->state = MLX5_MIGF_STATE_ERROR;

end:
	spin_unlock_irq(&migf->list_lock);
	return found ? buf : NULL;
}
/*
 * Copy data from @vhca_buf to userspace, page by page, advancing
 * *buf/*len/*pos.  When the buffer is fully consumed it is moved to
 * the avail_list for reuse.  Returns bytes copied or negative errno.
 */
static ssize_t mlx5vf_buf_read(struct mlx5_vhca_data_buffer *vhca_buf,
			       char __user **buf, size_t *len, loff_t *pos)
{
	unsigned long offset;
	ssize_t done = 0;
	size_t copy_len;

	/* Never copy past the end of this buffer. */
	copy_len = min_t(size_t,
			 vhca_buf->start_pos + vhca_buf->length - *pos, *len);
	while (copy_len) {
		size_t page_offset;
		struct page *page;
		size_t page_len;
		u8 *from_buff;
		int ret;

		offset = *pos - vhca_buf->start_pos;
		page_offset = offset % PAGE_SIZE;
		offset -= page_offset;
		page = mlx5vf_get_migration_page(vhca_buf, offset);
		if (!page)
			return -EINVAL;
		page_len = min_t(size_t, copy_len, PAGE_SIZE - page_offset);
		from_buff = kmap_local_page(page);
		ret = copy_to_user(*buf, from_buff + page_offset, page_len);
		kunmap_local(from_buff);
		if (ret)
			return -EFAULT;
		*pos += page_len;
		*len -= page_len;
		*buf += page_len;
		done += page_len;
		copy_len -= page_len;
	}

	/* Buffer exhausted: recycle it onto the avail list. */
	if (*pos >= vhca_buf->start_pos + vhca_buf->length) {
		spin_lock_irq(&vhca_buf->migf->list_lock);
		list_del_init(&vhca_buf->buf_elm);
		list_add_tail(&vhca_buf->buf_elm, &vhca_buf->migf->avail_list);
		spin_unlock_irq(&vhca_buf->migf->list_lock);
	}

	return done;
}
/*
 * ->read() for the save FD.  Blocks (unless O_NONBLOCK) until data is
 * available or the file reaches a terminal/pre-copy state, then drains
 * ready buffers to userspace.  -ENOMSG signals a temporary EOF during
 * PRE_COPY; -EAGAIN is returned for a non-blocking reader with no data.
 */
static ssize_t mlx5vf_save_read(struct file *filp, char __user *buf, size_t len,
				loff_t *pos)
{
	struct mlx5_vf_migration_file *migf = filp->private_data;
	struct mlx5_vhca_data_buffer *vhca_buf;
	bool first_loop_call = true;
	bool end_of_data;
	ssize_t done = 0;

	/* Stream FD: an explicit position (pread) is not supported. */
	if (pos)
		return -ESPIPE;
	pos = &filp->f_pos;

	if (!(filp->f_flags & O_NONBLOCK)) {
		if (wait_event_interruptible(migf->poll_wait,
				!list_empty(&migf->buf_list) ||
				migf->state == MLX5_MIGF_STATE_ERROR ||
				migf->state == MLX5_MIGF_STATE_PRE_COPY_ERROR ||
				migf->state == MLX5_MIGF_STATE_PRE_COPY ||
				migf->state == MLX5_MIGF_STATE_COMPLETE))
			return -ERESTARTSYS;
	}

	mutex_lock(&migf->lock);
	if (migf->state == MLX5_MIGF_STATE_ERROR) {
		done = -ENODEV;
		goto out_unlock;
	}

	while (len) {
		ssize_t count;

		vhca_buf = mlx5vf_get_data_buff_from_pos(migf, *pos,
							 &end_of_data);
		if (first_loop_call) {
			first_loop_call = false;
			/* Temporary end of file as part of PRE_COPY */
			if (end_of_data && (migf->state == MLX5_MIGF_STATE_PRE_COPY ||
				migf->state == MLX5_MIGF_STATE_PRE_COPY_ERROR)) {
				done = -ENOMSG;
				goto out_unlock;
			}

			if (end_of_data && migf->state != MLX5_MIGF_STATE_COMPLETE) {
				if (filp->f_flags & O_NONBLOCK) {
					done = -EAGAIN;
					goto out_unlock;
				}
			}
		}

		if (end_of_data)
			goto out_unlock;

		if (!vhca_buf) {
			done = -EINVAL;
			goto out_unlock;
		}

		count = mlx5vf_buf_read(vhca_buf, &buf, &len, pos);
		if (count < 0) {
			done = count;
			goto out_unlock;
		}
		done += count;
	}

out_unlock:
	mutex_unlock(&migf->lock);
	return done;
}
/*
 * ->poll() for the save FD: readable when data is queued or the save
 * is complete; error state additionally reports EPOLLRDHUP.
 */
static __poll_t mlx5vf_save_poll(struct file *filp,
				 struct poll_table_struct *wait)
{
	struct mlx5_vf_migration_file *migf = filp->private_data;
	__poll_t pollflags = 0;

	poll_wait(filp, &migf->poll_wait, wait);

	mutex_lock(&migf->lock);
	if (migf->state == MLX5_MIGF_STATE_ERROR)
		pollflags = EPOLLIN | EPOLLRDNORM | EPOLLRDHUP;
	else if (!list_empty(&migf->buf_list) ||
		 migf->state == MLX5_MIGF_STATE_COMPLETE)
		pollflags = EPOLLIN | EPOLLRDNORM;
	mutex_unlock(&migf->lock);

	return pollflags;
}
/*
 * FD is exposed and user can use it after receiving an error.
 * Mark migf in error, and wake the user.
 */
static void mlx5vf_mark_err(struct mlx5_vf_migration_file *migf)
{
	migf->state = MLX5_MIGF_STATE_ERROR;
	wake_up_interruptible(&migf->poll_wait);
}
/*
 * Queue an optional-tag header record advertising the expected
 * stop-copy image size, so the destination can pre-allocate.  The
 * record becomes the stream's initial bytes (pre_copy_initial_bytes).
 */
static int mlx5vf_add_stop_copy_header(struct mlx5_vf_migration_file *migf)
{
	size_t size = sizeof(struct mlx5_vf_migration_header) +
		sizeof(struct mlx5_vf_migration_tag_stop_copy_data);
	struct mlx5_vf_migration_tag_stop_copy_data data = {};
	struct mlx5_vhca_data_buffer *header_buf = NULL;
	struct mlx5_vf_migration_header header = {};
	unsigned long flags;
	struct page *page;
	u8 *to_buff;
	int ret;

	header_buf = mlx5vf_get_data_buffer(migf, size, DMA_NONE);
	if (IS_ERR(header_buf))
		return PTR_ERR(header_buf);

	header.record_size = cpu_to_le64(sizeof(data));
	header.flags = cpu_to_le32(MLX5_MIGF_HEADER_FLAGS_TAG_OPTIONAL);
	header.tag = cpu_to_le32(MLX5_MIGF_HEADER_TAG_STOP_COPY_SIZE);
	page = mlx5vf_get_migration_page(header_buf, 0);
	if (!page) {
		ret = -EINVAL;
		goto err;
	}
	/* Header + payload fit in the first page; write both there. */
	to_buff = kmap_local_page(page);
	memcpy(to_buff, &header, sizeof(header));
	header_buf->length = sizeof(header);
	data.stop_copy_size = cpu_to_le64(migf->buf->allocated_length);
	memcpy(to_buff + sizeof(header), &data, sizeof(data));
	header_buf->length += sizeof(data);
	kunmap_local(to_buff);
	header_buf->start_pos = header_buf->migf->max_pos;
	migf->max_pos += header_buf->length;
	spin_lock_irqsave(&migf->list_lock, flags);
	list_add_tail(&header_buf->buf_elm, &migf->buf_list);
	spin_unlock_irqrestore(&migf->list_lock, flags);
	migf->pre_copy_initial_bytes = size;
	return 0;
err:
	mlx5vf_put_data_buffer(header_buf);
	return ret;
}
/*
 * Pre-allocate the stop-copy data and header buffers and publish the
 * stop-copy-size record.  On any failure both buffers are returned.
 */
static int mlx5vf_prep_stop_copy(struct mlx5_vf_migration_file *migf,
				 size_t state_size)
{
	struct mlx5_vhca_data_buffer *buf;
	size_t inc_state_size;
	int ret;

	/* let's be ready for stop_copy size that might grow by 10 percents */
	if (check_add_overflow(state_size, state_size / 10, &inc_state_size))
		inc_state_size = state_size;

	buf = mlx5vf_get_data_buffer(migf, inc_state_size, DMA_FROM_DEVICE);
	if (IS_ERR(buf))
		return PTR_ERR(buf);

	migf->buf = buf;
	buf = mlx5vf_get_data_buffer(migf,
			sizeof(struct mlx5_vf_migration_header), DMA_NONE);
	if (IS_ERR(buf)) {
		ret = PTR_ERR(buf);
		goto err;
	}

	migf->buf_header = buf;
	ret = mlx5vf_add_stop_copy_header(migf);
	if (ret)
		goto err_header;
	return 0;

err_header:
	mlx5vf_put_data_buffer(migf->buf_header);
	migf->buf_header = NULL;
err:
	mlx5vf_put_data_buffer(migf->buf);
	migf->buf = NULL;
	return ret;
}
/*
 * VFIO_MIG_GET_PRECOPY_INFO handler for the save FD.  Reports how many
 * initial/dirty bytes remain, and when the current state was fully
 * transferred while the device accumulated more, kicks off an
 * incremental SAVE so the stream can continue.
 * Lock order: mvdev->state_mutex outer, migf->lock inner.
 */
static long mlx5vf_precopy_ioctl(struct file *filp, unsigned int cmd,
				 unsigned long arg)
{
	struct mlx5_vf_migration_file *migf = filp->private_data;
	struct mlx5vf_pci_core_device *mvdev = migf->mvdev;
	struct mlx5_vhca_data_buffer *buf;
	struct vfio_precopy_info info = {};
	loff_t *pos = &filp->f_pos;
	unsigned long minsz;
	size_t inc_length = 0;
	bool end_of_data = false;
	int ret;

	if (cmd != VFIO_MIG_GET_PRECOPY_INFO)
		return -ENOTTY;

	minsz = offsetofend(struct vfio_precopy_info, dirty_bytes);

	if (copy_from_user(&info, (void __user *)arg, minsz))
		return -EFAULT;

	if (info.argsz < minsz)
		return -EINVAL;

	mutex_lock(&mvdev->state_mutex);
	if (mvdev->mig_state != VFIO_DEVICE_STATE_PRE_COPY &&
	    mvdev->mig_state != VFIO_DEVICE_STATE_PRE_COPY_P2P) {
		ret = -EINVAL;
		goto err_state_unlock;
	}

	/*
	 * We can't issue a SAVE command when the device is suspended, so as
	 * part of VFIO_DEVICE_STATE_PRE_COPY_P2P no reason to query for extra
	 * bytes that can't be read.
	 */
	if (mvdev->mig_state == VFIO_DEVICE_STATE_PRE_COPY) {
		/*
		 * Once the query returns it's guaranteed that there is no
		 * active SAVE command.
		 * As so, the other code below is safe with the proper locks.
		 */
		ret = mlx5vf_cmd_query_vhca_migration_state(mvdev, &inc_length,
							    MLX5VF_QUERY_INC);
		if (ret)
			goto err_state_unlock;
	}

	mutex_lock(&migf->lock);
	if (migf->state == MLX5_MIGF_STATE_ERROR) {
		ret = -ENODEV;
		goto err_migf_unlock;
	}

	if (migf->pre_copy_initial_bytes > *pos) {
		info.initial_bytes = migf->pre_copy_initial_bytes - *pos;
	} else {
		info.dirty_bytes = migf->max_pos - *pos;
		if (!info.dirty_bytes)
			end_of_data = true;
		/* Device-side dirty state counts toward dirty_bytes too. */
		info.dirty_bytes += inc_length;
	}

	if (!end_of_data || !inc_length) {
		mutex_unlock(&migf->lock);
		goto done;
	}

	mutex_unlock(&migf->lock);
	/*
	 * We finished transferring the current state and the device has a
	 * dirty state, save a new state to be ready for.
	 */
	buf = mlx5vf_get_data_buffer(migf, inc_length, DMA_FROM_DEVICE);
	if (IS_ERR(buf)) {
		ret = PTR_ERR(buf);
		mlx5vf_mark_err(migf);
		goto err_state_unlock;
	}

	ret = mlx5vf_cmd_save_vhca_state(mvdev, migf, buf, true, true);
	if (ret) {
		mlx5vf_mark_err(migf);
		mlx5vf_put_data_buffer(buf);
		goto err_state_unlock;
	}

done:
	mlx5vf_state_mutex_unlock(mvdev);
	if (copy_to_user((void __user *)arg, &info, minsz))
		return -EFAULT;
	return 0;

err_migf_unlock:
	mutex_unlock(&migf->lock);
err_state_unlock:
	mlx5vf_state_mutex_unlock(mvdev);
	return ret;
}
/* File operations for the save (source-side) migration FD. */
static const struct file_operations mlx5vf_save_fops = {
	.owner = THIS_MODULE,
	.read = mlx5vf_save_read,
	.poll = mlx5vf_save_poll,
	.unlocked_ioctl = mlx5vf_precopy_ioctl,
	.compat_ioctl = compat_ptr_ioctl,
	.release = mlx5vf_release_file,
	.llseek = no_llseek,
};
/*
 * Save the final incremental device state during the PRE_COPY_P2P ->
 * STOP_COPY transition, reusing the pre-allocated stop-copy buffer
 * when it is large enough.  On failure the file is marked in error.
 */
static int mlx5vf_pci_save_device_inc_data(struct mlx5vf_pci_core_device *mvdev)
{
	struct mlx5_vf_migration_file *migf = mvdev->saving_migf;
	struct mlx5_vhca_data_buffer *buf;
	size_t length;
	int ret;

	if (migf->state == MLX5_MIGF_STATE_ERROR)
		return -ENODEV;

	ret = mlx5vf_cmd_query_vhca_migration_state(mvdev, &length,
				MLX5VF_QUERY_INC | MLX5VF_QUERY_FINAL);
	if (ret)
		goto err;

	/* Checking whether we have a matching pre-allocated buffer that can fit */
	if (migf->buf && migf->buf->allocated_length >= length) {
		buf = migf->buf;
		migf->buf = NULL;
	} else {
		buf = mlx5vf_get_data_buffer(migf, length, DMA_FROM_DEVICE);
		if (IS_ERR(buf)) {
			ret = PTR_ERR(buf);
			goto err;
		}
	}

	ret = mlx5vf_cmd_save_vhca_state(mvdev, migf, buf, true, false);
	if (ret)
		goto err_save;

	return 0;

err_save:
	mlx5vf_put_data_buffer(buf);
err:
	mlx5vf_mark_err(migf);
	return ret;
}
/*
 * Create the save migration file and start the first SAVE command.
 * @track selects pre-copy mode (dirty tracking), which also sets up
 * the stop-copy buffers up front.  Returns the file or ERR_PTR.
 */
static struct mlx5_vf_migration_file *
mlx5vf_pci_save_device_data(struct mlx5vf_pci_core_device *mvdev, bool track)
{
	struct mlx5_vf_migration_file *migf;
	struct mlx5_vhca_data_buffer *buf;
	size_t length;
	int ret;

	migf = kzalloc(sizeof(*migf), GFP_KERNEL_ACCOUNT);
	if (!migf)
		return ERR_PTR(-ENOMEM);

	migf->filp = anon_inode_getfile("mlx5vf_mig", &mlx5vf_save_fops, migf,
					O_RDONLY);
	if (IS_ERR(migf->filp)) {
		ret = PTR_ERR(migf->filp);
		goto end;
	}

	migf->mvdev = mvdev;
	ret = mlx5vf_cmd_alloc_pd(migf);
	if (ret)
		goto out_free;

	stream_open(migf->filp->f_inode, migf->filp);
	mutex_init(&migf->lock);
	init_waitqueue_head(&migf->poll_wait);
	init_completion(&migf->save_comp);
	/*
	 * save_comp is being used as a binary semaphore built from
	 * a completion. A normal mutex cannot be used because the lock is
	 * passed between kernel threads and lockdep can't model this.
	 */
	complete(&migf->save_comp);
	mlx5_cmd_init_async_ctx(mvdev->mdev, &migf->async_ctx);
	INIT_WORK(&migf->async_data.work, mlx5vf_mig_file_cleanup_cb);
	INIT_LIST_HEAD(&migf->buf_list);
	INIT_LIST_HEAD(&migf->avail_list);
	spin_lock_init(&migf->list_lock);
	ret = mlx5vf_cmd_query_vhca_migration_state(mvdev, &length, 0);
	if (ret)
		goto out_pd;

	if (track) {
		/* Pre-copy: prepare stop-copy buffers and size record now. */
		ret = mlx5vf_prep_stop_copy(migf, length);
		if (ret)
			goto out_pd;
	}

	buf = mlx5vf_alloc_data_buffer(migf, length, DMA_FROM_DEVICE);
	if (IS_ERR(buf)) {
		ret = PTR_ERR(buf);
		goto out_pd;
	}

	ret = mlx5vf_cmd_save_vhca_state(mvdev, migf, buf, false, track);
	if (ret)
		goto out_save;
	return migf;
out_save:
	mlx5vf_free_data_buffer(buf);
out_pd:
	mlx5fv_cmd_clean_migf_resources(migf);
out_free:
	fput(migf->filp);
end:
	kfree(migf);
	return ERR_PTR(ret);
}
/*
 * Copy up to one page worth of user data into @vhca_buf at stream
 * position *pos, advancing *pos/*done/*buf/*len and the buffer length.
 * Helper for the resume-write state machine.
 */
static int
mlx5vf_append_page_to_mig_buf(struct mlx5_vhca_data_buffer *vhca_buf,
			      const char __user **buf, size_t *len,
			      loff_t *pos, ssize_t *done)
{
	unsigned long offset;
	size_t page_offset;
	struct page *page;
	size_t page_len;
	u8 *to_buff;
	int ret;

	offset = *pos - vhca_buf->start_pos;
	page_offset = offset % PAGE_SIZE;

	page = mlx5vf_get_migration_page(vhca_buf, offset - page_offset);
	if (!page)
		return -EINVAL;
	/* Never cross a page boundary in a single copy. */
	page_len = min_t(size_t, *len, PAGE_SIZE - page_offset);
	to_buff = kmap_local_page(page);
	ret = copy_from_user(to_buff + page_offset, *buf, page_len);
	kunmap_local(to_buff);
	if (ret)
		return -EFAULT;

	*pos += page_len;
	*done += page_len;
	*buf += page_len;
	*len -= page_len;
	vhca_buf->length += page_len;
	return 0;
}
/*
 * Legacy (no-header) resume path: grow the single data buffer to
 * @requested_length and absorb the whole user write into it.
 */
static int
mlx5vf_resume_read_image_no_header(struct mlx5_vhca_data_buffer *vhca_buf,
				   loff_t requested_length,
				   const char __user **buf, size_t *len,
				   loff_t *pos, ssize_t *done)
{
	int ret;

	if (requested_length > MAX_LOAD_SIZE)
		return -ENOMEM;

	if (vhca_buf->allocated_length < requested_length) {
		ret = mlx5vf_add_migration_pages(
			vhca_buf,
			DIV_ROUND_UP(requested_length - vhca_buf->allocated_length,
				     PAGE_SIZE));
		if (ret)
			return ret;
	}

	while (*len) {
		ret = mlx5vf_append_page_to_mig_buf(vhca_buf, buf, len, pos,
						    done);
		if (ret)
			return ret;
	}

	return 0;
}
/*
 * Absorb image payload bytes into @vhca_buf, up to @image_size.  When
 * the image is complete, switch the state machine to LOAD_IMAGE and
 * flag *has_work so the caller loops again without new user data.
 */
static ssize_t
mlx5vf_resume_read_image(struct mlx5_vf_migration_file *migf,
			 struct mlx5_vhca_data_buffer *vhca_buf,
			 size_t image_size, const char __user **buf,
			 size_t *len, loff_t *pos, ssize_t *done,
			 bool *has_work)
{
	size_t copy_len, to_copy;
	int ret;

	to_copy = min_t(size_t, *len, image_size - vhca_buf->length);
	copy_len = to_copy;
	while (to_copy) {
		ret = mlx5vf_append_page_to_mig_buf(vhca_buf, buf, &to_copy, pos,
						    done);
		if (ret)
			return ret;
	}

	*len -= copy_len;
	if (vhca_buf->length == image_size) {
		migf->load_state = MLX5_VF_LOAD_STATE_LOAD_IMAGE;
		migf->max_pos += image_size;
		*has_work = true;
	}

	return 0;
}
/*
 * Absorb the data portion of a header record.  Once complete, act on
 * known tags (currently only STOP_COPY_SIZE, which pre-sizes the data
 * buffer) and return to READ_HEADER for the next record; unknown
 * optional records are consumed and skipped.
 */
static int
mlx5vf_resume_read_header_data(struct mlx5_vf_migration_file *migf,
			       struct mlx5_vhca_data_buffer *vhca_buf,
			       const char __user **buf, size_t *len,
			       loff_t *pos, ssize_t *done)
{
	size_t copy_len, to_copy;
	size_t required_data;
	u8 *to_buff;
	int ret;

	required_data = migf->record_size - vhca_buf->length;
	to_copy = min_t(size_t, *len, required_data);
	copy_len = to_copy;
	while (to_copy) {
		ret = mlx5vf_append_page_to_mig_buf(vhca_buf, buf, &to_copy, pos,
						    done);
		if (ret)
			return ret;
	}

	*len -= copy_len;
	if (vhca_buf->length == migf->record_size) {
		switch (migf->record_tag) {
		case MLX5_MIGF_HEADER_TAG_STOP_COPY_SIZE:
		{
			struct page *page;

			page = mlx5vf_get_migration_page(vhca_buf, 0);
			if (!page)
				return -EINVAL;
			to_buff = kmap_local_page(page);
			/* Bound the advertised size so it can't exceed MAX_LOAD_SIZE. */
			migf->stop_copy_prep_size = min_t(u64,
				le64_to_cpup((__le64 *)to_buff), MAX_LOAD_SIZE);
			kunmap_local(to_buff);
			break;
		}
		default:
			/* Optional tag */
			break;
		}

		migf->load_state = MLX5_VF_LOAD_STATE_READ_HEADER;
		migf->max_pos += migf->record_size;
		vhca_buf->length = 0;
	}

	return 0;
}
/*
 * Absorb a fixed-size record header from user data.  Once complete,
 * parse record_size/flags/tag and pick the next load state: FW image
 * data goes to PREP_IMAGE, everything else (known or optional-unknown)
 * to PREP_HEADER_DATA; unknown mandatory tags fail with -EOPNOTSUPP.
 */
static int
mlx5vf_resume_read_header(struct mlx5_vf_migration_file *migf,
			  struct mlx5_vhca_data_buffer *vhca_buf,
			  const char __user **buf,
			  size_t *len, loff_t *pos,
			  ssize_t *done, bool *has_work)
{
	struct page *page;
	size_t copy_len;
	u8 *to_buff;
	int ret;

	copy_len = min_t(size_t, *len,
		sizeof(struct mlx5_vf_migration_header) - vhca_buf->length);
	page = mlx5vf_get_migration_page(vhca_buf, 0);
	if (!page)
		return -EINVAL;
	to_buff = kmap_local_page(page);
	ret = copy_from_user(to_buff + vhca_buf->length, *buf, copy_len);
	if (ret) {
		ret = -EFAULT;
		goto end;
	}

	*buf += copy_len;
	*pos += copy_len;
	*done += copy_len;
	*len -= copy_len;
	vhca_buf->length += copy_len;
	if (vhca_buf->length == sizeof(struct mlx5_vf_migration_header)) {
		u64 record_size;
		u32 flags;

		/* Header fields are little-endian on the wire. */
		record_size = le64_to_cpup((__le64 *)to_buff);
		if (record_size > MAX_LOAD_SIZE) {
			ret = -ENOMEM;
			goto end;
		}

		migf->record_size = record_size;
		flags = le32_to_cpup((__le32 *)(to_buff +
			    offsetof(struct mlx5_vf_migration_header, flags)));
		migf->record_tag = le32_to_cpup((__le32 *)(to_buff +
			    offsetof(struct mlx5_vf_migration_header, tag)));
		switch (migf->record_tag) {
		case MLX5_MIGF_HEADER_TAG_FW_DATA:
			migf->load_state = MLX5_VF_LOAD_STATE_PREP_IMAGE;
			break;
		case MLX5_MIGF_HEADER_TAG_STOP_COPY_SIZE:
			migf->load_state = MLX5_VF_LOAD_STATE_PREP_HEADER_DATA;
			break;
		default:
			if (!(flags & MLX5_MIGF_HEADER_FLAGS_TAG_OPTIONAL)) {
				ret = -EOPNOTSUPP;
				goto end;
			}
			/* We may read and skip this optional record data */
			migf->load_state = MLX5_VF_LOAD_STATE_PREP_HEADER_DATA;
		}

		migf->max_pos += vhca_buf->length;
		vhca_buf->length = 0;
		*has_work = true;
	}
end:
	kunmap_local(to_buff);
	return ret;
}
/*
 * ->write() for the resume FD: a state machine that consumes record
 * headers, header data and image payloads, (re)sizing the staging
 * buffers as needed, and loads each completed image into the device.
 * Any failure latches the file into MLX5_MIGF_STATE_ERROR.
 */
static ssize_t mlx5vf_resume_write(struct file *filp, const char __user *buf,
				   size_t len, loff_t *pos)
{
	struct mlx5_vf_migration_file *migf = filp->private_data;
	struct mlx5_vhca_data_buffer *vhca_buf = migf->buf;
	struct mlx5_vhca_data_buffer *vhca_buf_header = migf->buf_header;
	loff_t requested_length;
	bool has_work = false;
	ssize_t done = 0;
	int ret = 0;

	/* Stream FD: an explicit position (pwrite) is not supported. */
	if (pos)
		return -ESPIPE;
	pos = &filp->f_pos;

	if (*pos < 0 ||
	    check_add_overflow((loff_t)len, *pos, &requested_length))
		return -EINVAL;

	mutex_lock(&migf->mvdev->state_mutex);
	mutex_lock(&migf->lock);
	if (migf->state == MLX5_MIGF_STATE_ERROR) {
		ret = -ENODEV;
		goto out_unlock;
	}

	/* has_work lets PREP/LOAD states run without fresh user bytes. */
	while (len || has_work) {
		has_work = false;
		switch (migf->load_state) {
		case MLX5_VF_LOAD_STATE_READ_HEADER:
			ret = mlx5vf_resume_read_header(migf, vhca_buf_header,
							&buf, &len, pos,
							&done, &has_work);
			if (ret)
				goto out_unlock;
			break;
		case MLX5_VF_LOAD_STATE_PREP_HEADER_DATA:
			/* Grow the header buffer if the record won't fit. */
			if (vhca_buf_header->allocated_length < migf->record_size) {
				mlx5vf_free_data_buffer(vhca_buf_header);

				migf->buf_header = mlx5vf_alloc_data_buffer(migf,
						migf->record_size, DMA_NONE);
				if (IS_ERR(migf->buf_header)) {
					ret = PTR_ERR(migf->buf_header);
					migf->buf_header = NULL;
					goto out_unlock;
				}

				vhca_buf_header = migf->buf_header;
			}

			vhca_buf_header->start_pos = migf->max_pos;
			migf->load_state = MLX5_VF_LOAD_STATE_READ_HEADER_DATA;
			break;
		case MLX5_VF_LOAD_STATE_READ_HEADER_DATA:
			ret = mlx5vf_resume_read_header_data(migf, vhca_buf_header,
							&buf, &len, pos, &done);
			if (ret)
				goto out_unlock;
			break;
		case MLX5_VF_LOAD_STATE_PREP_IMAGE:
		{
			u64 size = max(migf->record_size,
				       migf->stop_copy_prep_size);

			/* Grow the image buffer if the image won't fit. */
			if (vhca_buf->allocated_length < size) {
				mlx5vf_free_data_buffer(vhca_buf);

				migf->buf = mlx5vf_alloc_data_buffer(migf,
							size, DMA_TO_DEVICE);
				if (IS_ERR(migf->buf)) {
					ret = PTR_ERR(migf->buf);
					migf->buf = NULL;
					goto out_unlock;
				}

				vhca_buf = migf->buf;
			}

			vhca_buf->start_pos = migf->max_pos;
			migf->load_state = MLX5_VF_LOAD_STATE_READ_IMAGE;
			break;
		}
		case MLX5_VF_LOAD_STATE_READ_IMAGE_NO_HEADER:
			ret = mlx5vf_resume_read_image_no_header(vhca_buf,
						requested_length,
						&buf, &len, pos, &done);
			if (ret)
				goto out_unlock;
			break;
		case MLX5_VF_LOAD_STATE_READ_IMAGE:
			ret = mlx5vf_resume_read_image(migf, vhca_buf,
						migf->record_size,
						&buf, &len, pos, &done, &has_work);
			if (ret)
				goto out_unlock;
			break;
		case MLX5_VF_LOAD_STATE_LOAD_IMAGE:
			ret = mlx5vf_cmd_load_vhca_state(migf->mvdev, migf, vhca_buf);
			if (ret)
				goto out_unlock;
			migf->load_state = MLX5_VF_LOAD_STATE_READ_HEADER;

			/* prep header buf for next image */
			vhca_buf_header->length = 0;
			/* prep data buf for next image */
			vhca_buf->length = 0;

			break;
		default:
			break;
		}
	}

out_unlock:
	if (ret)
		migf->state = MLX5_MIGF_STATE_ERROR;
	mutex_unlock(&migf->lock);
	mlx5vf_state_mutex_unlock(migf->mvdev);
	return ret ? ret : done;
}
/* File operations for the resume (destination-side) migration FD. */
static const struct file_operations mlx5vf_resume_fops = {
	.owner = THIS_MODULE,
	.write = mlx5vf_resume_write,
	.release = mlx5vf_release_file,
	.llseek = no_llseek,
};
/*
 * Create the resume migration file and its initial staging buffers.
 * With pre-copy support the stream carries record headers, so the load
 * state machine starts at READ_HEADER; otherwise it reads a raw image.
 */
static struct mlx5_vf_migration_file *
mlx5vf_pci_resume_device_data(struct mlx5vf_pci_core_device *mvdev)
{
	struct mlx5_vf_migration_file *migf;
	struct mlx5_vhca_data_buffer *buf;
	int ret;

	migf = kzalloc(sizeof(*migf), GFP_KERNEL_ACCOUNT);
	if (!migf)
		return ERR_PTR(-ENOMEM);

	migf->filp = anon_inode_getfile("mlx5vf_mig", &mlx5vf_resume_fops, migf,
					O_WRONLY);
	if (IS_ERR(migf->filp)) {
		ret = PTR_ERR(migf->filp);
		goto end;
	}

	migf->mvdev = mvdev;
	ret = mlx5vf_cmd_alloc_pd(migf);
	if (ret)
		goto out_free;

	/* Data buffer starts empty and grows as the image streams in. */
	buf = mlx5vf_alloc_data_buffer(migf, 0, DMA_TO_DEVICE);
	if (IS_ERR(buf)) {
		ret = PTR_ERR(buf);
		goto out_pd;
	}

	migf->buf = buf;
	if (MLX5VF_PRE_COPY_SUPP(mvdev)) {
		buf = mlx5vf_alloc_data_buffer(migf,
			sizeof(struct mlx5_vf_migration_header), DMA_NONE);
		if (IS_ERR(buf)) {
			ret = PTR_ERR(buf);
			goto out_buf;
		}

		migf->buf_header = buf;
		migf->load_state = MLX5_VF_LOAD_STATE_READ_HEADER;
	} else {
		/* Initial state will be to read the image */
		migf->load_state = MLX5_VF_LOAD_STATE_READ_IMAGE_NO_HEADER;
	}

	stream_open(migf->filp->f_inode, migf->filp);
	mutex_init(&migf->lock);
	INIT_LIST_HEAD(&migf->buf_list);
	INIT_LIST_HEAD(&migf->avail_list);
	spin_lock_init(&migf->list_lock);
	return migf;
out_buf:
	mlx5vf_free_data_buffer(migf->buf);
out_pd:
	mlx5vf_cmd_dealloc_pd(migf);
out_free:
	fput(migf->filp);
end:
	kfree(migf);
	return ERR_PTR(ret);
}
/*
 * Invalidate and drop the driver's references to any open save/resume
 * migration files.  For the save side, pending async commands are
 * flushed before the file is released.
 */
void mlx5vf_disable_fds(struct mlx5vf_pci_core_device *mvdev)
{
	if (mvdev->resuming_migf) {
		mlx5vf_disable_fd(mvdev->resuming_migf);
		mlx5fv_cmd_clean_migf_resources(mvdev->resuming_migf);
		fput(mvdev->resuming_migf->filp);
		mvdev->resuming_migf = NULL;
	}
	if (mvdev->saving_migf) {
		/* Quiesce async SAVE completions before tearing down. */
		mlx5_cmd_cleanup_async_ctx(&mvdev->saving_migf->async_ctx);
		cancel_work_sync(&mvdev->saving_migf->async_data.work);
		mlx5vf_disable_fd(mvdev->saving_migf);
		mlx5fv_cmd_clean_migf_resources(mvdev->saving_migf);
		fput(mvdev->saving_migf->filp);
		mvdev->saving_migf = NULL;
	}
}
/*
 * Execute one arc of the VFIO migration state machine (caller holds
 * state_mutex).  Returns a migration file for arcs that expose one
 * (STOP -> STOP_COPY, STOP -> RESUMING, RUNNING -> PRE_COPY), NULL for
 * the rest, or ERR_PTR on failure.
 */
static struct file *
mlx5vf_pci_step_device_state_locked(struct mlx5vf_pci_core_device *mvdev,
				    u32 new)
{
	u32 cur = mvdev->mig_state;
	int ret;

	if (cur == VFIO_DEVICE_STATE_RUNNING_P2P && new == VFIO_DEVICE_STATE_STOP) {
		ret = mlx5vf_cmd_suspend_vhca(mvdev,
			MLX5_SUSPEND_VHCA_IN_OP_MOD_SUSPEND_RESPONDER);
		if (ret)
			return ERR_PTR(ret);
		return NULL;
	}

	if (cur == VFIO_DEVICE_STATE_STOP && new == VFIO_DEVICE_STATE_RUNNING_P2P) {
		ret = mlx5vf_cmd_resume_vhca(mvdev,
			MLX5_RESUME_VHCA_IN_OP_MOD_RESUME_RESPONDER);
		if (ret)
			return ERR_PTR(ret);
		return NULL;
	}

	if ((cur == VFIO_DEVICE_STATE_RUNNING && new == VFIO_DEVICE_STATE_RUNNING_P2P) ||
	    (cur == VFIO_DEVICE_STATE_PRE_COPY && new == VFIO_DEVICE_STATE_PRE_COPY_P2P)) {
		ret = mlx5vf_cmd_suspend_vhca(mvdev,
			MLX5_SUSPEND_VHCA_IN_OP_MOD_SUSPEND_INITIATOR);
		if (ret)
			return ERR_PTR(ret);
		return NULL;
	}

	if ((cur == VFIO_DEVICE_STATE_RUNNING_P2P && new == VFIO_DEVICE_STATE_RUNNING) ||
	    (cur == VFIO_DEVICE_STATE_PRE_COPY_P2P && new == VFIO_DEVICE_STATE_PRE_COPY)) {
		ret = mlx5vf_cmd_resume_vhca(mvdev,
			MLX5_RESUME_VHCA_IN_OP_MOD_RESUME_INITIATOR);
		if (ret)
			return ERR_PTR(ret);
		return NULL;
	}

	if (cur == VFIO_DEVICE_STATE_STOP && new == VFIO_DEVICE_STATE_STOP_COPY) {
		struct mlx5_vf_migration_file *migf;

		migf = mlx5vf_pci_save_device_data(mvdev, false);
		if (IS_ERR(migf))
			return ERR_CAST(migf);
		/* Extra reference for the driver; user gets the returned filp. */
		get_file(migf->filp);
		mvdev->saving_migf = migf;
		return migf->filp;
	}

	if ((cur == VFIO_DEVICE_STATE_STOP_COPY && new == VFIO_DEVICE_STATE_STOP) ||
	    (cur == VFIO_DEVICE_STATE_PRE_COPY && new == VFIO_DEVICE_STATE_RUNNING) ||
	    (cur == VFIO_DEVICE_STATE_PRE_COPY_P2P &&
	     new == VFIO_DEVICE_STATE_RUNNING_P2P)) {
		mlx5vf_disable_fds(mvdev);
		return NULL;
	}

	if (cur == VFIO_DEVICE_STATE_STOP && new == VFIO_DEVICE_STATE_RESUMING) {
		struct mlx5_vf_migration_file *migf;

		migf = mlx5vf_pci_resume_device_data(mvdev);
		if (IS_ERR(migf))
			return ERR_CAST(migf);
		get_file(migf->filp);
		mvdev->resuming_migf = migf;
		return migf->filp;
	}

	if (cur == VFIO_DEVICE_STATE_RESUMING && new == VFIO_DEVICE_STATE_STOP) {
		/* Without pre-copy the raw image is loaded only now. */
		if (!MLX5VF_PRE_COPY_SUPP(mvdev)) {
			ret = mlx5vf_cmd_load_vhca_state(mvdev,
							 mvdev->resuming_migf,
							 mvdev->resuming_migf->buf);
			if (ret)
				return ERR_PTR(ret);
		}
		mlx5vf_disable_fds(mvdev);
		return NULL;
	}

	if ((cur == VFIO_DEVICE_STATE_RUNNING && new == VFIO_DEVICE_STATE_PRE_COPY) ||
	    (cur == VFIO_DEVICE_STATE_RUNNING_P2P &&
	     new == VFIO_DEVICE_STATE_PRE_COPY_P2P)) {
		struct mlx5_vf_migration_file *migf;

		migf = mlx5vf_pci_save_device_data(mvdev, true);
		if (IS_ERR(migf))
			return ERR_CAST(migf);
		get_file(migf->filp);
		mvdev->saving_migf = migf;
		return migf->filp;
	}

	if (cur == VFIO_DEVICE_STATE_PRE_COPY_P2P && new == VFIO_DEVICE_STATE_STOP_COPY) {
		ret = mlx5vf_cmd_suspend_vhca(mvdev,
			MLX5_SUSPEND_VHCA_IN_OP_MOD_SUSPEND_RESPONDER);
		if (ret)
			return ERR_PTR(ret);
		ret = mlx5vf_pci_save_device_inc_data(mvdev);
		return ret ? ERR_PTR(ret) : NULL;
	}

	/*
	 * vfio_mig_get_next_state() does not use arcs other than the above
	 */
	WARN_ON(true);
	return ERR_PTR(-EINVAL);
}
/*
 * This function is called in all state_mutex unlock cases to
 * handle a 'deferred_reset' if exists.
 */
void mlx5vf_state_mutex_unlock(struct mlx5vf_pci_core_device *mvdev)
{
again:
	spin_lock(&mvdev->reset_lock);
	if (mvdev->deferred_reset) {
		mvdev->deferred_reset = false;
		/* Drop the spinlock before the (sleeping) cleanup work. */
		spin_unlock(&mvdev->reset_lock);
		mvdev->mig_state = VFIO_DEVICE_STATE_RUNNING;
		mlx5vf_disable_fds(mvdev);
		/*
		 * Re-check under reset_lock: another reset may have been
		 * deferred while the lock was dropped above.
		 */
		goto again;
	}
	/*
	 * Release state_mutex while still holding reset_lock so a concurrent
	 * reset_done cannot see the mutex free and then have its
	 * deferred_reset go unhandled.
	 */
	mutex_unlock(&mvdev->state_mutex);
	spin_unlock(&mvdev->reset_lock);
}
/*
 * migration_set_state callback: walk the migration FSM from the current
 * mig_state to @new_state one arc at a time, executing each arc's device
 * command. Returns a data-transfer file (save/resume arcs), NULL (arcs
 * with no data transfer), or an ERR_PTR on failure.
 */
static struct file *
mlx5vf_pci_set_device_state(struct vfio_device *vdev,
			    enum vfio_device_mig_state new_state)
{
	struct mlx5vf_pci_core_device *mvdev = container_of(
		vdev, struct mlx5vf_pci_core_device, core_device.vdev);
	enum vfio_device_mig_state next_state;
	struct file *res = NULL;
	int ret;
	mutex_lock(&mvdev->state_mutex);
	while (new_state != mvdev->mig_state) {
		/* Ask the core which single arc gets us closer to new_state. */
		ret = vfio_mig_get_next_state(vdev, mvdev->mig_state,
					      new_state, &next_state);
		if (ret) {
			res = ERR_PTR(ret);
			break;
		}
		res = mlx5vf_pci_step_device_state_locked(mvdev, next_state);
		if (IS_ERR(res))
			break;
		mvdev->mig_state = next_state;
		/*
		 * A file may only be returned on the final arc; drop its
		 * reference if the FSM has further to go (should not happen).
		 */
		if (WARN_ON(res && new_state != mvdev->mig_state)) {
			fput(res);
			res = ERR_PTR(-EINVAL);
			break;
		}
	}
	/* Unlock via the helper so a deferred reset gets handled. */
	mlx5vf_state_mutex_unlock(mvdev);
	return res;
}
/*
 * migration_get_data_size callback: query the device for the number of
 * bytes needed to hold the full device state. On success the size is
 * stored through @stop_copy_length.
 */
static int mlx5vf_pci_get_data_size(struct vfio_device *vdev,
				    unsigned long *stop_copy_length)
{
	struct mlx5vf_pci_core_device *mvdev = container_of(
		vdev, struct mlx5vf_pci_core_device, core_device.vdev);
	size_t required_length;
	int err;

	mutex_lock(&mvdev->state_mutex);
	err = mlx5vf_cmd_query_vhca_migration_state(mvdev, &required_length, 0);
	if (!err)
		*stop_copy_length = required_length;
	mlx5vf_state_mutex_unlock(mvdev);

	return err;
}
/* migration_get_state callback: report the cached migration state. */
static int mlx5vf_pci_get_device_state(struct vfio_device *vdev,
				       enum vfio_device_mig_state *curr_state)
{
	struct mlx5vf_pci_core_device *mvdev;

	mvdev = container_of(vdev, struct mlx5vf_pci_core_device,
			     core_device.vdev);

	mutex_lock(&mvdev->state_mutex);
	*curr_state = mvdev->mig_state;
	mlx5vf_state_mutex_unlock(mvdev);

	return 0;
}
/*
 * PCI AER reset_done handler: bring the migration FSM back to RUNNING.
 * Must not sleep on state_mutex here (see comment below), so the reset
 * is recorded as 'deferred' and performed by whichever context next
 * releases state_mutex.
 */
static void mlx5vf_pci_aer_reset_done(struct pci_dev *pdev)
{
	struct mlx5vf_pci_core_device *mvdev = mlx5vf_drvdata(pdev);
	/* Nothing to clean up when migration is not supported. */
	if (!mvdev->migrate_cap)
		return;
	/*
	 * As the higher VFIO layers are holding locks across reset and using
	 * those same locks with the mm_lock we need to prevent ABBA deadlock
	 * with the state_mutex and mm_lock.
	 * In case the state_mutex was taken already we defer the cleanup work
	 * to the unlock flow of the other running context.
	 */
	spin_lock(&mvdev->reset_lock);
	mvdev->deferred_reset = true;
	if (!mutex_trylock(&mvdev->state_mutex)) {
		/* Another context owns state_mutex; it will run the reset. */
		spin_unlock(&mvdev->reset_lock);
		return;
	}
	spin_unlock(&mvdev->reset_lock);
	/* We got the mutex uncontended; run the deferred reset ourselves. */
	mlx5vf_state_mutex_unlock(mvdev);
}
/*
 * VFIO open_device callback: enable the PCI core device and, when the
 * device supports migration, initialize the FSM state to RUNNING.
 */
static int mlx5vf_pci_open_device(struct vfio_device *core_vdev)
{
	struct mlx5vf_pci_core_device *mvdev;
	int err;

	mvdev = container_of(core_vdev, struct mlx5vf_pci_core_device,
			     core_device.vdev);

	err = vfio_pci_core_enable(&mvdev->core_device);
	if (err)
		return err;

	if (mvdev->migrate_cap)
		mvdev->mig_state = VFIO_DEVICE_STATE_RUNNING;
	vfio_pci_core_finish_enable(&mvdev->core_device);

	return 0;
}
/*
 * VFIO close_device callback: tear down any in-flight migration state
 * before handing off to the vfio-pci core close path.
 */
static void mlx5vf_pci_close_device(struct vfio_device *core_vdev)
{
	struct mlx5vf_pci_core_device *mvdev;

	mvdev = container_of(core_vdev, struct mlx5vf_pci_core_device,
			     core_device.vdev);

	mlx5vf_cmd_close_migratable(mvdev);
	vfio_pci_core_close_device(core_vdev);
}
/* Live-migration callbacks registered with the VFIO core. */
static const struct vfio_migration_ops mlx5vf_pci_mig_ops = {
	.migration_set_state = mlx5vf_pci_set_device_state,
	.migration_get_state = mlx5vf_pci_get_device_state,
	.migration_get_data_size = mlx5vf_pci_get_data_size,
};

/* Dirty-page tracking (page tracker) callbacks for the VFIO core. */
static const struct vfio_log_ops mlx5vf_pci_log_ops = {
	.log_start = mlx5vf_start_page_tracker,
	.log_stop = mlx5vf_stop_page_tracker,
	.log_read_and_clear = mlx5vf_tracker_read_and_clear,
};
/*
 * VFIO init callback: run the vfio-pci core init, then probe migration
 * support and wire up the migration/logging ops when available.
 */
static int mlx5vf_pci_init_dev(struct vfio_device *core_vdev)
{
	struct mlx5vf_pci_core_device *mvdev;
	int err;

	mvdev = container_of(core_vdev, struct mlx5vf_pci_core_device,
			     core_device.vdev);

	err = vfio_pci_core_init_dev(core_vdev);
	if (err)
		return err;

	mlx5vf_cmd_set_migratable(mvdev, &mlx5vf_pci_mig_ops,
				  &mlx5vf_pci_log_ops);

	return 0;
}
/* VFIO release callback: undo migration setup, then core release. */
static void mlx5vf_pci_release_dev(struct vfio_device *core_vdev)
{
	struct mlx5vf_pci_core_device *mvdev;

	mvdev = container_of(core_vdev, struct mlx5vf_pci_core_device,
			     core_device.vdev);

	mlx5vf_cmd_remove_migratable(mvdev);
	vfio_pci_core_release_dev(core_vdev);
}
/*
 * VFIO device ops for mlx5 VFs: standard vfio-pci core callbacks plus
 * the iommufd physical-device bind/attach/detach hooks.
 */
static const struct vfio_device_ops mlx5vf_pci_ops = {
	.name = "mlx5-vfio-pci",
	.init = mlx5vf_pci_init_dev,
	.release = mlx5vf_pci_release_dev,
	.open_device = mlx5vf_pci_open_device,
	.close_device = mlx5vf_pci_close_device,
	.ioctl = vfio_pci_core_ioctl,
	.device_feature = vfio_pci_core_ioctl_feature,
	.read = vfio_pci_core_read,
	.write = vfio_pci_core_write,
	.mmap = vfio_pci_core_mmap,
	.request = vfio_pci_core_request,
	.match = vfio_pci_core_match,
	.bind_iommufd = vfio_iommufd_physical_bind,
	.unbind_iommufd = vfio_iommufd_physical_unbind,
	.attach_ioas = vfio_iommufd_physical_attach_ioas,
	.detach_ioas = vfio_iommufd_physical_detach_ioas,
};
/*
 * PCI probe: allocate the mlx5 VFIO device, stash it as drvdata, and
 * register it with the vfio-pci core. The allocation reference is
 * dropped if registration fails.
 */
static int mlx5vf_pci_probe(struct pci_dev *pdev,
			    const struct pci_device_id *id)
{
	struct mlx5vf_pci_core_device *mvdev;
	int err;

	mvdev = vfio_alloc_device(mlx5vf_pci_core_device, core_device.vdev,
				  &pdev->dev, &mlx5vf_pci_ops);
	if (IS_ERR(mvdev))
		return PTR_ERR(mvdev);

	dev_set_drvdata(&pdev->dev, &mvdev->core_device);

	err = vfio_pci_core_register_device(&mvdev->core_device);
	if (err) {
		vfio_put_device(&mvdev->core_device.vdev);
		return err;
	}

	return 0;
}
/* PCI remove: unregister from the vfio-pci core and drop our reference. */
static void mlx5vf_pci_remove(struct pci_dev *pdev)
{
	struct mlx5vf_pci_core_device *vf_dev = mlx5vf_drvdata(pdev);

	vfio_pci_core_unregister_device(&vf_dev->core_device);
	vfio_put_device(&vf_dev->core_device.vdev);
}
/* PCI IDs this meta-driver can be bound to via driver_override. */
static const struct pci_device_id mlx5vf_pci_table[] = {
	{ PCI_DRIVER_OVERRIDE_DEVICE_VFIO(PCI_VENDOR_ID_MELLANOX, 0x101e) }, /* ConnectX Family mlx5Gen Virtual Function */
	{}
};

MODULE_DEVICE_TABLE(pci, mlx5vf_pci_table);

/* AER handlers: reset_done defers migration-state cleanup (see above). */
static const struct pci_error_handlers mlx5vf_err_handlers = {
	.reset_done = mlx5vf_pci_aer_reset_done,
	.error_detected = vfio_pci_core_aer_err_detected,
};
/* PCI driver glue for the mlx5 VFIO meta-driver. */
static struct pci_driver mlx5vf_pci_driver = {
	.name = KBUILD_MODNAME,
	.id_table = mlx5vf_pci_table,
	.probe = mlx5vf_pci_probe,
	.remove = mlx5vf_pci_remove,
	.err_handler = &mlx5vf_err_handlers,
	.driver_managed_dma = true,
};

module_pci_driver(mlx5vf_pci_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Max Gurtovoy <[email protected]>");
MODULE_AUTHOR("Yishai Hadas <[email protected]>");
MODULE_DESCRIPTION(
	"MLX5 VFIO PCI - User Level meta-driver for MLX5 device family");
| linux-master | drivers/vfio/pci/mlx5/main.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2023 Advanced Micro Devices, Inc. */
#include <linux/file.h>
#include <linux/vfio.h>
#include <linux/vfio_pci_core.h>

#include "lm.h"
#include "dirty.h"
#include "vfio_dev.h"
/* Accessor: the VF's own struct pci_dev. */
struct pci_dev *pds_vfio_to_pci_dev(struct pds_vfio_pci_device *pds_vfio)
{
	return pds_vfio->vfio_coredev.pdev;
}

/* Accessor: the generic struct device of the VF. */
struct device *pds_vfio_to_dev(struct pds_vfio_pci_device *pds_vfio)
{
	struct pci_dev *pdev = pds_vfio_to_pci_dev(pds_vfio);

	return &pdev->dev;
}

/* Map a pci_dev's drvdata back to the owning pds VFIO device. */
struct pds_vfio_pci_device *pds_vfio_pci_drvdata(struct pci_dev *pdev)
{
	struct vfio_pci_core_device *core_device = dev_get_drvdata(&pdev->dev);

	return container_of(core_device, struct pds_vfio_pci_device,
			    vfio_coredev);
}
/*
 * Called in all state_mutex unlock paths to handle a pending
 * 'deferred_reset' (set by pds_vfio_reset()/recovery) before the mutex
 * is actually released.
 */
void pds_vfio_state_mutex_unlock(struct pds_vfio_pci_device *pds_vfio)
{
again:
	spin_lock(&pds_vfio->reset_lock);
	if (pds_vfio->deferred_reset) {
		pds_vfio->deferred_reset = false;
		/* A recovery-driven reset also drops migration resources. */
		if (pds_vfio->state == VFIO_DEVICE_STATE_ERROR) {
			pds_vfio_put_restore_file(pds_vfio);
			pds_vfio_put_save_file(pds_vfio);
			pds_vfio_dirty_disable(pds_vfio, false);
		}
		pds_vfio->state = pds_vfio->deferred_reset_state;
		pds_vfio->deferred_reset_state = VFIO_DEVICE_STATE_RUNNING;
		spin_unlock(&pds_vfio->reset_lock);
		/* Re-check in case a reset was deferred again meanwhile. */
		goto again;
	}
	/*
	 * Drop state_mutex while still holding reset_lock so a concurrent
	 * reset cannot slip in between and go unhandled.
	 */
	mutex_unlock(&pds_vfio->state_mutex);
	spin_unlock(&pds_vfio->reset_lock);
}
/*
 * Request a reset of the migration state machine back to RUNNING.
 * If another context currently holds state_mutex, only mark
 * deferred_reset — that holder will perform the cleanup in
 * pds_vfio_state_mutex_unlock(). Otherwise take the mutex ourselves and
 * let the unlock helper run the deferred-reset flow immediately.
 */
void pds_vfio_reset(struct pds_vfio_pci_device *pds_vfio)
{
	spin_lock(&pds_vfio->reset_lock);
	pds_vfio->deferred_reset = true;
	pds_vfio->deferred_reset_state = VFIO_DEVICE_STATE_RUNNING;
	if (!mutex_trylock(&pds_vfio->state_mutex)) {
		spin_unlock(&pds_vfio->reset_lock);
		return;
	}
	spin_unlock(&pds_vfio->reset_lock);
	pds_vfio_state_mutex_unlock(pds_vfio);
}
/*
 * migration_set_state callback: walk the migration FSM from the current
 * state to @new_state one arc at a time. Returns a data-transfer file
 * for save/resume arcs, NULL for arcs without data transfer, or an
 * ERR_PTR on failure.
 */
static struct file *
pds_vfio_set_device_state(struct vfio_device *vdev,
			  enum vfio_device_mig_state new_state)
{
	struct pds_vfio_pci_device *pds_vfio =
		container_of(vdev, struct pds_vfio_pci_device,
			     vfio_coredev.vdev);
	struct file *res = NULL;

	mutex_lock(&pds_vfio->state_mutex);
	/*
	 * only way to transition out of VFIO_DEVICE_STATE_ERROR is via
	 * VFIO_DEVICE_RESET, so prevent the state machine from running since
	 * vfio_mig_get_next_state() will throw a WARN_ON() when transitioning
	 * from VFIO_DEVICE_STATE_ERROR to any other state
	 */
	while (pds_vfio->state != VFIO_DEVICE_STATE_ERROR &&
	       new_state != pds_vfio->state) {
		enum vfio_device_mig_state next_state;

		int err = vfio_mig_get_next_state(vdev, pds_vfio->state,
						  new_state, &next_state);
		if (err) {
			res = ERR_PTR(err);
			break;
		}

		res = pds_vfio_step_device_state_locked(pds_vfio, next_state);
		if (IS_ERR(res))
			break;

		pds_vfio->state = next_state;

		if (WARN_ON(res && new_state != pds_vfio->state)) {
			/*
			 * A file may only be produced on the final arc.
			 * Drop its reference before reporting failure,
			 * otherwise the struct file leaks (mlx5 handles
			 * this path the same way).
			 */
			fput(res);
			res = ERR_PTR(-EINVAL);
			break;
		}
	}
	pds_vfio_state_mutex_unlock(pds_vfio);
	/* still waiting on a deferred_reset */
	if (pds_vfio->state == VFIO_DEVICE_STATE_ERROR)
		res = ERR_PTR(-EIO);

	return res;
}
/* migration_get_state callback: report the cached migration state. */
static int pds_vfio_get_device_state(struct vfio_device *vdev,
				     enum vfio_device_mig_state *current_state)
{
	struct pds_vfio_pci_device *vf_dev;

	vf_dev = container_of(vdev, struct pds_vfio_pci_device,
			      vfio_coredev.vdev);

	mutex_lock(&vf_dev->state_mutex);
	*current_state = vf_dev->state;
	pds_vfio_state_mutex_unlock(vf_dev);

	return 0;
}
/*
 * migration_get_data_size callback: this device family reports a fixed
 * maximum device-state length rather than querying the device.
 */
static int pds_vfio_get_device_state_size(struct vfio_device *vdev,
					  unsigned long *stop_copy_length)
{
	*stop_copy_length = PDS_LM_DEVICE_STATE_LENGTH;
	return 0;
}
/* Live-migration callbacks registered with the VFIO core. */
static const struct vfio_migration_ops pds_vfio_lm_ops = {
	.migration_set_state = pds_vfio_set_device_state,
	.migration_get_state = pds_vfio_get_device_state,
	.migration_get_data_size = pds_vfio_get_device_state_size
};

/* Dirty-page logging callbacks registered with the VFIO core. */
static const struct vfio_log_ops pds_vfio_log_ops = {
	.log_start = pds_vfio_dma_logging_start,
	.log_stop = pds_vfio_dma_logging_stop,
	.log_read_and_clear = pds_vfio_dma_logging_report,
};
/*
 * VFIO init callback: resolve the VF index, run the vfio-pci core init,
 * and advertise STOP_COPY + P2P migration with dirty-page logging.
 */
static int pds_vfio_init_device(struct vfio_device *vdev)
{
	struct pds_vfio_pci_device *pds_vfio =
		container_of(vdev, struct pds_vfio_pci_device,
			     vfio_coredev.vdev);
	struct pci_dev *pdev = to_pci_dev(vdev->dev);
	int vf_id, pci_id, err;

	/* Resolve the VF index before touching the core device. */
	vf_id = pci_iov_vf_id(pdev);
	if (vf_id < 0)
		return vf_id;

	err = vfio_pci_core_init_dev(vdev);
	if (err)
		return err;

	pds_vfio->vf_id = vf_id;

	vdev->migration_flags = VFIO_MIGRATION_STOP_COPY | VFIO_MIGRATION_P2P;
	vdev->mig_ops = &pds_vfio_lm_ops;
	vdev->log_ops = &pds_vfio_log_ops;

	pci_id = PCI_DEVID(pdev->bus->number, pdev->devfn);
	dev_dbg(&pdev->dev,
		"%s: PF %#04x VF %#04x vf_id %d domain %d pds_vfio %p\n",
		__func__, pci_dev_id(pdev->physfn), pci_id, vf_id,
		pci_domain_nr(pdev->bus), pds_vfio);

	return 0;
}
/*
 * VFIO open_device callback: enable the PCI core device and initialize
 * the migration FSM to RUNNING.
 */
static int pds_vfio_open_device(struct vfio_device *vdev)
{
	struct pds_vfio_pci_device *pds_vfio =
		container_of(vdev, struct pds_vfio_pci_device,
			     vfio_coredev.vdev);
	int err;
	err = vfio_pci_core_enable(&pds_vfio->vfio_coredev);
	if (err)
		return err;
	/*
	 * NOTE(review): state_mutex is initialized here (per open) but
	 * pds_vfio_reset() from the AER path does mutex_trylock() on it —
	 * confirm a reset cannot race an open/close window where the mutex
	 * is not (or no longer) initialized.
	 */
	mutex_init(&pds_vfio->state_mutex);
	pds_vfio->state = VFIO_DEVICE_STATE_RUNNING;
	pds_vfio->deferred_reset_state = VFIO_DEVICE_STATE_RUNNING;
	vfio_pci_core_finish_enable(&pds_vfio->vfio_coredev);
	return 0;
}
/*
 * VFIO close_device callback: release any migration files, stop dirty
 * tracking (telling the device), then run the core close path.
 */
static void pds_vfio_close_device(struct vfio_device *vdev)
{
	struct pds_vfio_pci_device *pds_vfio =
		container_of(vdev, struct pds_vfio_pci_device,
			     vfio_coredev.vdev);
	mutex_lock(&pds_vfio->state_mutex);
	pds_vfio_put_restore_file(pds_vfio);
	pds_vfio_put_save_file(pds_vfio);
	pds_vfio_dirty_disable(pds_vfio, true);
	mutex_unlock(&pds_vfio->state_mutex);
	/*
	 * NOTE(review): the mutex is destroyed here while the AER reset
	 * path may still trylock it — confirm those paths cannot overlap.
	 */
	mutex_destroy(&pds_vfio->state_mutex);
	vfio_pci_core_close_device(vdev);
}
static const struct vfio_device_ops pds_vfio_ops = {
.name = "pds-vfio",
.init = pds_vfio_init_device,
.release = vfio_pci_core_release_dev,
.open_device = pds_vfio_open_device,
.close_device = pds_vfio_close_device,
.ioctl = vfio_pci_core_ioctl,
.device_feature = vfio_pci_core_ioctl_feature,
.read = vfio_pci_core_read,
.write = vfio_pci_core_write,
.mmap = vfio_pci_core_mmap,
.request = vfio_pci_core_request,
.match = vfio_pci_core_match,
.bind_iommufd = vfio_iommufd_physical_bind,
.unbind_iommufd = vfio_iommufd_physical_unbind,
.attach_ioas = vfio_iommufd_physical_attach_ioas,
};
/* Export the vfio_device_ops table to the PCI driver entry code. */
const struct vfio_device_ops *pds_vfio_ops_info(void)
{
	return &pds_vfio_ops;
}
| linux-master | drivers/vfio/pci/pds/vfio_dev.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2023 Advanced Micro Devices, Inc. */
#include <linux/interval_tree.h>
#include <linux/vfio.h>
#include <linux/pds/pds_common.h>
#include <linux/pds/pds_core_if.h>
#include <linux/pds/pds_adminq.h>
#include "vfio_dev.h"
#include "cmds.h"
#include "dirty.h"
#define READ_SEQ true
#define WRITE_ACK false
bool pds_vfio_dirty_is_enabled(struct pds_vfio_pci_device *pds_vfio)
{
return pds_vfio->dirty.is_enabled;
}
void pds_vfio_dirty_set_enabled(struct pds_vfio_pci_device *pds_vfio)
{
pds_vfio->dirty.is_enabled = true;
}
void pds_vfio_dirty_set_disabled(struct pds_vfio_pci_device *pds_vfio)
{
pds_vfio->dirty.is_enabled = false;
}
/*
 * Debug helper: fetch the device's view of the dirty-tracking regions
 * via a DMA-mapped status command and dev_dbg() each region. Failures
 * are silently ignored — this is informational only.
 */
static void
pds_vfio_print_guest_region_info(struct pds_vfio_pci_device *pds_vfio,
				 u8 max_regions)
{
	int len = max_regions * sizeof(struct pds_lm_dirty_region_info);
	struct pci_dev *pdev = pds_vfio->vfio_coredev.pdev;
	/* DMA is done through the PF's device, not the VF's. */
	struct device *pdsc_dev = &pci_physfn(pdev)->dev;
	struct pds_lm_dirty_region_info *region_info;
	dma_addr_t regions_dma;
	u8 num_regions;
	int err;
	region_info = kcalloc(max_regions,
			      sizeof(struct pds_lm_dirty_region_info),
			      GFP_KERNEL);
	if (!region_info)
		return;
	regions_dma =
		dma_map_single(pdsc_dev, region_info, len, DMA_FROM_DEVICE);
	if (dma_mapping_error(pdsc_dev, regions_dma))
		goto out_free_region_info;
	err = pds_vfio_dirty_status_cmd(pds_vfio, regions_dma, &max_regions,
					&num_regions);
	/* Unmap before reading the device-written results. */
	dma_unmap_single(pdsc_dev, regions_dma, len, DMA_FROM_DEVICE);
	if (err)
		goto out_free_region_info;
	for (unsigned int i = 0; i < num_regions; i++)
		dev_dbg(&pdev->dev,
			"region_info[%d]: dma_base 0x%llx page_count %u page_size_log2 %u\n",
			i, le64_to_cpu(region_info[i].dma_base),
			le32_to_cpu(region_info[i].page_count),
			region_info[i].page_size_log2);
out_free_region_info:
	kfree(region_info);
}
/*
 * Allocate the host 'seq' and 'ack' bitmaps that mirror the device's
 * dirty-page state. @bytes is the size of each bitmap in bytes.
 *
 * Returns 0 on success or -ENOMEM; on failure nothing stays allocated.
 */
static int pds_vfio_dirty_alloc_bitmaps(struct pds_vfio_dirty *dirty,
					unsigned long bytes)
{
	unsigned long *host_seq_bmp, *host_ack_bmp;

	host_seq_bmp = vzalloc(bytes);
	if (!host_seq_bmp)
		return -ENOMEM;

	host_ack_bmp = vzalloc(bytes);
	if (!host_ack_bmp) {
		/*
		 * The bitmaps come from vzalloc(), so they must be released
		 * with vfree() — bitmap_free() is kfree()-based and would be
		 * an allocator mismatch (the normal free path in
		 * pds_vfio_dirty_free_bitmaps() already uses vfree()).
		 */
		vfree(host_seq_bmp);
		return -ENOMEM;
	}

	dirty->host_seq.bmp = host_seq_bmp;
	dirty->host_ack.bmp = host_ack_bmp;

	return 0;
}
/* Release both host bitmaps; vfree(NULL) is a no-op so no checks needed. */
static void pds_vfio_dirty_free_bitmaps(struct pds_vfio_dirty *dirty)
{
	vfree(dirty->host_seq.bmp);
	dirty->host_seq.bmp = NULL;

	vfree(dirty->host_ack.bmp);
	dirty->host_ack.bmp = NULL;
}
static void __pds_vfio_dirty_free_sgl(struct pds_vfio_pci_device *pds_vfio,
struct pds_vfio_bmp_info *bmp_info)
{
struct pci_dev *pdev = pds_vfio->vfio_coredev.pdev;
struct device *pdsc_dev = &pci_physfn(pdev)->dev;
dma_unmap_single(pdsc_dev, bmp_info->sgl_addr,
bmp_info->num_sge * sizeof(struct pds_lm_sg_elem),
DMA_BIDIRECTIONAL);
kfree(bmp_info->sgl);
bmp_info->num_sge = 0;
bmp_info->sgl = NULL;
bmp_info->sgl_addr = 0;
}
static void pds_vfio_dirty_free_sgl(struct pds_vfio_pci_device *pds_vfio)
{
if (pds_vfio->dirty.host_seq.sgl)
__pds_vfio_dirty_free_sgl(pds_vfio, &pds_vfio->dirty.host_seq);
if (pds_vfio->dirty.host_ack.sgl)
__pds_vfio_dirty_free_sgl(pds_vfio, &pds_vfio->dirty.host_ack);
}
/*
 * Allocate and DMA-map a scatter-gather list large enough to describe a
 * bitmap covering @page_count tracked pages (one SG element can cover a
 * PAGE_SIZE bitmap chunk, i.e. PAGE_SIZE * 8 tracked pages).
 */
static int __pds_vfio_dirty_alloc_sgl(struct pds_vfio_pci_device *pds_vfio,
				      struct pds_vfio_bmp_info *bmp_info,
				      u32 page_count)
{
	struct pci_dev *pdev = pds_vfio->vfio_coredev.pdev;
	struct device *pdsc_dev = &pci_physfn(pdev)->dev;
	struct pds_lm_sg_elem *sg_list;
	dma_addr_t sg_dma;
	u32 max_sge = DIV_ROUND_UP(page_count, PAGE_SIZE * 8);
	size_t sg_bytes = max_sge * sizeof(struct pds_lm_sg_elem);

	sg_list = kzalloc(sg_bytes, GFP_KERNEL);
	if (!sg_list)
		return -ENOMEM;

	sg_dma = dma_map_single(pdsc_dev, sg_list, sg_bytes,
				DMA_BIDIRECTIONAL);
	if (dma_mapping_error(pdsc_dev, sg_dma)) {
		kfree(sg_list);
		return -EIO;
	}

	bmp_info->sgl = sg_list;
	bmp_info->num_sge = max_sge;
	bmp_info->sgl_addr = sg_dma;

	return 0;
}

/* Allocate SGLs for both the seq and ack bitmaps; all-or-nothing. */
static int pds_vfio_dirty_alloc_sgl(struct pds_vfio_pci_device *pds_vfio,
				    u32 page_count)
{
	struct pds_vfio_dirty *dirty = &pds_vfio->dirty;
	int err;

	err = __pds_vfio_dirty_alloc_sgl(pds_vfio, &dirty->host_seq,
					 page_count);
	if (err)
		return err;

	err = __pds_vfio_dirty_alloc_sgl(pds_vfio, &dirty->host_ack,
					 page_count);
	if (err) {
		__pds_vfio_dirty_free_sgl(pds_vfio, &dirty->host_seq);
		return err;
	}

	return 0;
}
/*
 * Enable device dirty-page tracking over the IOVA ranges in @ranges
 * (combined down to a single region — see comment below), then allocate
 * the host seq/ack bitmaps and their DMA SGLs sized for the region.
 *
 * Returns 0 on success; -EINVAL if already enabled or no range is given,
 * -EEXIST/-EOPNOTSUPP based on device status, or a negative errno.
 */
static int pds_vfio_dirty_enable(struct pds_vfio_pci_device *pds_vfio,
				 struct rb_root_cached *ranges, u32 nnodes,
				 u64 *page_size)
{
	struct pci_dev *pdev = pds_vfio->vfio_coredev.pdev;
	struct device *pdsc_dev = &pci_physfn(pdev)->dev;
	struct pds_vfio_dirty *dirty = &pds_vfio->dirty;
	u64 region_start, region_size, region_page_size;
	struct pds_lm_dirty_region_info *region_info;
	struct interval_tree_node *node = NULL;
	u8 max_regions = 0, num_regions;
	dma_addr_t regions_dma = 0;
	u32 num_ranges = nnodes;
	u32 page_count;
	u16 len;
	int err;
	dev_dbg(&pdev->dev, "vf%u: Start dirty page tracking\n",
		pds_vfio->vf_id);
	if (pds_vfio_dirty_is_enabled(pds_vfio))
		return -EINVAL;
	/* find if dirty tracking is disabled, i.e. num_regions == 0 */
	err = pds_vfio_dirty_status_cmd(pds_vfio, 0, &max_regions,
					&num_regions);
	if (err < 0) {
		dev_err(&pdev->dev, "Failed to get dirty status, err %pe\n",
			ERR_PTR(err));
		return err;
	} else if (num_regions) {
		dev_err(&pdev->dev,
			"Dirty tracking already enabled for %d regions\n",
			num_regions);
		return -EEXIST;
	} else if (!max_regions) {
		dev_err(&pdev->dev,
			"Device doesn't support dirty tracking, max_regions %d\n",
			max_regions);
		return -EOPNOTSUPP;
	}
	/*
	 * Only support 1 region for now. If there are any large gaps in the
	 * VM's address regions, then this would be a waste of memory as we are
	 * generating 2 bitmaps (ack/seq) from the min address to the max
	 * address of the VM's address regions. In the future, if we support
	 * more than one region in the device/driver we can split the bitmaps
	 * on the largest address region gaps. We can do this split up to the
	 * max_regions times returned from the dirty_status command.
	 */
	max_regions = 1;
	if (num_ranges > max_regions) {
		vfio_combine_iova_ranges(ranges, nnodes, max_regions);
		num_ranges = max_regions;
	}
	node = interval_tree_iter_first(ranges, 0, ULONG_MAX);
	if (!node)
		return -EINVAL;
	region_size = node->last - node->start + 1;
	region_start = node->start;
	region_page_size = *page_size;
	len = sizeof(*region_info);
	region_info = kzalloc(len, GFP_KERNEL);
	if (!region_info)
		return -ENOMEM;
	page_count = DIV_ROUND_UP(region_size, region_page_size);
	region_info->dma_base = cpu_to_le64(region_start);
	region_info->page_count = cpu_to_le32(page_count);
	region_info->page_size_log2 = ilog2(region_page_size);
	regions_dma = dma_map_single(pdsc_dev, (void *)region_info, len,
				     DMA_BIDIRECTIONAL);
	if (dma_mapping_error(pdsc_dev, regions_dma)) {
		err = -ENOMEM;
		goto out_free_region_info;
	}
	err = pds_vfio_dirty_enable_cmd(pds_vfio, regions_dma, max_regions);
	dma_unmap_single(pdsc_dev, regions_dma, len, DMA_BIDIRECTIONAL);
	if (err)
		goto out_free_region_info;
	/*
	 * page_count might be adjusted by the device,
	 * update it before freeing region_info DMA
	 */
	page_count = le32_to_cpu(region_info->page_count);
	dev_dbg(&pdev->dev,
		"region_info: regions_dma 0x%llx dma_base 0x%llx page_count %u page_size_log2 %u\n",
		regions_dma, region_start, page_count,
		(u8)ilog2(region_page_size));
	/*
	 * NOTE(review): page_count / BITS_PER_BYTE truncates when page_count
	 * is not a multiple of 8 — confirm the device always returns an
	 * 8-aligned page_count, otherwise the bitmaps are a byte short.
	 */
	err = pds_vfio_dirty_alloc_bitmaps(dirty, page_count / BITS_PER_BYTE);
	if (err) {
		dev_err(&pdev->dev, "Failed to alloc dirty bitmaps: %pe\n",
			ERR_PTR(err));
		goto out_free_region_info;
	}
	err = pds_vfio_dirty_alloc_sgl(pds_vfio, page_count);
	if (err) {
		dev_err(&pdev->dev, "Failed to alloc dirty sg lists: %pe\n",
			ERR_PTR(err));
		goto out_free_bitmaps;
	}
	dirty->region_start = region_start;
	dirty->region_size = region_size;
	dirty->region_page_size = region_page_size;
	pds_vfio_dirty_set_enabled(pds_vfio);
	pds_vfio_print_guest_region_info(pds_vfio, max_regions);
	kfree(region_info);
	return 0;
out_free_bitmaps:
	pds_vfio_dirty_free_bitmaps(dirty);
out_free_region_info:
	kfree(region_info);
	return err;
}
/*
 * Disable dirty-page tracking and release all tracking resources.
 * @send_cmd: when true, also tell the device to stop tracking and clear
 * the host LM status; callers on the deferred-reset path pass false
 * (see pds_vfio_state_mutex_unlock()).
 */
void pds_vfio_dirty_disable(struct pds_vfio_pci_device *pds_vfio, bool send_cmd)
{
	if (pds_vfio_dirty_is_enabled(pds_vfio)) {
		pds_vfio_dirty_set_disabled(pds_vfio);
		if (send_cmd)
			pds_vfio_dirty_disable_cmd(pds_vfio);
		pds_vfio_dirty_free_sgl(pds_vfio);
		pds_vfio_dirty_free_bitmaps(&pds_vfio->dirty);
	}
	if (send_cmd)
		pds_vfio_send_host_vf_lm_status_cmd(pds_vfio, PDS_LM_STA_NONE);
}
/*
 * Transfer a section [offset, offset + bmp_bytes) of a host bitmap
 * to/from the device: translate the vmalloc'd bitmap pages into an SG
 * table, DMA-map it, copy the DMA addresses into the pre-mapped device
 * SGL, and issue the seq/ack command.
 *
 * @read_seq selects direction: READ_SEQ pulls the device's 'seq' bits
 * (DMA_FROM_DEVICE); WRITE_ACK pushes our 'ack' bits (DMA_TO_DEVICE).
 */
static int pds_vfio_dirty_seq_ack(struct pds_vfio_pci_device *pds_vfio,
				  struct pds_vfio_bmp_info *bmp_info,
				  u32 offset, u32 bmp_bytes, bool read_seq)
{
	const char *bmp_type_str = read_seq ? "read_seq" : "write_ack";
	u8 dma_dir = read_seq ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
	struct pci_dev *pdev = pds_vfio->vfio_coredev.pdev;
	struct device *pdsc_dev = &pci_physfn(pdev)->dev;
	unsigned long long npages;
	struct sg_table sg_table;
	struct scatterlist *sg;
	struct page **pages;
	u32 page_offset;
	const void *bmp;
	size_t size;
	u16 num_sge;
	int err;
	int i;
	/* Round bmp down to its page; keep the in-page offset separately. */
	bmp = (void *)((u64)bmp_info->bmp + offset);
	page_offset = offset_in_page(bmp);
	bmp -= page_offset;
	/*
	 * Start and end of bitmap section to seq/ack might not be page
	 * aligned, so use the page_offset to account for that so there
	 * will be enough pages to represent the bmp_bytes
	 */
	npages = DIV_ROUND_UP_ULL(bmp_bytes + page_offset, PAGE_SIZE);
	pages = kmalloc_array(npages, sizeof(*pages), GFP_KERNEL);
	if (!pages)
		return -ENOMEM;
	/* Resolve each vmalloc page backing the bitmap section. */
	for (unsigned long long i = 0; i < npages; i++) {
		struct page *page = vmalloc_to_page(bmp);
		if (!page) {
			err = -EFAULT;
			goto out_free_pages;
		}
		pages[i] = page;
		bmp += PAGE_SIZE;
	}
	err = sg_alloc_table_from_pages(&sg_table, pages, npages, page_offset,
					bmp_bytes, GFP_KERNEL);
	if (err)
		goto out_free_pages;
	err = dma_map_sgtable(pdsc_dev, &sg_table, dma_dir, 0);
	if (err)
		goto out_free_sg_table;
	/* Mirror the mapped DMA segments into the device-visible SGL. */
	for_each_sgtable_dma_sg(&sg_table, sg, i) {
		struct pds_lm_sg_elem *sg_elem = &bmp_info->sgl[i];
		sg_elem->addr = cpu_to_le64(sg_dma_address(sg));
		sg_elem->len = cpu_to_le32(sg_dma_len(sg));
	}
	num_sge = sg_table.nents;
	size = num_sge * sizeof(struct pds_lm_sg_elem);
	dma_sync_single_for_device(pdsc_dev, bmp_info->sgl_addr, size, dma_dir);
	err = pds_vfio_dirty_seq_ack_cmd(pds_vfio, bmp_info->sgl_addr, num_sge,
					 offset, bmp_bytes, read_seq);
	if (err)
		dev_err(&pdev->dev,
			"Dirty bitmap %s failed offset %u bmp_bytes %u num_sge %u DMA 0x%llx: %pe\n",
			bmp_type_str, offset, bmp_bytes,
			num_sge, bmp_info->sgl_addr, ERR_PTR(err));
	dma_sync_single_for_cpu(pdsc_dev, bmp_info->sgl_addr, size, dma_dir);
	/* Success path falls through the same labels for cleanup. */
	dma_unmap_sgtable(pdsc_dev, &sg_table, dma_dir, 0);
out_free_sg_table:
	sg_free_table(&sg_table);
out_free_pages:
	kfree(pages);
	return err;
}
/* Push the ack bitmap section [offset, offset + len) to the device. */
static int pds_vfio_dirty_write_ack(struct pds_vfio_pci_device *pds_vfio,
				    u32 offset, u32 len)
{
	struct pds_vfio_bmp_info *ack = &pds_vfio->dirty.host_ack;

	return pds_vfio_dirty_seq_ack(pds_vfio, ack, offset, len, WRITE_ACK);
}

/* Pull the seq bitmap section [offset, offset + len) from the device. */
static int pds_vfio_dirty_read_seq(struct pds_vfio_pci_device *pds_vfio,
				   u32 offset, u32 len)
{
	struct pds_vfio_bmp_info *seq = &pds_vfio->dirty.host_seq;

	return pds_vfio_dirty_seq_ack(pds_vfio, seq, offset, len, READ_SEQ);
}
/*
 * Diff the freshly-read 'seq' bitmap against the last-acked 'ack'
 * bitmap and report every changed page into @dirty_bitmap. The ack
 * copy is updated so the next write_ack acknowledges what we consumed.
 *
 * @bmp_offset/@len_bytes select the (u64-aligned) bitmap section.
 */
static int pds_vfio_dirty_process_bitmaps(struct pds_vfio_pci_device *pds_vfio,
					  struct iova_bitmap *dirty_bitmap,
					  u32 bmp_offset, u32 len_bytes)
{
	u64 page_size = pds_vfio->dirty.region_page_size;
	u64 region_start = pds_vfio->dirty.region_start;
	u32 bmp_offset_bit;
	__le64 *seq, *ack;
	int dword_count;

	dword_count = len_bytes / sizeof(u64);
	seq = (__le64 *)((u64)pds_vfio->dirty.host_seq.bmp + bmp_offset);
	ack = (__le64 *)((u64)pds_vfio->dirty.host_ack.bmp + bmp_offset);
	bmp_offset_bit = bmp_offset * 8;

	for (int i = 0; i < dword_count; i++) {
		u64 xor = le64_to_cpu(seq[i]) ^ le64_to_cpu(ack[i]);

		/* prepare for next write_ack call */
		ack[i] = seq[i];

		for (u8 bit_i = 0; bit_i < BITS_PER_TYPE(u64); ++bit_i) {
			/*
			 * BIT_ULL() rather than BIT(): the mask is tested
			 * against a u64 and bit_i runs up to 63, which would
			 * overflow a 32-bit unsigned long with BIT().
			 */
			if (xor & BIT_ULL(bit_i)) {
				u64 abs_bit_i = bmp_offset_bit +
						i * BITS_PER_TYPE(u64) + bit_i;
				u64 addr = abs_bit_i * page_size + region_start;

				iova_bitmap_set(dirty_bitmap, addr, page_size);
			}
		}
	}

	return 0;
}
/*
 * Core of log_read_and_clear: read the device's seq bitmap for the
 * requested range, fold the seq/ack difference into @dirty_bitmap, then
 * acknowledge what was consumed by writing the ack bitmap back.
 *
 * Returns 0 on success or a negative errno (including -EINVAL when
 * tracking is disabled or the range is out of bounds).
 */
static int pds_vfio_dirty_sync(struct pds_vfio_pci_device *pds_vfio,
			       struct iova_bitmap *dirty_bitmap,
			       unsigned long iova, unsigned long length)
{
	struct device *dev = &pds_vfio->vfio_coredev.pdev->dev;
	struct pds_vfio_dirty *dirty = &pds_vfio->dirty;
	u64 bmp_offset, bmp_bytes;
	u64 bitmap_size, pages;
	int err;
	dev_dbg(dev, "vf%u: Get dirty page bitmap\n", pds_vfio->vf_id);
	if (!pds_vfio_dirty_is_enabled(pds_vfio)) {
		dev_err(dev, "vf%u: Sync failed, dirty tracking is disabled\n",
			pds_vfio->vf_id);
		return -EINVAL;
	}
	pages = DIV_ROUND_UP(length, pds_vfio->dirty.region_page_size);
	/* Bitmap bytes needed, rounded up to whole u64 words. */
	bitmap_size =
		round_up(pages, sizeof(u64) * BITS_PER_BYTE) / BITS_PER_BYTE;
	dev_dbg(dev,
		"vf%u: iova 0x%lx length %lu page_size %llu pages %llu bitmap_size %llu\n",
		pds_vfio->vf_id, iova, length, pds_vfio->dirty.region_page_size,
		pages, bitmap_size);
	/*
	 * NOTE(review): adding region_start to both sides makes this check
	 * equivalent to (iova + length) > region_size, and bmp_offset below
	 * divides iova directly by the page size — i.e. iova is treated as
	 * an offset relative to region_start. Confirm callers only pass
	 * ranges where that holds (regions not starting at 0 would skew
	 * both the bounds check and the bitmap offset).
	 */
	if (!length || ((dirty->region_start + iova + length) >
			(dirty->region_start + dirty->region_size))) {
		dev_err(dev, "Invalid iova 0x%lx and/or length 0x%lx to sync\n",
			iova, length);
		return -EINVAL;
	}
	/* bitmap is modified in 64 bit chunks */
	bmp_bytes = ALIGN(DIV_ROUND_UP(length / dirty->region_page_size,
				       sizeof(u64)),
			  sizeof(u64));
	if (bmp_bytes != bitmap_size) {
		dev_err(dev,
			"Calculated bitmap bytes %llu not equal to bitmap size %llu\n",
			bmp_bytes, bitmap_size);
		return -EINVAL;
	}
	bmp_offset = DIV_ROUND_UP(iova / dirty->region_page_size, sizeof(u64));
	dev_dbg(dev,
		"Syncing dirty bitmap, iova 0x%lx length 0x%lx, bmp_offset %llu bmp_bytes %llu\n",
		iova, length, bmp_offset, bmp_bytes);
	err = pds_vfio_dirty_read_seq(pds_vfio, bmp_offset, bmp_bytes);
	if (err)
		return err;
	err = pds_vfio_dirty_process_bitmaps(pds_vfio, dirty_bitmap, bmp_offset,
					     bmp_bytes);
	if (err)
		return err;
	err = pds_vfio_dirty_write_ack(pds_vfio, bmp_offset, bmp_bytes);
	if (err)
		return err;
	return 0;
}
/* VFIO log_read_and_clear op: sync dirty bits for [iova, iova+length). */
int pds_vfio_dma_logging_report(struct vfio_device *vdev, unsigned long iova,
				unsigned long length, struct iova_bitmap *dirty)
{
	struct pds_vfio_pci_device *pds_vfio =
		container_of(vdev, struct pds_vfio_pci_device,
			     vfio_coredev.vdev);
	int ret;

	mutex_lock(&pds_vfio->state_mutex);
	ret = pds_vfio_dirty_sync(pds_vfio, dirty, iova, length);
	pds_vfio_state_mutex_unlock(pds_vfio);

	return ret;
}

/* VFIO log_start op: enable device dirty-page tracking over @ranges. */
int pds_vfio_dma_logging_start(struct vfio_device *vdev,
			       struct rb_root_cached *ranges, u32 nnodes,
			       u64 *page_size)
{
	struct pds_vfio_pci_device *pds_vfio =
		container_of(vdev, struct pds_vfio_pci_device,
			     vfio_coredev.vdev);
	int ret;

	mutex_lock(&pds_vfio->state_mutex);
	pds_vfio_send_host_vf_lm_status_cmd(pds_vfio, PDS_LM_STA_IN_PROGRESS);
	ret = pds_vfio_dirty_enable(pds_vfio, ranges, nnodes, page_size);
	pds_vfio_state_mutex_unlock(pds_vfio);

	return ret;
}

/* VFIO log_stop op: disable tracking and notify the device. */
int pds_vfio_dma_logging_stop(struct vfio_device *vdev)
{
	struct pds_vfio_pci_device *pds_vfio =
		container_of(vdev, struct pds_vfio_pci_device,
			     vfio_coredev.vdev);

	mutex_lock(&pds_vfio->state_mutex);
	pds_vfio_dirty_disable(pds_vfio, true);
	pds_vfio_state_mutex_unlock(pds_vfio);

	return 0;
}
| linux-master | drivers/vfio/pci/pds/dirty.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2023 Advanced Micro Devices, Inc. */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/types.h>
#include <linux/vfio.h>
#include <linux/pds/pds_common.h>
#include <linux/pds/pds_core_if.h>
#include <linux/pds/pds_adminq.h>
#include "vfio_dev.h"
#include "pci_drv.h"
#include "cmds.h"
#define PDS_VFIO_DRV_DESCRIPTION "AMD/Pensando VFIO Device Driver"
#define PCI_VENDOR_ID_PENSANDO 0x1dd8
/*
 * Handle an asynchronous device-recovery notification by scheduling a
 * deferred reset into VFIO_DEVICE_STATE_ERROR where required (see the
 * comments below for when that is).
 */
static void pds_vfio_recovery(struct pds_vfio_pci_device *pds_vfio)
{
	bool deferred_reset_needed = false;
	/*
	 * Documentation states that the kernel migration driver must not
	 * generate asynchronous device state transitions outside of
	 * manipulation by the user or the VFIO_DEVICE_RESET ioctl.
	 *
	 * Since recovery is an asynchronous event received from the device,
	 * initiate a deferred reset. Issue a deferred reset in the following
	 * situations:
	 * 1. Migration is in progress, which will cause the next step of
	 * the migration to fail.
	 * 2. If the device is in a state that will be set to
	 * VFIO_DEVICE_STATE_RUNNING on the next action (i.e. VM is
	 * shutdown and device is in VFIO_DEVICE_STATE_STOP).
	 */
	mutex_lock(&pds_vfio->state_mutex);
	if ((pds_vfio->state != VFIO_DEVICE_STATE_RUNNING &&
	     pds_vfio->state != VFIO_DEVICE_STATE_ERROR) ||
	    (pds_vfio->state == VFIO_DEVICE_STATE_RUNNING &&
	     pds_vfio_dirty_is_enabled(pds_vfio)))
		deferred_reset_needed = true;
	mutex_unlock(&pds_vfio->state_mutex);
	/*
	 * On the next user initiated state transition, the device will
	 * transition to the VFIO_DEVICE_STATE_ERROR. At this point it's the user's
	 * responsibility to reset the device.
	 *
	 * If a VFIO_DEVICE_RESET is requested post recovery and before the next
	 * state transition, then the deferred reset state will be set to
	 * VFIO_DEVICE_STATE_RUNNING.
	 */
	if (deferred_reset_needed) {
		spin_lock(&pds_vfio->reset_lock);
		pds_vfio->deferred_reset = true;
		pds_vfio->deferred_reset_state = VFIO_DEVICE_STATE_ERROR;
		spin_unlock(&pds_vfio->reset_lock);
	}
}
/*
 * pds_core notifier callback: react to PDS_EVENT_RESET completion
 * (state == 1) by kicking the recovery/deferred-reset flow.
 */
static int pds_vfio_pci_notify_handler(struct notifier_block *nb,
				       unsigned long ecode, void *data)
{
	struct pds_vfio_pci_device *pds_vfio =
		container_of(nb, struct pds_vfio_pci_device, nb);
	struct device *dev = pds_vfio_to_dev(pds_vfio);
	union pds_core_notifyq_comp *event = data;

	dev_dbg(dev, "%s: event code %lu\n", __func__, ecode);

	/*
	 * We don't need to do anything for RESET state==0 as there is no notify
	 * or feedback mechanism available, and it is possible that we won't
	 * even see a state==0 event since the pds_core recovery is pending.
	 *
	 * Any requests from VFIO while state==0 will fail, which will return
	 * error and may cause migration to fail.
	 */
	if (ecode != PDS_EVENT_RESET)
		return 0;

	dev_info(dev, "%s: PDS_EVENT_RESET event received, state==%d\n",
		 __func__, event->reset.state);
	/*
	 * pds_core device finished recovery and sent us the
	 * notification (state == 1) to allow us to recover
	 */
	if (event->reset.state == 1)
		pds_vfio_recovery(pds_vfio);

	return 0;
}
/*
 * Register the pds_core event notifier once; a non-NULL notifier_call
 * marks it as already registered.
 */
static int
pds_vfio_pci_register_event_handler(struct pds_vfio_pci_device *pds_vfio)
{
	struct device *dev = pds_vfio_to_dev(pds_vfio);
	struct notifier_block *nb = &pds_vfio->nb;
	int err;

	if (nb->notifier_call)
		return 0;

	nb->notifier_call = pds_vfio_pci_notify_handler;
	err = pdsc_register_notify(nb);
	if (err) {
		/* Clear the callback so a later register attempt can retry. */
		nb->notifier_call = NULL;
		dev_err(dev,
			"failed to register pds event handler: %pe\n",
			ERR_PTR(err));
		return -EINVAL;
	}
	dev_dbg(dev, "pds event handler registered\n");

	return 0;
}

/* Unregister the pds_core event notifier, if it was registered. */
static void
pds_vfio_pci_unregister_event_handler(struct pds_vfio_pci_device *pds_vfio)
{
	struct notifier_block *nb = &pds_vfio->nb;

	if (!nb->notifier_call)
		return;

	pdsc_unregister_notify(nb);
	nb->notifier_call = NULL;
}
/*
 * PCI probe: allocate the pds VFIO device, register it with the
 * vfio-pci core, then register as a pds_core client and hook the event
 * notifier. Cleanup unwinds in strict reverse order on failure.
 */
static int pds_vfio_pci_probe(struct pci_dev *pdev,
			      const struct pci_device_id *id)
{
	struct pds_vfio_pci_device *pds_vfio;
	int err;

	pds_vfio = vfio_alloc_device(pds_vfio_pci_device, vfio_coredev.vdev,
				     &pdev->dev, pds_vfio_ops_info());
	if (IS_ERR(pds_vfio))
		return PTR_ERR(pds_vfio);

	dev_set_drvdata(&pdev->dev, &pds_vfio->vfio_coredev);

	err = vfio_pci_core_register_device(&pds_vfio->vfio_coredev);
	if (err)
		goto err_put_vdev;

	err = pds_vfio_register_client_cmd(pds_vfio);
	if (err) {
		dev_err(&pdev->dev, "failed to register as client: %pe\n",
			ERR_PTR(err));
		goto err_unreg_coredev;
	}

	err = pds_vfio_pci_register_event_handler(pds_vfio);
	if (err)
		goto err_unreg_client;

	return 0;

err_unreg_client:
	pds_vfio_unregister_client_cmd(pds_vfio);
err_unreg_coredev:
	vfio_pci_core_unregister_device(&pds_vfio->vfio_coredev);
err_put_vdev:
	vfio_put_device(&pds_vfio->vfio_coredev.vdev);
	return err;
}
/* Tear down in exact reverse order of pds_vfio_pci_probe(). */
static void pds_vfio_pci_remove(struct pci_dev *pdev)
{
	struct pds_vfio_pci_device *pds_vfio = pds_vfio_pci_drvdata(pdev);
	pds_vfio_pci_unregister_event_handler(pds_vfio);
	pds_vfio_unregister_client_cmd(pds_vfio);
	vfio_pci_core_unregister_device(&pds_vfio->vfio_coredev);
	vfio_put_device(&pds_vfio->vfio_coredev.vdev);
}
/* PCI IDs this variant driver can claim (driver_override required). */
static const struct pci_device_id pds_vfio_pci_table[] = {
	{ PCI_DRIVER_OVERRIDE_DEVICE_VFIO(PCI_VENDOR_ID_PENSANDO, 0x1003) }, /* Ethernet VF */
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, pds_vfio_pci_table);
/* AER reset_done hook: re-initialize our migration state after a reset. */
static void pds_vfio_pci_aer_reset_done(struct pci_dev *pdev)
{
	struct pds_vfio_pci_device *pds_vfio = pds_vfio_pci_drvdata(pdev);
	pds_vfio_reset(pds_vfio);
}
/* PCI AER callbacks; error_detected is handled by the vfio-pci core. */
static const struct pci_error_handlers pds_vfio_pci_err_handlers = {
	.reset_done = pds_vfio_pci_aer_reset_done,
	.error_detected = vfio_pci_core_aer_err_detected,
};
/* PCI driver glue; driver_managed_dma because vfio owns the IOMMU domain. */
static struct pci_driver pds_vfio_pci_driver = {
	.name = KBUILD_MODNAME,
	.id_table = pds_vfio_pci_table,
	.probe = pds_vfio_pci_probe,
	.remove = pds_vfio_pci_remove,
	.err_handler = &pds_vfio_pci_err_handlers,
	.driver_managed_dma = true,
};
module_pci_driver(pds_vfio_pci_driver);
MODULE_DESCRIPTION(PDS_VFIO_DRV_DESCRIPTION);
MODULE_AUTHOR("Brett Creeley <[email protected]>");
MODULE_LICENSE("GPL");
| linux-master | drivers/vfio/pci/pds/pci_drv.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2023 Advanced Micro Devices, Inc. */
#include <linux/anon_inodes.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/highmem.h>
#include <linux/vfio.h>
#include <linux/vfio_pci_core.h>
#include "vfio_dev.h"
#include "cmds.h"
/*
 * Create an anon-inode backed live-migration file of @size bytes.
 * Backing memory is one kvzalloc'd buffer; a page array and sg_table over
 * that buffer are kept so it can later be DMA-mapped for the device.
 * Returns the new lm_file (with an extra file reference held) or NULL.
 */
static struct pds_vfio_lm_file *
pds_vfio_get_lm_file(const struct file_operations *fops, int flags, u64 size)
{
	struct pds_vfio_lm_file *lm_file = NULL;
	unsigned long long npages;
	struct page **pages;
	void *page_mem;
	const void *p;
	if (!size)
		return NULL;
	/* Alloc file structure */
	lm_file = kzalloc(sizeof(*lm_file), GFP_KERNEL);
	if (!lm_file)
		return NULL;
	/* Create file */
	lm_file->filep =
		anon_inode_getfile("pds_vfio_lm", fops, lm_file, flags);
	if (IS_ERR(lm_file->filep))
		goto out_free_file;
	stream_open(lm_file->filep->f_inode, lm_file->filep);
	mutex_init(&lm_file->lock);
	/* prevent file from being released before we are done with it */
	get_file(lm_file->filep);
	/* Allocate memory for file pages */
	npages = DIV_ROUND_UP_ULL(size, PAGE_SIZE);
	pages = kmalloc_array(npages, sizeof(*pages), GFP_KERNEL);
	if (!pages)
		goto out_put_file;
	page_mem = kvzalloc(ALIGN(size, PAGE_SIZE), GFP_KERNEL);
	if (!page_mem)
		goto out_free_pages_array;
	/* round down to the page boundary before walking the buffer */
	p = page_mem - offset_in_page(page_mem);
	/* resolve each backing page; kvzalloc may be kmalloc or vmalloc memory */
	for (unsigned long long i = 0; i < npages; i++) {
		if (is_vmalloc_addr(p))
			pages[i] = vmalloc_to_page(p);
		else
			pages[i] = kmap_to_page((void *)p);
		if (!pages[i])
			goto out_free_page_mem;
		p += PAGE_SIZE;
	}
	/* Create scatterlist of file pages to use for DMA mapping later */
	if (sg_alloc_table_from_pages(&lm_file->sg_table, pages, npages, 0,
				      size, GFP_KERNEL))
		goto out_free_page_mem;
	lm_file->size = size;
	lm_file->pages = pages;
	lm_file->npages = npages;
	lm_file->page_mem = page_mem;
	lm_file->alloc_size = npages * PAGE_SIZE;
	return lm_file;
	/* error unwind in reverse acquisition order */
out_free_page_mem:
	kvfree(page_mem);
out_free_pages_array:
	kfree(pages);
out_put_file:
	fput(lm_file->filep);
	mutex_destroy(&lm_file->lock);
out_free_file:
	kfree(lm_file);
	return NULL;
}
/*
 * Release the backing memory of an lm_file and drop our file reference.
 * The lm_file struct itself is freed by pds_vfio_release_file() when the
 * last file reference goes away.
 */
static void pds_vfio_put_lm_file(struct pds_vfio_lm_file *lm_file)
{
	mutex_lock(&lm_file->lock);
	lm_file->size = 0;
	lm_file->alloc_size = 0;
	/* Free scatter list of file pages */
	sg_free_table(&lm_file->sg_table);
	kvfree(lm_file->page_mem);
	lm_file->page_mem = NULL;
	kfree(lm_file->pages);
	lm_file->pages = NULL;
	mutex_unlock(&lm_file->lock);
	/* allow file to be released since we are done with it */
	fput(lm_file->filep);
}
/* Drop the save-direction migration file, if one exists. */
void pds_vfio_put_save_file(struct pds_vfio_pci_device *pds_vfio)
{
	struct pds_vfio_lm_file *lm_file = pds_vfio->save_file;
	if (lm_file) {
		pds_vfio->save_file = NULL;
		pds_vfio_put_lm_file(lm_file);
	}
}
/* Drop the restore-direction migration file, if one exists. */
void pds_vfio_put_restore_file(struct pds_vfio_pci_device *pds_vfio)
{
	struct pds_vfio_lm_file *lm_file = pds_vfio->restore_file;
	if (lm_file) {
		pds_vfio->restore_file = NULL;
		pds_vfio_put_lm_file(lm_file);
	}
}
/*
 * Translate a byte @offset in the lm_file into its backing page, using a
 * cached scatterlist position (last_offset/last_offset_sg) so sequential
 * reads/writes don't rescan the list.  Returns NULL if @offset is past
 * the end of the scatterlist.
 */
static struct page *pds_vfio_get_file_page(struct pds_vfio_lm_file *lm_file,
					   unsigned long offset)
{
	unsigned long cur_offset = 0;
	struct scatterlist *sg;
	unsigned int i;
	/* All accesses are sequential */
	if (offset < lm_file->last_offset || !lm_file->last_offset_sg) {
		/* rewind the cache on a backward seek or first use */
		lm_file->last_offset = 0;
		lm_file->last_offset_sg = lm_file->sg_table.sgl;
		lm_file->sg_last_entry = 0;
	}
	cur_offset = lm_file->last_offset;
	for_each_sg(lm_file->last_offset_sg, sg,
		    lm_file->sg_table.orig_nents - lm_file->sg_last_entry, i) {
		if (offset < sg->length + cur_offset) {
			/* remember where we stopped for the next lookup */
			lm_file->last_offset_sg = sg;
			lm_file->sg_last_entry += i;
			lm_file->last_offset = cur_offset;
			return nth_page(sg_page(sg),
					(offset - cur_offset) / PAGE_SIZE);
		}
		cur_offset += sg->length;
	}
	return NULL;
}
/*
 * ->release for both save and restore files: reset the position/size and
 * free the lm_file struct itself (backing memory was already freed in
 * pds_vfio_put_lm_file()).
 */
static int pds_vfio_release_file(struct inode *inode, struct file *filp)
{
	struct pds_vfio_lm_file *lm_file = filp->private_data;
	mutex_lock(&lm_file->lock);
	lm_file->filep->f_pos = 0;
	lm_file->size = 0;
	mutex_unlock(&lm_file->lock);
	mutex_destroy(&lm_file->lock);
	kfree(lm_file);
	return 0;
}
/*
 * ->read for the save file: copy saved device state out to userspace a
 * page at a time.  The file is a stream (stream_open()), so an explicit
 * *pos is rejected with -ESPIPE and f_pos is used instead.
 */
static ssize_t pds_vfio_save_read(struct file *filp, char __user *buf,
				  size_t len, loff_t *pos)
{
	struct pds_vfio_lm_file *lm_file = filp->private_data;
	ssize_t done = 0;
	if (pos)
		return -ESPIPE;
	pos = &filp->f_pos;
	mutex_lock(&lm_file->lock);
	if (*pos > lm_file->size) {
		done = -EINVAL;
		goto out_unlock;
	}
	/* clamp the request to the remaining state bytes */
	len = min_t(size_t, lm_file->size - *pos, len);
	while (len) {
		size_t page_offset;
		struct page *page;
		size_t page_len;
		u8 *from_buff;
		int err;
		page_offset = (*pos) % PAGE_SIZE;
		page = pds_vfio_get_file_page(lm_file, *pos - page_offset);
		if (!page) {
			/* only report an error if nothing was copied yet */
			if (done == 0)
				done = -EINVAL;
			goto out_unlock;
		}
		page_len = min_t(size_t, len, PAGE_SIZE - page_offset);
		from_buff = kmap_local_page(page);
		err = copy_to_user(buf, from_buff + page_offset, page_len);
		kunmap_local(from_buff);
		if (err) {
			done = -EFAULT;
			goto out_unlock;
		}
		*pos += page_len;
		len -= page_len;
		done += page_len;
		buf += page_len;
	}
out_unlock:
	mutex_unlock(&lm_file->lock);
	return done;
}
/* fops for the read-only save (STOP_COPY) migration file. */
static const struct file_operations pds_vfio_save_fops = {
	.owner = THIS_MODULE,
	.read = pds_vfio_save_read,
	.release = pds_vfio_release_file,
	.llseek = no_llseek,
};
/*
 * Ask the device how large its migration state is, then allocate the
 * save-direction lm_file of that size and stash it on the device.
 * Return: 0 on success, negative errno otherwise.
 */
static int pds_vfio_get_save_file(struct pds_vfio_pci_device *pds_vfio)
{
	struct device *dev = &pds_vfio->vfio_coredev.pdev->dev;
	struct pds_vfio_lm_file *lm_file;
	u64 size;
	int err;
	/* Get live migration state size in this state */
	err = pds_vfio_get_lm_state_size_cmd(pds_vfio, &size);
	if (err) {
		dev_err(dev, "failed to get save status: %pe\n", ERR_PTR(err));
		return err;
	}
	dev_dbg(dev, "save status, size = %lld\n", size);
	if (!size) {
		dev_err(dev, "invalid state size\n");
		return -EIO;
	}
	lm_file = pds_vfio_get_lm_file(&pds_vfio_save_fops, O_RDONLY, size);
	if (!lm_file) {
		dev_err(dev, "failed to create save file\n");
		return -ENOENT;
	}
	dev_dbg(dev, "size = %lld, alloc_size = %lld, npages = %lld\n",
		lm_file->size, lm_file->alloc_size, lm_file->npages);
	pds_vfio->save_file = lm_file;
	return 0;
}
/*
 * ->write for the restore file: copy incoming device state from userspace
 * into the backing pages a page at a time.  Stream semantics (-ESPIPE on
 * explicit *pos); lm_file->size grows as data arrives.
 */
static ssize_t pds_vfio_restore_write(struct file *filp, const char __user *buf,
				      size_t len, loff_t *pos)
{
	struct pds_vfio_lm_file *lm_file = filp->private_data;
	loff_t requested_length;
	ssize_t done = 0;
	if (pos)
		return -ESPIPE;
	pos = &filp->f_pos;
	/* reject negative positions and pos+len overflow */
	if (*pos < 0 ||
	    check_add_overflow((loff_t)len, *pos, &requested_length))
		return -EINVAL;
	mutex_lock(&lm_file->lock);
	while (len) {
		size_t page_offset;
		struct page *page;
		size_t page_len;
		u8 *to_buff;
		int err;
		page_offset = (*pos) % PAGE_SIZE;
		page = pds_vfio_get_file_page(lm_file, *pos - page_offset);
		if (!page) {
			/* only report an error if nothing was copied yet */
			if (done == 0)
				done = -EINVAL;
			goto out_unlock;
		}
		page_len = min_t(size_t, len, PAGE_SIZE - page_offset);
		to_buff = kmap_local_page(page);
		err = copy_from_user(to_buff + page_offset, buf, page_len);
		kunmap_local(to_buff);
		if (err) {
			done = -EFAULT;
			goto out_unlock;
		}
		*pos += page_len;
		len -= page_len;
		done += page_len;
		buf += page_len;
		/* track how much state has been received so far */
		lm_file->size += page_len;
	}
out_unlock:
	mutex_unlock(&lm_file->lock);
	return done;
}
/* fops for the write-only restore (RESUMING) migration file. */
static const struct file_operations pds_vfio_restore_fops = {
	.owner = THIS_MODULE,
	.write = pds_vfio_restore_write,
	.release = pds_vfio_release_file,
	.llseek = no_llseek,
};
/*
 * Allocate the file used to stage incoming (RESUMING) device state.
 * The restore image is a fixed-size pds_lm_dev_state blob, so no device
 * query is needed to size it — which also makes the old "invalid state
 * size" check dead code (sizeof is a nonzero compile-time constant).
 * Return: 0 on success, -ENOENT if the anon file could not be created.
 */
static int pds_vfio_get_restore_file(struct pds_vfio_pci_device *pds_vfio)
{
	struct device *dev = &pds_vfio->vfio_coredev.pdev->dev;
	struct pds_vfio_lm_file *lm_file;
	u64 size = sizeof(union pds_lm_dev_state);
	dev_dbg(dev, "restore status, size = %lld\n", size);
	lm_file = pds_vfio_get_lm_file(&pds_vfio_restore_fops, O_WRONLY, size);
	if (!lm_file) {
		/* '\n' added: kernel log messages should be newline terminated */
		dev_err(dev, "failed to create restore file\n");
		return -ENOENT;
	}
	pds_vfio->restore_file = lm_file;
	return 0;
}
/*
 * Execute one arc of the VFIO migration FSM from the current state to
 * @next.  Callers (the vfio core) only ever request adjacent arcs, so
 * each (cur, next) pair below is a single legal transition; anything
 * else returns -EINVAL.  Returns a migration file for the data-transfer
 * arcs (STOP->STOP_COPY, STOP->RESUMING), NULL for the others.
 */
struct file *
pds_vfio_step_device_state_locked(struct pds_vfio_pci_device *pds_vfio,
				  enum vfio_device_mig_state next)
{
	enum vfio_device_mig_state cur = pds_vfio->state;
	int err;
	/* STOP -> STOP_COPY: create the save file and pull state from fw */
	if (cur == VFIO_DEVICE_STATE_STOP && next == VFIO_DEVICE_STATE_STOP_COPY) {
		err = pds_vfio_get_save_file(pds_vfio);
		if (err)
			return ERR_PTR(err);
		err = pds_vfio_get_lm_state_cmd(pds_vfio);
		if (err) {
			pds_vfio_put_save_file(pds_vfio);
			return ERR_PTR(err);
		}
		return pds_vfio->save_file->filep;
	}
	/* STOP_COPY -> STOP: discard save data and stop dirty tracking */
	if (cur == VFIO_DEVICE_STATE_STOP_COPY && next == VFIO_DEVICE_STATE_STOP) {
		pds_vfio_put_save_file(pds_vfio);
		pds_vfio_dirty_disable(pds_vfio, true);
		return NULL;
	}
	/* STOP -> RESUMING: create the restore file for incoming state */
	if (cur == VFIO_DEVICE_STATE_STOP && next == VFIO_DEVICE_STATE_RESUMING) {
		err = pds_vfio_get_restore_file(pds_vfio);
		if (err)
			return ERR_PTR(err);
		return pds_vfio->restore_file->filep;
	}
	/* RESUMING -> STOP: push the received state into the firmware */
	if (cur == VFIO_DEVICE_STATE_RESUMING && next == VFIO_DEVICE_STATE_STOP) {
		err = pds_vfio_set_lm_state_cmd(pds_vfio);
		if (err)
			return ERR_PTR(err);
		pds_vfio_put_restore_file(pds_vfio);
		return NULL;
	}
	/* RUNNING -> RUNNING_P2P: announce migration and quiesce P2P traffic */
	if (cur == VFIO_DEVICE_STATE_RUNNING && next == VFIO_DEVICE_STATE_RUNNING_P2P) {
		pds_vfio_send_host_vf_lm_status_cmd(pds_vfio,
						    PDS_LM_STA_IN_PROGRESS);
		err = pds_vfio_suspend_device_cmd(pds_vfio,
						  PDS_LM_SUSPEND_RESUME_TYPE_P2P);
		if (err)
			return ERR_PTR(err);
		return NULL;
	}
	/* RUNNING_P2P -> RUNNING: fully resume and clear migration status */
	if (cur == VFIO_DEVICE_STATE_RUNNING_P2P && next == VFIO_DEVICE_STATE_RUNNING) {
		err = pds_vfio_resume_device_cmd(pds_vfio,
						 PDS_LM_SUSPEND_RESUME_TYPE_FULL);
		if (err)
			return ERR_PTR(err);
		pds_vfio_send_host_vf_lm_status_cmd(pds_vfio, PDS_LM_STA_NONE);
		return NULL;
	}
	/* STOP -> RUNNING_P2P: resume only the P2P portion */
	if (cur == VFIO_DEVICE_STATE_STOP && next == VFIO_DEVICE_STATE_RUNNING_P2P) {
		err = pds_vfio_resume_device_cmd(pds_vfio,
						 PDS_LM_SUSPEND_RESUME_TYPE_P2P);
		if (err)
			return ERR_PTR(err);
		return NULL;
	}
	/* RUNNING_P2P -> STOP: fully suspend the device */
	if (cur == VFIO_DEVICE_STATE_RUNNING_P2P && next == VFIO_DEVICE_STATE_STOP) {
		err = pds_vfio_suspend_device_cmd(pds_vfio,
						  PDS_LM_SUSPEND_RESUME_TYPE_FULL);
		if (err)
			return ERR_PTR(err);
		return NULL;
	}
	return ERR_PTR(-EINVAL);
}
| linux-master | drivers/vfio/pci/pds/lm.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2023 Advanced Micro Devices, Inc. */
#include <linux/io.h>
#include <linux/types.h>
#include <linux/delay.h>
#include <linux/pds/pds_common.h>
#include <linux/pds/pds_core_if.h>
#include <linux/pds/pds_adminq.h>
#include "vfio_dev.h"
#include "cmds.h"
#define SUSPEND_TIMEOUT_S 5
#define SUSPEND_CHECK_INTERVAL_MS 1
/*
 * Wrap a client admin command in a PDS_AQ_CMD_CLIENT_CMD envelope and post
 * it to the PF's adminq.  -EAGAIN is not logged because callers poll on it
 * (see pds_vfio_suspend_wait_device_cmd()).
 * Return: 0 on success or a negative errno.
 */
static int pds_vfio_client_adminq_cmd(struct pds_vfio_pci_device *pds_vfio,
				      union pds_core_adminq_cmd *req,
				      union pds_core_adminq_comp *resp,
				      bool fast_poll)
{
	struct pci_dev *pdev = pds_vfio_to_pci_dev(pds_vfio);
	union pds_core_adminq_cmd cmd = {};
	struct pdsc *pdsc;
	int err;
	/* Wrap the client request */
	cmd.client_request.opcode = PDS_AQ_CMD_CLIENT_CMD;
	cmd.client_request.client_id = cpu_to_le16(pds_vfio->client_id);
	memcpy(cmd.client_request.client_cmd, req,
	       sizeof(cmd.client_request.client_cmd));
	pdsc = pdsc_get_pf_struct(pdev);
	if (IS_ERR(pdsc))
		return PTR_ERR(pdsc);
	err = pdsc_adminq_post(pdsc, &cmd, resp, fast_poll);
	if (err && err != -EAGAIN)
		dev_err(pds_vfio_to_dev(pds_vfio),
			"client admin cmd failed: %pe\n", ERR_PTR(err));
	return err;
}
/*
 * Register this VF with the pds_core PF as a live-migration client.
 * The client name encodes the PCI domain and devfn so the PF can tell
 * its clients apart.  Stores the assigned client id on success.
 */
int pds_vfio_register_client_cmd(struct pds_vfio_pci_device *pds_vfio)
{
	struct pci_dev *pdev = pds_vfio_to_pci_dev(pds_vfio);
	char devname[PDS_DEVNAME_LEN];
	struct pdsc *pdsc;
	int client_id;
	snprintf(devname, sizeof(devname), "%s.%d-%u", PDS_VFIO_LM_DEV_NAME,
		 pci_domain_nr(pdev->bus),
		 PCI_DEVID(pdev->bus->number, pdev->devfn));
	pdsc = pdsc_get_pf_struct(pdev);
	if (IS_ERR(pdsc))
		return PTR_ERR(pdsc);
	client_id = pds_client_register(pdsc, devname);
	if (client_id < 0)
		return client_id;
	pds_vfio->client_id = client_id;
	return 0;
}
/* Unregister from the pds_core PF; the client id is cleared regardless. */
void pds_vfio_unregister_client_cmd(struct pds_vfio_pci_device *pds_vfio)
{
	struct pci_dev *pdev = pds_vfio_to_pci_dev(pds_vfio);
	struct pdsc *pdsc = pdsc_get_pf_struct(pdev);
	int err;
	if (IS_ERR(pdsc))
		return;
	err = pds_client_unregister(pdsc, pds_vfio->client_id);
	if (err)
		dev_err(&pdev->dev, "unregister from DSC failed: %pe\n",
			ERR_PTR(err));
	pds_vfio->client_id = 0;
}
/*
 * Poll the firmware for suspend completion.  The status command returns
 * -EAGAIN while the suspend is still in flight; keep polling every
 * SUSPEND_CHECK_INTERVAL_MS up to SUSPEND_TIMEOUT_S seconds.
 * Return: 0 on completion, -ETIMEDOUT on timeout, other negative errno
 * on a command failure.
 */
static int
pds_vfio_suspend_wait_device_cmd(struct pds_vfio_pci_device *pds_vfio, u8 type)
{
	union pds_core_adminq_cmd cmd = {
		.lm_suspend_status = {
			.opcode = PDS_LM_CMD_SUSPEND_STATUS,
			.vf_id = cpu_to_le16(pds_vfio->vf_id),
			.type = type,
		},
	};
	struct device *dev = pds_vfio_to_dev(pds_vfio);
	union pds_core_adminq_comp comp = {};
	unsigned long time_limit;
	unsigned long time_start;
	unsigned long time_done;
	int err;
	time_start = jiffies;
	time_limit = time_start + HZ * SUSPEND_TIMEOUT_S;
	do {
		err = pds_vfio_client_adminq_cmd(pds_vfio, &cmd, &comp, true);
		/* any result other than "try again" ends the poll loop */
		if (err != -EAGAIN)
			break;
		msleep(SUSPEND_CHECK_INTERVAL_MS);
	} while (time_before(jiffies, time_limit));
	time_done = jiffies;
	dev_dbg(dev, "%s: vf%u: Suspend comp received in %d msecs\n", __func__,
		pds_vfio->vf_id, jiffies_to_msecs(time_done - time_start));
	/* Check the results */
	if (time_after_eq(time_done, time_limit)) {
		dev_err(dev, "%s: vf%u: Suspend comp timeout\n", __func__,
			pds_vfio->vf_id);
		err = -ETIMEDOUT;
	}
	return err;
}
/*
 * Suspend the VF's datapath.  @type selects a P2P-only or full suspend.
 * The firmware acks the request immediately; completion is then polled
 * via pds_vfio_suspend_wait_device_cmd().
 */
int pds_vfio_suspend_device_cmd(struct pds_vfio_pci_device *pds_vfio, u8 type)
{
	union pds_core_adminq_cmd cmd = {
		.lm_suspend = {
			.opcode = PDS_LM_CMD_SUSPEND,
			.vf_id = cpu_to_le16(pds_vfio->vf_id),
			.type = type,
		},
	};
	struct device *dev = pds_vfio_to_dev(pds_vfio);
	union pds_core_adminq_comp comp = {};
	int err;
	dev_dbg(dev, "vf%u: Suspend device\n", pds_vfio->vf_id);
	/*
	 * The initial suspend request to the firmware starts the device suspend
	 * operation and the firmware returns success if it's started
	 * successfully.
	 */
	err = pds_vfio_client_adminq_cmd(pds_vfio, &cmd, &comp, true);
	if (err) {
		dev_err(dev, "vf%u: Suspend failed: %pe\n", pds_vfio->vf_id,
			ERR_PTR(err));
		return err;
	}
	/*
	 * The subsequent suspend status request(s) check if the firmware has
	 * completed the device suspend process.
	 */
	return pds_vfio_suspend_wait_device_cmd(pds_vfio, type);
}
/* Resume the VF's datapath; @type selects P2P-only or full resume. */
int pds_vfio_resume_device_cmd(struct pds_vfio_pci_device *pds_vfio, u8 type)
{
	union pds_core_adminq_cmd cmd = {
		.lm_resume = {
			.opcode = PDS_LM_CMD_RESUME,
			.vf_id = cpu_to_le16(pds_vfio->vf_id),
			.type = type,
		},
	};
	struct device *dev = pds_vfio_to_dev(pds_vfio);
	union pds_core_adminq_comp comp = {};
	dev_dbg(dev, "vf%u: Resume device\n", pds_vfio->vf_id);
	return pds_vfio_client_adminq_cmd(pds_vfio, &cmd, &comp, true);
}
/*
 * Query the firmware for the byte size of this VF's migration state.
 * On success *@size holds the size; otherwise a negative errno returns.
 */
int pds_vfio_get_lm_state_size_cmd(struct pds_vfio_pci_device *pds_vfio, u64 *size)
{
	union pds_core_adminq_cmd cmd = {
		.lm_state_size = {
			.opcode = PDS_LM_CMD_STATE_SIZE,
			.vf_id = cpu_to_le16(pds_vfio->vf_id),
		},
	};
	struct device *dev = pds_vfio_to_dev(pds_vfio);
	union pds_core_adminq_comp comp = {};
	int err;
	dev_dbg(dev, "vf%u: Get migration status\n", pds_vfio->vf_id);
	err = pds_vfio_client_adminq_cmd(pds_vfio, &cmd, &comp, false);
	if (err)
		return err;
	*size = le64_to_cpu(comp.lm_state_size.size);
	return 0;
}
/*
 * DMA-map the lm_file's pages for the device, then build and map a
 * pds_lm_sg_elem array describing them (the firmware consumes this SGL,
 * so it is always mapped TO_DEVICE regardless of @dir).
 * On success, lm_file->sgl/sgl_addr/num_sge are populated.
 */
static int pds_vfio_dma_map_lm_file(struct device *dev,
				    enum dma_data_direction dir,
				    struct pds_vfio_lm_file *lm_file)
{
	struct pds_lm_sg_elem *sgl, *sge;
	struct scatterlist *sg;
	dma_addr_t sgl_addr;
	size_t sgl_size;
	int err;
	int i;
	if (!lm_file)
		return -EINVAL;
	/* dma map file pages */
	err = dma_map_sgtable(dev, &lm_file->sg_table, dir, 0);
	if (err)
		return err;
	lm_file->num_sge = lm_file->sg_table.nents;
	/* alloc sgl */
	sgl_size = lm_file->num_sge * sizeof(struct pds_lm_sg_elem);
	sgl = kzalloc(sgl_size, GFP_KERNEL);
	if (!sgl) {
		err = -ENOMEM;
		goto out_unmap_sgtable;
	}
	/* fill sgl */
	sge = sgl;
	for_each_sgtable_dma_sg(&lm_file->sg_table, sg, i) {
		sge->addr = cpu_to_le64(sg_dma_address(sg));
		sge->len = cpu_to_le32(sg_dma_len(sg));
		dev_dbg(dev, "addr = %llx, len = %u\n", sge->addr, sge->len);
		sge++;
	}
	sgl_addr = dma_map_single(dev, sgl, sgl_size, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, sgl_addr)) {
		err = -EIO;
		goto out_free_sgl;
	}
	lm_file->sgl = sgl;
	lm_file->sgl_addr = sgl_addr;
	return 0;
	/* error unwind in reverse acquisition order */
out_free_sgl:
	kfree(sgl);
out_unmap_sgtable:
	lm_file->num_sge = 0;
	dma_unmap_sgtable(dev, &lm_file->sg_table, dir, 0);
	return err;
}
/*
 * Undo pds_vfio_dma_map_lm_file(): unmap and free the SGL, then unmap the
 * backing pages.  @dir must match the direction used for mapping.
 */
static void pds_vfio_dma_unmap_lm_file(struct device *dev,
				       enum dma_data_direction dir,
				       struct pds_vfio_lm_file *lm_file)
{
	if (!lm_file)
		return;
	/* free sgl */
	if (lm_file->sgl) {
		dma_unmap_single(dev, lm_file->sgl_addr,
				 lm_file->num_sge * sizeof(*lm_file->sgl),
				 DMA_TO_DEVICE);
		kfree(lm_file->sgl);
		lm_file->sgl = NULL;
		lm_file->sgl_addr = DMA_MAPPING_ERROR;
		lm_file->num_sge = 0;
	}
	/* dma unmap file pages */
	dma_unmap_sgtable(dev, &lm_file->sg_table, dir, 0);
}
/*
 * Ask the firmware to write this VF's migration state into the save file.
 * Mappings are done against the PF (pci_physfn) since the PF performs the
 * DMA on the VF's behalf; they are torn down again before returning.
 */
int pds_vfio_get_lm_state_cmd(struct pds_vfio_pci_device *pds_vfio)
{
	union pds_core_adminq_cmd cmd = {
		.lm_save = {
			.opcode = PDS_LM_CMD_SAVE,
			.vf_id = cpu_to_le16(pds_vfio->vf_id),
		},
	};
	struct pci_dev *pdev = pds_vfio_to_pci_dev(pds_vfio);
	struct device *pdsc_dev = &pci_physfn(pdev)->dev;
	union pds_core_adminq_comp comp = {};
	struct pds_vfio_lm_file *lm_file;
	int err;
	dev_dbg(&pdev->dev, "vf%u: Get migration state\n", pds_vfio->vf_id);
	lm_file = pds_vfio->save_file;
	err = pds_vfio_dma_map_lm_file(pdsc_dev, DMA_FROM_DEVICE, lm_file);
	if (err) {
		dev_err(&pdev->dev, "failed to map save migration file: %pe\n",
			ERR_PTR(err));
		return err;
	}
	cmd.lm_save.sgl_addr = cpu_to_le64(lm_file->sgl_addr);
	cmd.lm_save.num_sge = cpu_to_le32(lm_file->num_sge);
	err = pds_vfio_client_adminq_cmd(pds_vfio, &cmd, &comp, false);
	if (err)
		dev_err(&pdev->dev, "failed to get migration state: %pe\n",
			ERR_PTR(err));
	pds_vfio_dma_unmap_lm_file(pdsc_dev, DMA_FROM_DEVICE, lm_file);
	return err;
}
/*
 * Push the state accumulated in the restore file into the firmware.
 * Mirror of pds_vfio_get_lm_state_cmd() with DMA_TO_DEVICE direction.
 */
int pds_vfio_set_lm_state_cmd(struct pds_vfio_pci_device *pds_vfio)
{
	union pds_core_adminq_cmd cmd = {
		.lm_restore = {
			.opcode = PDS_LM_CMD_RESTORE,
			.vf_id = cpu_to_le16(pds_vfio->vf_id),
		},
	};
	struct pci_dev *pdev = pds_vfio_to_pci_dev(pds_vfio);
	struct device *pdsc_dev = &pci_physfn(pdev)->dev;
	union pds_core_adminq_comp comp = {};
	struct pds_vfio_lm_file *lm_file;
	int err;
	dev_dbg(&pdev->dev, "vf%u: Set migration state\n", pds_vfio->vf_id);
	lm_file = pds_vfio->restore_file;
	err = pds_vfio_dma_map_lm_file(pdsc_dev, DMA_TO_DEVICE, lm_file);
	if (err) {
		dev_err(&pdev->dev,
			"failed to map restore migration file: %pe\n",
			ERR_PTR(err));
		return err;
	}
	cmd.lm_restore.sgl_addr = cpu_to_le64(lm_file->sgl_addr);
	cmd.lm_restore.num_sge = cpu_to_le32(lm_file->num_sge);
	err = pds_vfio_client_adminq_cmd(pds_vfio, &cmd, &comp, false);
	if (err)
		dev_err(&pdev->dev, "failed to set migration state: %pe\n",
			ERR_PTR(err));
	pds_vfio_dma_unmap_lm_file(pdsc_dev, DMA_TO_DEVICE, lm_file);
	return err;
}
/*
 * Best-effort notification to the firmware of the host's view of the
 * VF's migration status (IN_PROGRESS or NONE).  Failures are logged but
 * not propagated — the migration flow continues regardless.
 */
void pds_vfio_send_host_vf_lm_status_cmd(struct pds_vfio_pci_device *pds_vfio,
					 enum pds_lm_host_vf_status vf_status)
{
	union pds_core_adminq_cmd cmd = {
		.lm_host_vf_status = {
			.opcode = PDS_LM_CMD_HOST_VF_STATUS,
			.vf_id = cpu_to_le16(pds_vfio->vf_id),
			.status = vf_status,
		},
	};
	struct device *dev = pds_vfio_to_dev(pds_vfio);
	union pds_core_adminq_comp comp = {};
	int err;
	dev_dbg(dev, "vf%u: Set host VF LM status: %u", pds_vfio->vf_id,
		vf_status);
	/* only the two statuses above are meaningful to the firmware */
	if (vf_status != PDS_LM_STA_IN_PROGRESS &&
	    vf_status != PDS_LM_STA_NONE) {
		dev_warn(dev, "Invalid host VF migration status, %d\n",
			 vf_status);
		return;
	}
	err = pds_vfio_client_adminq_cmd(pds_vfio, &cmd, &comp, false);
	if (err)
		dev_warn(dev, "failed to send host VF migration status: %pe\n",
			 ERR_PTR(err));
}
/*
 * Query dirty-page tracking capabilities/state.  @regions_dma points to
 * a buffer the firmware fills with region descriptors; *@max_regions is
 * in/out (requested vs. supported) and *@num_regions is out.
 * Only the SEQ_ACK bitmap scheme is accepted; anything else fails with
 * -EOPNOTSUPP.
 */
int pds_vfio_dirty_status_cmd(struct pds_vfio_pci_device *pds_vfio,
			      u64 regions_dma, u8 *max_regions, u8 *num_regions)
{
	union pds_core_adminq_cmd cmd = {
		.lm_dirty_status = {
			.opcode = PDS_LM_CMD_DIRTY_STATUS,
			.vf_id = cpu_to_le16(pds_vfio->vf_id),
		},
	};
	struct device *dev = pds_vfio_to_dev(pds_vfio);
	union pds_core_adminq_comp comp = {};
	int err;
	dev_dbg(dev, "vf%u: Dirty status\n", pds_vfio->vf_id);
	cmd.lm_dirty_status.regions_dma = cpu_to_le64(regions_dma);
	cmd.lm_dirty_status.max_regions = *max_regions;
	err = pds_vfio_client_adminq_cmd(pds_vfio, &cmd, &comp, false);
	if (err) {
		dev_err(dev, "failed to get dirty status: %pe\n", ERR_PTR(err));
		return err;
	}
	/* only support seq_ack approach for now */
	if (!(le32_to_cpu(comp.lm_dirty_status.bmp_type_mask) &
	      BIT(PDS_LM_DIRTY_BMP_TYPE_SEQ_ACK))) {
		dev_err(dev, "Dirty bitmap tracking SEQ_ACK not supported\n");
		return -EOPNOTSUPP;
	}
	*num_regions = comp.lm_dirty_status.num_regions;
	*max_regions = comp.lm_dirty_status.max_regions;
	dev_dbg(dev,
		"Page Tracking Status command successful, max_regions: %d, num_regions: %d, bmp_type: %s\n",
		*max_regions, *num_regions, "PDS_LM_DIRTY_BMP_TYPE_SEQ_ACK");
	return 0;
}
/*
 * Enable SEQ_ACK dirty-page tracking over @num_regions region descriptors
 * located at @regions_dma.  Returns 0 on success, negative errno on error.
 */
int pds_vfio_dirty_enable_cmd(struct pds_vfio_pci_device *pds_vfio,
			      u64 regions_dma, u8 num_regions)
{
	union pds_core_adminq_cmd cmd = {
		.lm_dirty_enable = {
			.opcode = PDS_LM_CMD_DIRTY_ENABLE,
			.vf_id = cpu_to_le16(pds_vfio->vf_id),
			.regions_dma = cpu_to_le64(regions_dma),
			.bmp_type = PDS_LM_DIRTY_BMP_TYPE_SEQ_ACK,
			.num_regions = num_regions,
		},
	};
	union pds_core_adminq_comp comp = {};
	struct device *dev = pds_vfio_to_dev(pds_vfio);
	int err;
	err = pds_vfio_client_adminq_cmd(pds_vfio, &cmd, &comp, false);
	if (!err)
		return 0;
	dev_err(dev, "failed dirty tracking enable: %pe\n",
		ERR_PTR(err));
	return err;
}
/*
 * Disable dirty-page tracking.  The firmware reports num_regions back in
 * the completion; a non-zero count after disable is treated as -EIO.
 */
int pds_vfio_dirty_disable_cmd(struct pds_vfio_pci_device *pds_vfio)
{
	union pds_core_adminq_cmd cmd = {
		.lm_dirty_disable = {
			.opcode = PDS_LM_CMD_DIRTY_DISABLE,
			.vf_id = cpu_to_le16(pds_vfio->vf_id),
		},
	};
	struct device *dev = pds_vfio_to_dev(pds_vfio);
	union pds_core_adminq_comp comp = {};
	int err;
	err = pds_vfio_client_adminq_cmd(pds_vfio, &cmd, &comp, false);
	if (err || comp.lm_dirty_status.num_regions != 0) {
		/* in case num_regions is still non-zero after disable */
		err = err ? err : -EIO;
		dev_err(dev,
			"failed dirty tracking disable: %pe, num_regions %d\n",
			ERR_PTR(err), comp.lm_dirty_status.num_regions);
		return err;
	}
	return 0;
}
/*
 * Transfer a slice of the dirty bitmap: READ_SEQ pulls the device's
 * sequence bitmap into host memory, WRITE_ACK pushes the host's ack
 * bitmap back.  The bitmap bytes are described by an SGL at @sgl_dma
 * with @num_sge entries, covering @total_len bytes at byte @offset.
 */
int pds_vfio_dirty_seq_ack_cmd(struct pds_vfio_pci_device *pds_vfio,
			       u64 sgl_dma, u16 num_sge, u32 offset,
			       u32 total_len, bool read_seq)
{
	const char *cmd_type_str = read_seq ? "read_seq" : "write_ack";
	union pds_core_adminq_cmd cmd = {
		.lm_dirty_seq_ack = {
			.vf_id = cpu_to_le16(pds_vfio->vf_id),
			.len_bytes = cpu_to_le32(total_len),
			.off_bytes = cpu_to_le32(offset),
			.sgl_addr = cpu_to_le64(sgl_dma),
			.num_sge = cpu_to_le16(num_sge),
		},
	};
	struct device *dev = pds_vfio_to_dev(pds_vfio);
	union pds_core_adminq_comp comp = {};
	int err;
	/* direction is selected by the opcode, not a flag in the command */
	if (read_seq)
		cmd.lm_dirty_seq_ack.opcode = PDS_LM_CMD_DIRTY_READ_SEQ;
	else
		cmd.lm_dirty_seq_ack.opcode = PDS_LM_CMD_DIRTY_WRITE_ACK;
	err = pds_vfio_client_adminq_cmd(pds_vfio, &cmd, &comp, false);
	if (err) {
		dev_err(dev, "failed cmd Page Tracking %s: %pe\n", cmd_type_str,
			ERR_PTR(err));
		return err;
	}
	return 0;
}
| linux-master | drivers/vfio/pci/pds/cmds.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2021, HiSilicon Ltd.
*/
#include <linux/device.h>
#include <linux/eventfd.h>
#include <linux/file.h>
#include <linux/hisi_acc_qm.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/vfio.h>
#include <linux/vfio_pci_core.h>
#include <linux/anon_inodes.h>
#include "hisi_acc_vfio_pci.h"
/* Return 0 on VM acc device ready, -ETIMEDOUT hardware timeout */
/* Return 0 on VM acc device ready, -ETIMEDOUT hardware timeout */
static int qm_wait_dev_not_ready(struct hisi_qm *qm)
{
	u32 val;
	/* poll QM_VF_STATE bit 0 until it clears, or the MB poll times out */
	return readl_relaxed_poll_timeout(qm->io_base + QM_VF_STATE,
				val, !(val & 0x1), MB_POLL_PERIOD_US,
				MB_POLL_TIMEOUT_US);
}
/*
 * Poll a QM status register until it reads zero or the retry budget is
 * exhausted: up to ERROR_CHECK_TIMEOUT reads with a CHECK_DELAY_TIME
 * microsecond delay between them.  Returns the last value read (0 means
 * the register settled in time).
 */
static u32 qm_check_reg_state(struct hisi_qm *qm, u32 regs)
{
	u32 state = readl(qm->io_base + regs);
	int retry;
	for (retry = 0; state && retry < ERROR_CHECK_TIMEOUT; retry++) {
		udelay(CHECK_DELAY_TIME);
		state = readl(qm->io_base + regs);
	}
	return state;
}
/*
 * Read @nums consecutive 32-bit registers starting at @reg_addr into
 * @data.  @nums must be in [1, QM_REGS_MAX_LEN].
 */
static int qm_read_regs(struct hisi_qm *qm, u32 reg_addr,
			u32 *data, u8 nums)
{
	int i;
	if (nums < 1 || nums > QM_REGS_MAX_LEN)
		return -EINVAL;
	for (i = 0; i < nums; i++)
		data[i] = readl(qm->io_base + reg_addr +
				i * QM_REG_ADDR_OFFSET);
	return 0;
}
/*
 * Write @nums consecutive 32-bit registers starting at @reg from @data.
 * @nums must be in [1, QM_REGS_MAX_LEN].
 */
static int qm_write_regs(struct hisi_qm *qm, u32 reg,
			 u32 *data, u8 nums)
{
	int i;
	if (nums < 1 || nums > QM_REGS_MAX_LEN)
		return -EINVAL;
	for (i = 0; i < nums; i++) {
		writel(data[i], qm->io_base + reg);
		reg += QM_REG_ADDR_OFFSET;
	}
	return 0;
}
/*
 * Read the VF's SQC VFT via mailbox.  *@base gets the queue base; the
 * return value is the queue count on success (note: >0), or a negative
 * errno from the mailbox.
 */
static int qm_get_vft(struct hisi_qm *qm, u32 *base)
{
	u64 sqc_vft;
	u32 qp_num;
	int ret;
	ret = hisi_qm_mb(qm, QM_MB_CMD_SQC_VFT_V2, 0, 0, 1);
	if (ret)
		return ret;
	/* the 64-bit VFT value comes back split across two 32-bit regs */
	sqc_vft = readl(qm->io_base + QM_MB_CMD_DATA_ADDR_L) |
		  ((u64)readl(qm->io_base + QM_MB_CMD_DATA_ADDR_H) <<
		   QM_XQC_ADDR_OFFSET);
	*base = QM_SQC_VFT_BASE_MASK_V2 & (sqc_vft >> QM_SQC_VFT_BASE_SHIFT_V2);
	/* the field stores count-1, hence the +1 */
	qp_num = (QM_SQC_VFT_NUM_MASK_V2 &
		  (sqc_vft >> QM_SQC_VFT_NUM_SHIFT_V2)) + 1;
	return qp_num;
}
/* Fetch the SQC base-table address via mailbox into *@addr. */
static int qm_get_sqc(struct hisi_qm *qm, u64 *addr)
{
	int ret;
	ret = hisi_qm_mb(qm, QM_MB_CMD_SQC_BT, 0, 0, 1);
	if (ret)
		return ret;
	/* 64-bit address split across two 32-bit data registers */
	*addr = readl(qm->io_base + QM_MB_CMD_DATA_ADDR_L) |
		((u64)readl(qm->io_base + QM_MB_CMD_DATA_ADDR_H) <<
		QM_XQC_ADDR_OFFSET);
	return 0;
}
/* Fetch the CQC base-table address via mailbox into *@addr. */
static int qm_get_cqc(struct hisi_qm *qm, u64 *addr)
{
	int ret;
	ret = hisi_qm_mb(qm, QM_MB_CMD_CQC_BT, 0, 0, 1);
	if (ret)
		return ret;
	/* 64-bit address split across two 32-bit data registers */
	*addr = readl(qm->io_base + QM_MB_CMD_DATA_ADDR_L) |
		((u64)readl(qm->io_base + QM_MB_CMD_DATA_ADDR_H) <<
		QM_XQC_ADDR_OFFSET);
	return 0;
}
/*
 * Snapshot the VF QM register state into @vf_data for migration:
 * interrupt masks, inter-function communication regs, page size, and
 * the EQC/AEQC register groups.
 */
static int qm_get_regs(struct hisi_qm *qm, struct acc_vf_data *vf_data)
{
	struct device *dev = &qm->pdev->dev;
	int ret;
	ret = qm_read_regs(qm, QM_VF_AEQ_INT_MASK, &vf_data->aeq_int_mask, 1);
	if (ret) {
		dev_err(dev, "failed to read QM_VF_AEQ_INT_MASK\n");
		return ret;
	}
	ret = qm_read_regs(qm, QM_VF_EQ_INT_MASK, &vf_data->eq_int_mask, 1);
	if (ret) {
		dev_err(dev, "failed to read QM_VF_EQ_INT_MASK\n");
		return ret;
	}
	ret = qm_read_regs(qm, QM_IFC_INT_SOURCE_V,
			   &vf_data->ifc_int_source, 1);
	if (ret) {
		dev_err(dev, "failed to read QM_IFC_INT_SOURCE_V\n");
		return ret;
	}
	ret = qm_read_regs(qm, QM_IFC_INT_MASK, &vf_data->ifc_int_mask, 1);
	if (ret) {
		dev_err(dev, "failed to read QM_IFC_INT_MASK\n");
		return ret;
	}
	ret = qm_read_regs(qm, QM_IFC_INT_SET_V, &vf_data->ifc_int_set, 1);
	if (ret) {
		dev_err(dev, "failed to read QM_IFC_INT_SET_V\n");
		return ret;
	}
	ret = qm_read_regs(qm, QM_PAGE_SIZE, &vf_data->page_size, 1);
	if (ret) {
		dev_err(dev, "failed to read QM_PAGE_SIZE\n");
		return ret;
	}
	/* QM_EQC_DW has 7 regs */
	ret = qm_read_regs(qm, QM_EQC_DW0, vf_data->qm_eqc_dw, 7);
	if (ret) {
		dev_err(dev, "failed to read QM_EQC_DW\n");
		return ret;
	}
	/* QM_AEQC_DW has 7 regs */
	ret = qm_read_regs(qm, QM_AEQC_DW0, vf_data->qm_aeqc_dw, 7);
	if (ret) {
		dev_err(dev, "failed to read QM_AEQC_DW\n");
		return ret;
	}
	return 0;
}
/*
 * Restore the VF QM register state captured by qm_get_regs().  Also
 * writes the isolation config (que_iso_cfg), which is saved from the PF
 * side in vf_qm_get_match_data().  Requires the mailbox to be ready.
 */
static int qm_set_regs(struct hisi_qm *qm, struct acc_vf_data *vf_data)
{
	struct device *dev = &qm->pdev->dev;
	int ret;
	/* Check VF state */
	if (unlikely(hisi_qm_wait_mb_ready(qm))) {
		dev_err(&qm->pdev->dev, "QM device is not ready to write\n");
		return -EBUSY;
	}
	ret = qm_write_regs(qm, QM_VF_AEQ_INT_MASK, &vf_data->aeq_int_mask, 1);
	if (ret) {
		dev_err(dev, "failed to write QM_VF_AEQ_INT_MASK\n");
		return ret;
	}
	ret = qm_write_regs(qm, QM_VF_EQ_INT_MASK, &vf_data->eq_int_mask, 1);
	if (ret) {
		dev_err(dev, "failed to write QM_VF_EQ_INT_MASK\n");
		return ret;
	}
	ret = qm_write_regs(qm, QM_IFC_INT_SOURCE_V,
			    &vf_data->ifc_int_source, 1);
	if (ret) {
		dev_err(dev, "failed to write QM_IFC_INT_SOURCE_V\n");
		return ret;
	}
	ret = qm_write_regs(qm, QM_IFC_INT_MASK, &vf_data->ifc_int_mask, 1);
	if (ret) {
		dev_err(dev, "failed to write QM_IFC_INT_MASK\n");
		return ret;
	}
	ret = qm_write_regs(qm, QM_IFC_INT_SET_V, &vf_data->ifc_int_set, 1);
	if (ret) {
		dev_err(dev, "failed to write QM_IFC_INT_SET_V\n");
		return ret;
	}
	ret = qm_write_regs(qm, QM_QUE_ISO_CFG_V, &vf_data->que_iso_cfg, 1);
	if (ret) {
		dev_err(dev, "failed to write QM_QUE_ISO_CFG_V\n");
		return ret;
	}
	ret = qm_write_regs(qm, QM_PAGE_SIZE, &vf_data->page_size, 1);
	if (ret) {
		dev_err(dev, "failed to write QM_PAGE_SIZE\n");
		return ret;
	}
	/* QM_EQC_DW has 7 regs */
	ret = qm_write_regs(qm, QM_EQC_DW0, vf_data->qm_eqc_dw, 7);
	if (ret) {
		dev_err(dev, "failed to write QM_EQC_DW\n");
		return ret;
	}
	/* QM_AEQC_DW has 7 regs */
	ret = qm_write_regs(qm, QM_AEQC_DW0, vf_data->qm_aeqc_dw, 7);
	if (ret) {
		dev_err(dev, "failed to write QM_AEQC_DW\n");
		return ret;
	}
	return 0;
}
/*
 * Ring a QM doorbell: pack queue number, command, index and priority
 * into one 64-bit write.  SQ/CQ doorbells use a different base than
 * EQ/AEQ doorbells.
 */
static void qm_db(struct hisi_qm *qm, u16 qn, u8 cmd,
		  u16 index, u8 priority)
{
	u64 doorbell;
	u64 dbase;
	/* rand field is unused here; always written as 0 */
	u16 randata = 0;
	if (cmd == QM_DOORBELL_CMD_SQ || cmd == QM_DOORBELL_CMD_CQ)
		dbase = QM_DOORBELL_SQ_CQ_BASE_V2;
	else
		dbase = QM_DOORBELL_EQ_AEQ_BASE_V2;
	doorbell = qn | ((u64)cmd << QM_DB_CMD_SHIFT_V2) |
		   ((u64)randata << QM_DB_RAND_SHIFT_V2) |
		   ((u64)index << QM_DB_INDEX_SHIFT_V2) |
		   ((u64)priority << QM_DB_PRIORITY_SHIFT_V2);
	writeq(doorbell, qm->io_base + dbase);
}
/*
 * From the PF, read the SQC VFT of VF @vf_id through the VFT config
 * window.  *@rbase gets the queue base; the return value is the queue
 * count on success, or a negative errno on config-ready timeout.
 */
static int pf_qm_get_qp_num(struct hisi_qm *qm, int vf_id, u32 *rbase)
{
	unsigned int val;
	u64 sqc_vft;
	u32 qp_num;
	int ret;
	/* wait for the VFT config interface to become ready */
	ret = readl_relaxed_poll_timeout(qm->io_base + QM_VFT_CFG_RDY, val,
					 val & BIT(0), MB_POLL_PERIOD_US,
					 MB_POLL_TIMEOUT_US);
	if (ret)
		return ret;
	writel(0x1, qm->io_base + QM_VFT_CFG_OP_WR);
	/* 0 mean SQC VFT */
	writel(0x0, qm->io_base + QM_VFT_CFG_TYPE);
	writel(vf_id, qm->io_base + QM_VFT_CFG);
	writel(0x0, qm->io_base + QM_VFT_CFG_RDY);
	writel(0x1, qm->io_base + QM_VFT_CFG_OP_ENABLE);
	/* wait for the read operation to complete */
	ret = readl_relaxed_poll_timeout(qm->io_base + QM_VFT_CFG_RDY, val,
					 val & BIT(0), MB_POLL_PERIOD_US,
					 MB_POLL_TIMEOUT_US);
	if (ret)
		return ret;
	sqc_vft = readl(qm->io_base + QM_VFT_CFG_DATA_L) |
		  ((u64)readl(qm->io_base + QM_VFT_CFG_DATA_H) <<
		   QM_XQC_ADDR_OFFSET);
	*rbase = QM_SQC_VFT_BASE_MASK_V2 &
		 (sqc_vft >> QM_SQC_VFT_BASE_SHIFT_V2);
	/* the field stores count-1, hence the +1 */
	qp_num = (QM_SQC_VFT_NUM_MASK_V2 &
		  (sqc_vft >> QM_SQC_VFT_NUM_SHIFT_V2)) + 1;
	return qp_num;
}
/* Re-arm PF<->VF communication after restore. */
static void qm_dev_cmd_init(struct hisi_qm *qm)
{
	/* Clear VF communication status registers. */
	writel(0x1, qm->io_base + QM_IFC_INT_SOURCE_V);
	/* Enable pf and vf communication. */
	writel(0x0, qm->io_base + QM_IFC_INT_MASK);
}
/*
 * Trigger a QM cache writeback and wait for completion, so in-flight
 * context reaches memory before the state is saved.
 */
static int vf_qm_cache_wb(struct hisi_qm *qm)
{
	unsigned int val;
	writel(0x1, qm->io_base + QM_CACHE_WB_START);
	if (readl_relaxed_poll_timeout(qm->io_base + QM_CACHE_WB_DONE,
				       val, val & BIT(0), MB_POLL_PERIOD_US,
				       MB_POLL_TIMEOUT_US)) {
		dev_err(&qm->pdev->dev, "vf QM writeback sqc cache fail\n");
		return -EINVAL;
	}
	return 0;
}
/* Kick every SQ doorbell once (index 0, priority 1) to restart the VF. */
static void vf_qm_fun_reset(struct hisi_qm *qm)
{
	int qp;
	for (qp = 0; qp < qm->qp_num; qp++)
		qm_db(qm, qp, QM_DOORBELL_CMD_SQ, 0, 1);
}
/* Pause the VF QM function via mailbox (no payload, no DMA address). */
static int vf_qm_func_stop(struct hisi_qm *qm)
{
	return hisi_qm_mb(qm, QM_MB_CMD_PAUSE_QM, 0, 0, 0);
}
/*
 * Validate that an incoming migration image matches this VF: magic,
 * device id, queue count, and queue-isolation config must all agree.
 * Runs once per migration (guarded by match_done) and only after at
 * least QM_MATCH_SIZE bytes of the image have arrived.  On success the
 * saved vf_qm_state is written to the VF and cached on the vdev.
 */
static int vf_qm_check_match(struct hisi_acc_vf_core_device *hisi_acc_vdev,
			     struct hisi_acc_vf_migration_file *migf)
{
	struct acc_vf_data *vf_data = &migf->vf_data;
	struct hisi_qm *vf_qm = &hisi_acc_vdev->vf_qm;
	struct hisi_qm *pf_qm = hisi_acc_vdev->pf_qm;
	struct device *dev = &vf_qm->pdev->dev;
	u32 que_iso_state;
	int ret;
	if (migf->total_length < QM_MATCH_SIZE || hisi_acc_vdev->match_done)
		return 0;
	if (vf_data->acc_magic != ACC_DEV_MAGIC) {
		dev_err(dev, "failed to match ACC_DEV_MAGIC\n");
		return -EINVAL;
	}
	if (vf_data->dev_id != hisi_acc_vdev->vf_dev->device) {
		dev_err(dev, "failed to match VF devices\n");
		return -EINVAL;
	}
	/* VF qp num check */
	ret = qm_get_vft(vf_qm, &vf_qm->qp_base);
	if (ret <= 0) {
		dev_err(dev, "failed to get vft qp nums\n");
		return -EINVAL;
	}
	if (ret != vf_data->qp_num) {
		dev_err(dev, "failed to match VF qp num\n");
		return -EINVAL;
	}
	vf_qm->qp_num = ret;
	/* VF isolation state check */
	ret = qm_read_regs(pf_qm, QM_QUE_ISO_CFG_V, &que_iso_state, 1);
	if (ret) {
		dev_err(dev, "failed to read QM_QUE_ISO_CFG_V\n");
		return ret;
	}
	if (vf_data->que_iso_cfg != que_iso_state) {
		dev_err(dev, "failed to match isolation state\n");
		return -EINVAL;
	}
	ret = qm_write_regs(vf_qm, QM_VF_STATE, &vf_data->vf_qm_state, 1);
	if (ret) {
		dev_err(dev, "failed to write QM_VF_STATE\n");
		return ret;
	}
	hisi_acc_vdev->vf_qm_state = vf_data->vf_qm_state;
	hisi_acc_vdev->match_done = true;
	return 0;
}
/*
 * Fill @vf_data with the source-side "match" header (magic, device id,
 * queue base/count and isolation state) read via the PF.  This is the
 * data the destination validates in vf_qm_check_match().
 */
static int vf_qm_get_match_data(struct hisi_acc_vf_core_device *hisi_acc_vdev,
				struct acc_vf_data *vf_data)
{
	struct hisi_qm *pf_qm = hisi_acc_vdev->pf_qm;
	struct device *dev = &pf_qm->pdev->dev;
	int vf_id = hisi_acc_vdev->vf_id;
	int ret;

	vf_data->acc_magic = ACC_DEV_MAGIC;
	/* Save device id */
	vf_data->dev_id = hisi_acc_vdev->vf_dev->device;

	/* VF qp num save from PF */
	ret = pf_qm_get_qp_num(pf_qm, vf_id, &vf_data->qp_base);
	if (ret <= 0) {
		dev_err(dev, "failed to get vft qp nums!\n");
		return -EINVAL;
	}

	vf_data->qp_num = ret;

	/* VF isolation state save from PF */
	ret = qm_read_regs(pf_qm, QM_QUE_ISO_CFG_V, &vf_data->que_iso_cfg, 1);
	if (ret) {
		dev_err(dev, "failed to read QM_QUE_ISO_CFG_V!\n");
		return ret;
	}

	return 0;
}
/*
 * Restore the received migration state into the destination VF QM:
 * DMA addresses and queue configuration are programmed back, then the
 * SQC/CQC base tables are set via the mailbox and the PF<->VF
 * communication channel is re-initialised.
 *
 * Returns 0 if only match data was transferred (nothing to restore),
 * -EINVAL if the received blob is truncated, or the first error hit.
 */
static int vf_qm_load_data(struct hisi_acc_vf_core_device *hisi_acc_vdev,
			   struct hisi_acc_vf_migration_file *migf)
{
	struct hisi_qm *qm = &hisi_acc_vdev->vf_qm;
	struct device *dev = &qm->pdev->dev;
	struct acc_vf_data *vf_data = &migf->vf_data;
	int ret;

	/* Return if only match data was transferred */
	if (migf->total_length == QM_MATCH_SIZE)
		return 0;

	if (migf->total_length < sizeof(struct acc_vf_data))
		return -EINVAL;

	qm->eqe_dma = vf_data->eqe_dma;
	qm->aeqe_dma = vf_data->aeqe_dma;
	qm->sqc_dma = vf_data->sqc_dma;
	qm->cqc_dma = vf_data->cqc_dma;

	qm->qp_base = vf_data->qp_base;
	qm->qp_num = vf_data->qp_num;

	ret = qm_set_regs(qm, vf_data);
	if (ret) {
		dev_err(dev, "set VF regs failed\n");
		return ret;
	}

	ret = hisi_qm_mb(qm, QM_MB_CMD_SQC_BT, qm->sqc_dma, 0, 0);
	if (ret) {
		dev_err(dev, "set sqc failed\n");
		return ret;
	}

	ret = hisi_qm_mb(qm, QM_MB_CMD_CQC_BT, qm->cqc_dma, 0, 0);
	if (ret) {
		dev_err(dev, "set cqc failed\n");
		return ret;
	}

	qm_dev_cmd_init(qm);
	return 0;
}
/*
 * Capture the full VF QM state into @migf for STOP_COPY.  If the device
 * never became ready, only the match header (with state QM_NOT_READY) is
 * produced so the destination can skip the restore.  Otherwise the caches
 * are written back, all registers are dumped, and the EQE/AEQE/SQC/CQC
 * DMA addresses are reconstructed from the dumped 32-bit halves.
 */
static int vf_qm_state_save(struct hisi_acc_vf_core_device *hisi_acc_vdev,
			    struct hisi_acc_vf_migration_file *migf)
{
	struct acc_vf_data *vf_data = &migf->vf_data;
	struct hisi_qm *vf_qm = &hisi_acc_vdev->vf_qm;
	struct device *dev = &vf_qm->pdev->dev;
	int ret;

	if (unlikely(qm_wait_dev_not_ready(vf_qm))) {
		/* Update state and return with match data */
		vf_data->vf_qm_state = QM_NOT_READY;
		hisi_acc_vdev->vf_qm_state = vf_data->vf_qm_state;
		migf->total_length = QM_MATCH_SIZE;
		return 0;
	}

	vf_data->vf_qm_state = QM_READY;
	hisi_acc_vdev->vf_qm_state = vf_data->vf_qm_state;

	ret = vf_qm_cache_wb(vf_qm);
	if (ret) {
		dev_err(dev, "failed to writeback QM Cache!\n");
		return ret;
	}

	ret = qm_get_regs(vf_qm, vf_data);
	if (ret)
		return -EINVAL;

	/* Every reg is 32 bit, the dma address is 64 bit. */
	vf_data->eqe_dma = vf_data->qm_eqc_dw[1];
	vf_data->eqe_dma <<= QM_XQC_ADDR_OFFSET;
	vf_data->eqe_dma |= vf_data->qm_eqc_dw[0];
	vf_data->aeqe_dma = vf_data->qm_aeqc_dw[1];
	vf_data->aeqe_dma <<= QM_XQC_ADDR_OFFSET;
	vf_data->aeqe_dma |= vf_data->qm_aeqc_dw[0];

	/* Through SQC_BT/CQC_BT to get sqc and cqc address */
	ret = qm_get_sqc(vf_qm, &vf_data->sqc_dma);
	if (ret) {
		dev_err(dev, "failed to read SQC addr!\n");
		return -EINVAL;
	}

	ret = qm_get_cqc(vf_qm, &vf_data->cqc_dma);
	if (ret) {
		dev_err(dev, "failed to read CQC addr!\n");
		return -EINVAL;
	}

	migf->total_length = sizeof(struct acc_vf_data);
	return 0;
}
/* Fetch our core-device wrapper from the PCI device's drvdata. */
static struct hisi_acc_vf_core_device *hisi_acc_drvdata(struct pci_dev *pdev)
{
	struct vfio_pci_core_device *core_device = dev_get_drvdata(&pdev->dev);

	return container_of(core_device, struct hisi_acc_vf_core_device,
			    core_device);
}
/*
 * Check the PF's RAS state and Function INT state.
 *
 * Verifies that no QM RAS error, no PF<->VF communication interrupt and
 * no per-submodule (SEC/HPRE/ZIP) task interrupt is pending before
 * declaring the device quiescent.  Returns 0 when clean, -EBUSY when any
 * state register is set, -EINVAL for an unknown device type.
 */
static int
hisi_acc_check_int_state(struct hisi_acc_vf_core_device *hisi_acc_vdev)
{
	struct hisi_qm *vfqm = &hisi_acc_vdev->vf_qm;
	struct hisi_qm *qm = hisi_acc_vdev->pf_qm;
	struct pci_dev *vf_pdev = hisi_acc_vdev->vf_dev;
	struct device *dev = &qm->pdev->dev;
	u32 state;

	/* Check RAS state */
	state = qm_check_reg_state(qm, QM_ABNORMAL_INT_STATUS);
	if (state) {
		dev_err(dev, "failed to check QM RAS state!\n");
		return -EBUSY;
	}

	/* Check Function Communication state between PF and VF */
	state = qm_check_reg_state(vfqm, QM_IFC_INT_STATUS);
	if (state) {
		dev_err(dev, "failed to check QM IFC INT state!\n");
		return -EBUSY;
	}
	state = qm_check_reg_state(vfqm, QM_IFC_INT_SET_V);
	if (state) {
		dev_err(dev, "failed to check QM IFC INT SET state!\n");
		return -EBUSY;
	}

	/* Check submodule task state */
	switch (vf_pdev->device) {
	case PCI_DEVICE_ID_HUAWEI_SEC_VF:
		state = qm_check_reg_state(qm, SEC_CORE_INT_STATUS);
		if (state) {
			dev_err(dev, "failed to check QM SEC Core INT state!\n");
			return -EBUSY;
		}
		return 0;
	case PCI_DEVICE_ID_HUAWEI_HPRE_VF:
		state = qm_check_reg_state(qm, HPRE_HAC_INT_STATUS);
		if (state) {
			dev_err(dev, "failed to check QM HPRE HAC INT state!\n");
			return -EBUSY;
		}
		return 0;
	case PCI_DEVICE_ID_HUAWEI_ZIP_VF:
		state = qm_check_reg_state(qm, HZIP_CORE_INT_STATUS);
		if (state) {
			dev_err(dev, "failed to check QM ZIP Core INT state!\n");
			return -EBUSY;
		}
		return 0;
	default:
		dev_err(dev, "failed to detect acc module type!\n");
		return -EINVAL;
	}
}
/* Mark a migration file dead and reset its stream position under its lock. */
static void hisi_acc_vf_disable_fd(struct hisi_acc_vf_migration_file *migf)
{
	mutex_lock(&migf->lock);
	migf->disabled = true;
	migf->total_length = 0;
	migf->filp->f_pos = 0;
	mutex_unlock(&migf->lock);
}
/*
 * Disable and drop our references to both the resuming and saving
 * migration files, if open.  Userspace may still hold its own fd;
 * the files just report -ENODEV from then on.
 */
static void hisi_acc_vf_disable_fds(struct hisi_acc_vf_core_device *hisi_acc_vdev)
{
	if (hisi_acc_vdev->resuming_migf) {
		hisi_acc_vf_disable_fd(hisi_acc_vdev->resuming_migf);
		fput(hisi_acc_vdev->resuming_migf->filp);
		hisi_acc_vdev->resuming_migf = NULL;
	}

	if (hisi_acc_vdev->saving_migf) {
		hisi_acc_vf_disable_fd(hisi_acc_vdev->saving_migf);
		fput(hisi_acc_vdev->saving_migf->filp);
		hisi_acc_vdev->saving_migf = NULL;
	}
}
/*
 * This function is called in all state_mutex unlock cases to
 * handle a 'deferred_reset' if exists.
 *
 * The reset handler (hisi_acc_vf_pci_aer_reset_done()) may not be able to
 * take state_mutex; it then sets deferred_reset and leaves the cleanup to
 * whoever currently holds the mutex.  The loop re-checks after each
 * cleanup pass because a reset can arrive while the fds are being torn
 * down.
 */
static void
hisi_acc_vf_state_mutex_unlock(struct hisi_acc_vf_core_device *hisi_acc_vdev)
{
again:
	spin_lock(&hisi_acc_vdev->reset_lock);
	if (hisi_acc_vdev->deferred_reset) {
		hisi_acc_vdev->deferred_reset = false;
		spin_unlock(&hisi_acc_vdev->reset_lock);
		/* Perform the reset work on behalf of the reset handler. */
		hisi_acc_vdev->vf_qm_state = QM_NOT_READY;
		hisi_acc_vdev->mig_state = VFIO_DEVICE_STATE_RUNNING;
		hisi_acc_vf_disable_fds(hisi_acc_vdev);
		goto again;
	}
	mutex_unlock(&hisi_acc_vdev->state_mutex);
	spin_unlock(&hisi_acc_vdev->reset_lock);
}
/*
 * Restart the VF after a STOP -> RUNNING transition.  A device that was
 * never ready (QM_NOT_READY) has nothing to restart.
 */
static void hisi_acc_vf_start_device(struct hisi_acc_vf_core_device *hisi_acc_vdev)
{
	struct hisi_qm *vf_qm = &hisi_acc_vdev->vf_qm;

	if (hisi_acc_vdev->vf_qm_state != QM_READY)
		return;

	/* Make sure the device is enabled */
	qm_dev_cmd_init(vf_qm);

	vf_qm_fun_reset(vf_qm);
}
/*
 * Apply the state received on the resuming migration file to the VF
 * (RESUMING -> STOP transition).
 */
static int hisi_acc_vf_load_state(struct hisi_acc_vf_core_device *hisi_acc_vdev)
{
	struct device *dev = &hisi_acc_vdev->vf_dev->dev;
	struct hisi_acc_vf_migration_file *migf = hisi_acc_vdev->resuming_migf;
	int ret;

	/* Recover data to VF */
	ret = vf_qm_load_data(hisi_acc_vdev, migf);
	if (ret) {
		dev_err(dev, "failed to recover the VF!\n");
		return ret;
	}

	return 0;
}
/* ->release for both migration files: disable, then free the wrapper. */
static int hisi_acc_vf_release_file(struct inode *inode, struct file *filp)
{
	struct hisi_acc_vf_migration_file *migf = filp->private_data;

	hisi_acc_vf_disable_fd(migf);
	mutex_destroy(&migf->lock);
	kfree(migf);
	return 0;
}
/*
 * write() handler for the RESUMING migration file.
 *
 * Userspace streams the saved device state into migf->vf_data; f_pos
 * tracks how many bytes have been received.  Once at least the match
 * header has arrived, vf_qm_check_match() validates it against the
 * local VF.  Returns the number of bytes consumed or a negative errno.
 */
static ssize_t hisi_acc_vf_resume_write(struct file *filp, const char __user *buf,
					size_t len, loff_t *pos)
{
	struct hisi_acc_vf_migration_file *migf = filp->private_data;
	u8 *vf_data = (u8 *)&migf->vf_data;
	loff_t requested_length;
	ssize_t done = 0;
	int ret;

	/* Stream semantics: only the implicit file position is supported. */
	if (pos)
		return -ESPIPE;
	pos = &filp->f_pos;

	if (*pos < 0 ||
	    check_add_overflow((loff_t)len, *pos, &requested_length))
		return -EINVAL;

	if (requested_length > sizeof(struct acc_vf_data))
		return -ENOMEM;

	mutex_lock(&migf->lock);
	if (migf->disabled) {
		done = -ENODEV;
		goto out_unlock;
	}

	/*
	 * Fix: copy each chunk to the current stream offset.  The previous
	 * code always copied to the start of vf_data, so any transfer done
	 * in more than one write() corrupted the state being restored.
	 */
	ret = copy_from_user(vf_data + *pos, buf, len);
	if (ret) {
		done = -EFAULT;
		goto out_unlock;
	}
	*pos += len;
	done = len;
	migf->total_length += len;

	ret = vf_qm_check_match(migf->hisi_acc_vdev, migf);
	if (ret)
		done = -EFAULT;
out_unlock:
	mutex_unlock(&migf->lock);
	return done;
}
/* File operations for the write-only RESUMING migration fd. */
static const struct file_operations hisi_acc_vf_resume_fops = {
	.owner = THIS_MODULE,
	.write = hisi_acc_vf_resume_write,
	.release = hisi_acc_vf_release_file,
	.llseek = no_llseek,
};
/*
 * Allocate the RESUMING migration file (STOP -> RESUMING transition) and
 * back it with an anonymous inode opened write-only for userspace.
 */
static struct hisi_acc_vf_migration_file *
hisi_acc_vf_pci_resume(struct hisi_acc_vf_core_device *hisi_acc_vdev)
{
	struct hisi_acc_vf_migration_file *migf;

	migf = kzalloc(sizeof(*migf), GFP_KERNEL_ACCOUNT);
	if (!migf)
		return ERR_PTR(-ENOMEM);

	migf->filp = anon_inode_getfile("hisi_acc_vf_mig", &hisi_acc_vf_resume_fops, migf,
					O_WRONLY);
	if (IS_ERR(migf->filp)) {
		int err = PTR_ERR(migf->filp);

		kfree(migf);
		return ERR_PTR(err);
	}

	/* No seeking: reads/writes always go through f_pos sequentially. */
	stream_open(migf->filp->f_inode, migf->filp);
	mutex_init(&migf->lock);
	migf->hisi_acc_vdev = hisi_acc_vdev;
	return migf;
}
/*
 * VFIO_MIG_GET_PRECOPY_INFO on the saving migration fd: report how many
 * bytes of the initial (match) data remain to be read in PRE_COPY.
 * Only valid while the device is in the PRE_COPY state.
 */
static long hisi_acc_vf_precopy_ioctl(struct file *filp,
				      unsigned int cmd, unsigned long arg)
{
	struct hisi_acc_vf_migration_file *migf = filp->private_data;
	struct hisi_acc_vf_core_device *hisi_acc_vdev = migf->hisi_acc_vdev;
	loff_t *pos = &filp->f_pos;
	struct vfio_precopy_info info;
	unsigned long minsz;
	int ret;

	if (cmd != VFIO_MIG_GET_PRECOPY_INFO)
		return -ENOTTY;

	minsz = offsetofend(struct vfio_precopy_info, dirty_bytes);

	if (copy_from_user(&info, (void __user *)arg, minsz))
		return -EFAULT;
	if (info.argsz < minsz)
		return -EINVAL;

	mutex_lock(&hisi_acc_vdev->state_mutex);
	if (hisi_acc_vdev->mig_state != VFIO_DEVICE_STATE_PRE_COPY) {
		mutex_unlock(&hisi_acc_vdev->state_mutex);
		return -EINVAL;
	}

	mutex_lock(&migf->lock);

	if (migf->disabled) {
		ret = -ENODEV;
		goto out;
	}

	if (*pos > migf->total_length) {
		ret = -EINVAL;
		goto out;
	}

	/* This device produces no additional dirty data during PRE_COPY. */
	info.dirty_bytes = 0;
	info.initial_bytes = migf->total_length - *pos;

	ret = copy_to_user((void __user *)arg, &info, minsz) ? -EFAULT : 0;
out:
	mutex_unlock(&migf->lock);
	mutex_unlock(&hisi_acc_vdev->state_mutex);
	return ret;
}
/*
 * read() handler for the saving migration file.
 *
 * Streams migf->vf_data out to userspace, advancing f_pos, so that the
 * state can be consumed in multiple read() calls (e.g. the match header
 * during PRE_COPY and the remainder after STOP_COPY).
 */
static ssize_t hisi_acc_vf_save_read(struct file *filp, char __user *buf, size_t len,
				     loff_t *pos)
{
	struct hisi_acc_vf_migration_file *migf = filp->private_data;
	ssize_t done = 0;
	int ret;

	/* Stream semantics: only the implicit file position is supported. */
	if (pos)
		return -ESPIPE;
	pos = &filp->f_pos;

	mutex_lock(&migf->lock);
	if (*pos > migf->total_length) {
		done = -EINVAL;
		goto out_unlock;
	}

	if (migf->disabled) {
		done = -ENODEV;
		goto out_unlock;
	}

	len = min_t(size_t, migf->total_length - *pos, len);
	if (len) {
		u8 *vf_data = (u8 *)&migf->vf_data;

		/*
		 * Fix: read from the current stream offset.  The previous
		 * code always copied from the start of vf_data, so a
		 * multi-chunk read returned the beginning of the state
		 * repeatedly instead of consecutive bytes.
		 */
		ret = copy_to_user(buf, vf_data + *pos, len);
		if (ret) {
			done = -EFAULT;
			goto out_unlock;
		}
		*pos += len;
		done = len;
	}
out_unlock:
	mutex_unlock(&migf->lock);
	return done;
}
/* File operations for the read-only saving migration fd. */
static const struct file_operations hisi_acc_vf_save_fops = {
	.owner = THIS_MODULE,
	.read = hisi_acc_vf_save_read,
	.unlocked_ioctl = hisi_acc_vf_precopy_ioctl,
	.compat_ioctl = compat_ptr_ioctl,
	.release = hisi_acc_vf_release_file,
	.llseek = no_llseek,
};
/*
 * Allocate the saving migration file, back it with a read-only anonymous
 * inode, and pre-fill it with the match header.  On match-data failure
 * fput() releases the file, which frees migf via ->release.
 */
static struct hisi_acc_vf_migration_file *
hisi_acc_open_saving_migf(struct hisi_acc_vf_core_device *hisi_acc_vdev)
{
	struct hisi_acc_vf_migration_file *migf;
	int ret;

	migf = kzalloc(sizeof(*migf), GFP_KERNEL_ACCOUNT);
	if (!migf)
		return ERR_PTR(-ENOMEM);

	migf->filp = anon_inode_getfile("hisi_acc_vf_mig", &hisi_acc_vf_save_fops, migf,
					O_RDONLY);
	if (IS_ERR(migf->filp)) {
		int err = PTR_ERR(migf->filp);

		kfree(migf);
		return ERR_PTR(err);
	}

	stream_open(migf->filp->f_inode, migf->filp);
	mutex_init(&migf->lock);
	migf->hisi_acc_vdev = hisi_acc_vdev;

	ret = vf_qm_get_match_data(hisi_acc_vdev, &migf->vf_data);
	if (ret) {
		fput(migf->filp);
		return ERR_PTR(ret);
	}

	return migf;
}
/*
 * RUNNING -> PRE_COPY: open the saving file; only the match header is
 * available to read at this point.
 */
static struct hisi_acc_vf_migration_file *
hisi_acc_vf_pre_copy(struct hisi_acc_vf_core_device *hisi_acc_vdev)
{
	struct hisi_acc_vf_migration_file *migf;

	migf = hisi_acc_open_saving_migf(hisi_acc_vdev);
	if (IS_ERR(migf))
		return migf;

	migf->total_length = QM_MATCH_SIZE;
	return migf;
}
/*
 * Enter STOP_COPY: save the full device state into the saving file.
 * @open is true when arriving from STOP (no PRE_COPY, so the saving file
 * must be created here) and false when arriving from PRE_COPY (reuse the
 * already-open file; returns NULL on success in that case).
 */
static struct hisi_acc_vf_migration_file *
hisi_acc_vf_stop_copy(struct hisi_acc_vf_core_device *hisi_acc_vdev, bool open)
{
	int ret;
	struct hisi_acc_vf_migration_file *migf = NULL;

	if (open) {
		/*
		 * Userspace didn't use PRECOPY support. Hence saving_migf
		 * is not opened yet.
		 */
		migf = hisi_acc_open_saving_migf(hisi_acc_vdev);
		if (IS_ERR(migf))
			return migf;
	} else {
		migf = hisi_acc_vdev->saving_migf;
	}

	ret = vf_qm_state_save(hisi_acc_vdev, migf);
	if (ret)
		return ERR_PTR(ret);

	return open ? migf : NULL;
}
/*
 * Quiesce the VF: pause its QM and verify no RAS/interrupt state is
 * pending.  Used on transitions out of RUNNING/PRE_COPY.
 */
static int hisi_acc_vf_stop_device(struct hisi_acc_vf_core_device *hisi_acc_vdev)
{
	struct device *dev = &hisi_acc_vdev->vf_dev->dev;
	struct hisi_qm *vf_qm = &hisi_acc_vdev->vf_qm;
	int ret;

	ret = vf_qm_func_stop(vf_qm);
	if (ret) {
		dev_err(dev, "failed to stop QM VF function!\n");
		return ret;
	}

	ret = hisi_acc_check_int_state(hisi_acc_vdev);
	if (ret) {
		dev_err(dev, "failed to check QM INT state!\n");
		return ret;
	}
	return 0;
}
/*
 * Execute one arc of the VFIO migration state machine.  @new is always
 * one step away from the current state (vfio_mig_get_next_state()
 * guarantees this).  Returns a migration file to hand to userspace for
 * arcs that open one, NULL for arcs that don't, or ERR_PTR on failure.
 * Called with state_mutex held.
 */
static struct file *
hisi_acc_vf_set_device_state(struct hisi_acc_vf_core_device *hisi_acc_vdev,
			     u32 new)
{
	u32 cur = hisi_acc_vdev->mig_state;
	int ret;

	if (cur == VFIO_DEVICE_STATE_RUNNING && new == VFIO_DEVICE_STATE_PRE_COPY) {
		struct hisi_acc_vf_migration_file *migf;

		migf = hisi_acc_vf_pre_copy(hisi_acc_vdev);
		if (IS_ERR(migf))
			return ERR_CAST(migf);
		/* Extra reference held by saving_migf, dropped in disable_fds. */
		get_file(migf->filp);
		hisi_acc_vdev->saving_migf = migf;
		return migf->filp;
	}

	if (cur == VFIO_DEVICE_STATE_PRE_COPY && new == VFIO_DEVICE_STATE_STOP_COPY) {
		struct hisi_acc_vf_migration_file *migf;

		ret = hisi_acc_vf_stop_device(hisi_acc_vdev);
		if (ret)
			return ERR_PTR(ret);

		/* Saving file already open from PRE_COPY; just fill it. */
		migf = hisi_acc_vf_stop_copy(hisi_acc_vdev, false);
		if (IS_ERR(migf))
			return ERR_CAST(migf);

		return NULL;
	}

	if (cur == VFIO_DEVICE_STATE_RUNNING && new == VFIO_DEVICE_STATE_STOP) {
		ret = hisi_acc_vf_stop_device(hisi_acc_vdev);
		if (ret)
			return ERR_PTR(ret);
		return NULL;
	}

	if (cur == VFIO_DEVICE_STATE_STOP && new == VFIO_DEVICE_STATE_STOP_COPY) {
		struct hisi_acc_vf_migration_file *migf;

		migf = hisi_acc_vf_stop_copy(hisi_acc_vdev, true);
		if (IS_ERR(migf))
			return ERR_CAST(migf);
		get_file(migf->filp);
		hisi_acc_vdev->saving_migf = migf;
		return migf->filp;
	}

	if ((cur == VFIO_DEVICE_STATE_STOP_COPY && new == VFIO_DEVICE_STATE_STOP)) {
		hisi_acc_vf_disable_fds(hisi_acc_vdev);
		return NULL;
	}

	if (cur == VFIO_DEVICE_STATE_STOP && new == VFIO_DEVICE_STATE_RESUMING) {
		struct hisi_acc_vf_migration_file *migf;

		migf = hisi_acc_vf_pci_resume(hisi_acc_vdev);
		if (IS_ERR(migf))
			return ERR_CAST(migf);
		get_file(migf->filp);
		hisi_acc_vdev->resuming_migf = migf;
		return migf->filp;
	}

	if (cur == VFIO_DEVICE_STATE_RESUMING && new == VFIO_DEVICE_STATE_STOP) {
		ret = hisi_acc_vf_load_state(hisi_acc_vdev);
		if (ret)
			return ERR_PTR(ret);
		hisi_acc_vf_disable_fds(hisi_acc_vdev);
		return NULL;
	}

	if (cur == VFIO_DEVICE_STATE_PRE_COPY && new == VFIO_DEVICE_STATE_RUNNING) {
		hisi_acc_vf_disable_fds(hisi_acc_vdev);
		return NULL;
	}

	if (cur == VFIO_DEVICE_STATE_STOP && new == VFIO_DEVICE_STATE_RUNNING) {
		hisi_acc_vf_start_device(hisi_acc_vdev);
		return NULL;
	}

	/*
	 * vfio_mig_get_next_state() does not use arcs other than the above
	 */
	WARN_ON(true);
	return ERR_PTR(-EINVAL);
}
/*
 * ->migration_set_state: walk from the current state to @new_state one
 * arc at a time.  Per the VFIO contract, an arc that returns a file must
 * be the final arc; otherwise the partially-opened file is dropped and
 * the transition fails.
 */
static struct file *
hisi_acc_vfio_pci_set_device_state(struct vfio_device *vdev,
				   enum vfio_device_mig_state new_state)
{
	struct hisi_acc_vf_core_device *hisi_acc_vdev = container_of(vdev,
			struct hisi_acc_vf_core_device, core_device.vdev);
	enum vfio_device_mig_state next_state;
	struct file *res = NULL;
	int ret;

	mutex_lock(&hisi_acc_vdev->state_mutex);
	while (new_state != hisi_acc_vdev->mig_state) {
		ret = vfio_mig_get_next_state(vdev,
					      hisi_acc_vdev->mig_state,
					      new_state, &next_state);
		if (ret) {
			res = ERR_PTR(-EINVAL);
			break;
		}

		res = hisi_acc_vf_set_device_state(hisi_acc_vdev, next_state);
		if (IS_ERR(res))
			break;
		hisi_acc_vdev->mig_state = next_state;
		if (WARN_ON(res && new_state != hisi_acc_vdev->mig_state)) {
			fput(res);
			res = ERR_PTR(-EINVAL);
			break;
		}
	}
	hisi_acc_vf_state_mutex_unlock(hisi_acc_vdev);
	return res;
}
/* ->migration_get_data_size: the STOP_COPY payload is one acc_vf_data. */
static int
hisi_acc_vfio_pci_get_data_size(struct vfio_device *vdev,
				unsigned long *stop_copy_length)
{
	*stop_copy_length = sizeof(struct acc_vf_data);
	return 0;
}
/* ->migration_get_state: report the current migration state under lock. */
static int
hisi_acc_vfio_pci_get_device_state(struct vfio_device *vdev,
				   enum vfio_device_mig_state *curr_state)
{
	struct hisi_acc_vf_core_device *hisi_acc_vdev = container_of(vdev,
			struct hisi_acc_vf_core_device, core_device.vdev);

	mutex_lock(&hisi_acc_vdev->state_mutex);
	*curr_state = hisi_acc_vdev->mig_state;
	hisi_acc_vf_state_mutex_unlock(hisi_acc_vdev);
	return 0;
}
/*
 * PCI error-handler ->reset_done: after a reset the device is back in
 * RUNNING with no valid migration files.  Performs the cleanup directly
 * when state_mutex can be taken, otherwise defers it (see
 * hisi_acc_vf_state_mutex_unlock()).
 */
static void hisi_acc_vf_pci_aer_reset_done(struct pci_dev *pdev)
{
	struct hisi_acc_vf_core_device *hisi_acc_vdev = hisi_acc_drvdata(pdev);

	/* Nothing to do when the migration interface isn't active. */
	if (hisi_acc_vdev->core_device.vdev.migration_flags !=
				  VFIO_MIGRATION_STOP_COPY)
		return;

	/*
	 * As the higher VFIO layers are holding locks across reset and using
	 * those same locks with the mm_lock we need to prevent ABBA deadlock
	 * with the state_mutex and mm_lock.
	 * In case the state_mutex was taken already we defer the cleanup work
	 * to the unlock flow of the other running context.
	 */
	spin_lock(&hisi_acc_vdev->reset_lock);
	hisi_acc_vdev->deferred_reset = true;
	if (!mutex_trylock(&hisi_acc_vdev->state_mutex)) {
		spin_unlock(&hisi_acc_vdev->reset_lock);
		return;
	}
	spin_unlock(&hisi_acc_vdev->reset_lock);
	hisi_acc_vf_state_mutex_unlock(hisi_acc_vdev);
}
/*
 * Map the VF's BAR2 (functional + migration control registers) and set up
 * the embedded hisi_qm used for migration mailbox/register access.
 */
static int hisi_acc_vf_qm_init(struct hisi_acc_vf_core_device *hisi_acc_vdev)
{
	struct vfio_pci_core_device *vdev = &hisi_acc_vdev->core_device;
	struct hisi_qm *vf_qm = &hisi_acc_vdev->vf_qm;
	struct pci_dev *vf_dev = vdev->pdev;

	/*
	 * ACC VF dev BAR2 region consists of both functional register space
	 * and migration control register space. For migration to work, we
	 * need access to both. Hence, we map the entire BAR2 region here.
	 * But unnecessarily exposing the migration BAR region to the Guest
	 * has the potential to prevent/corrupt the Guest migration. Hence,
	 * we restrict access to the migration control space from
	 * Guest(Please see mmap/ioctl/read/write override functions).
	 *
	 * Please note that it is OK to expose the entire VF BAR if migration
	 * is not supported or required as this cannot affect the ACC PF
	 * configurations.
	 *
	 * Also the HiSilicon ACC VF devices supported by this driver on
	 * HiSilicon hardware platforms are integrated end point devices
	 * and the platform lacks the capability to perform any PCIe P2P
	 * between these devices.
	 */

	vf_qm->io_base =
		ioremap(pci_resource_start(vf_dev, VFIO_PCI_BAR2_REGION_INDEX),
			pci_resource_len(vf_dev, VFIO_PCI_BAR2_REGION_INDEX));
	if (!vf_qm->io_base)
		return -EIO;

	vf_qm->fun_type = QM_HW_VF;
	vf_qm->pdev = vf_dev;
	mutex_init(&vf_qm->mailbox_lock);

	return 0;
}
/*
 * Look up the PF's hisi_qm for a given VF by asking the matching ACC PF
 * driver (SEC/HPRE/ZIP) for its drvdata.  Returns NULL when @pdev is not
 * a supported VF or the PF driver/drvdata is unavailable.
 */
static struct hisi_qm *hisi_acc_get_pf_qm(struct pci_dev *pdev)
{
	struct hisi_qm	*pf_qm;
	struct pci_driver *pf_driver;

	if (!pdev->is_virtfn)
		return NULL;

	switch (pdev->device) {
	case PCI_DEVICE_ID_HUAWEI_SEC_VF:
		pf_driver = hisi_sec_get_pf_driver();
		break;
	case PCI_DEVICE_ID_HUAWEI_HPRE_VF:
		pf_driver = hisi_hpre_get_pf_driver();
		break;
	case PCI_DEVICE_ID_HUAWEI_ZIP_VF:
		pf_driver = hisi_zip_get_pf_driver();
		break;
	default:
		return NULL;
	}

	if (!pf_driver)
		return NULL;

	pf_qm = pci_iov_get_pf_drvdata(pdev, pf_driver);

	return !IS_ERR(pf_qm) ? pf_qm : NULL;
}
/*
 * Restrict guest read/write access on BAR2 to its first half (the
 * functional registers); the second half holds the migration control
 * registers and must stay hidden.  Clamps @new_count to the allowed
 * window, or returns -EINVAL when the access starts inside it.
 */
static int hisi_acc_pci_rw_access_check(struct vfio_device *core_vdev,
					size_t count, loff_t *ppos,
					size_t *new_count)
{
	unsigned int index = VFIO_PCI_OFFSET_TO_INDEX(*ppos);
	struct vfio_pci_core_device *vdev =
		container_of(core_vdev, struct vfio_pci_core_device, vdev);

	if (index == VFIO_PCI_BAR2_REGION_INDEX) {
		loff_t pos = *ppos & VFIO_PCI_OFFSET_MASK;
		resource_size_t end = pci_resource_len(vdev->pdev, index) / 2;

		/* Check if access is for migration control region */
		if (pos >= end)
			return -EINVAL;

		*new_count = min(count, (size_t)(end - pos));
	}

	return 0;
}
/*
 * ->mmap override: reject any BAR2 mapping that would extend into the
 * migration-control half of the region, then defer to the core handler.
 */
static int hisi_acc_vfio_pci_mmap(struct vfio_device *core_vdev,
				  struct vm_area_struct *vma)
{
	struct vfio_pci_core_device *vdev =
		container_of(core_vdev, struct vfio_pci_core_device, vdev);
	unsigned int index;

	index = vma->vm_pgoff >> (VFIO_PCI_OFFSET_SHIFT - PAGE_SHIFT);
	if (index == VFIO_PCI_BAR2_REGION_INDEX) {
		u64 req_len, pgoff, req_start;
		resource_size_t end = pci_resource_len(vdev->pdev, index) / 2;

		req_len = vma->vm_end - vma->vm_start;
		pgoff = vma->vm_pgoff &
			((1U << (VFIO_PCI_OFFSET_SHIFT - PAGE_SHIFT)) - 1);
		req_start = pgoff << PAGE_SHIFT;

		if (req_start + req_len > end)
			return -EINVAL;
	}

	return vfio_pci_core_mmap(core_vdev, vma);
}
/* ->write override: clamp BAR2 writes to the functional half, then forward. */
static ssize_t hisi_acc_vfio_pci_write(struct vfio_device *core_vdev,
				       const char __user *buf, size_t count,
				       loff_t *ppos)
{
	size_t new_count = count;
	int ret;

	ret = hisi_acc_pci_rw_access_check(core_vdev, count, ppos, &new_count);
	if (ret)
		return ret;

	return vfio_pci_core_write(core_vdev, buf, new_count, ppos);
}
/* ->read override: clamp BAR2 reads to the functional half, then forward. */
static ssize_t hisi_acc_vfio_pci_read(struct vfio_device *core_vdev,
				      char __user *buf, size_t count,
				      loff_t *ppos)
{
	size_t new_count = count;
	int ret;

	ret = hisi_acc_pci_rw_access_check(core_vdev, count, ppos, &new_count);
	if (ret)
		return ret;

	return vfio_pci_core_read(core_vdev, buf, new_count, ppos);
}
/*
 * ->ioctl override: for VFIO_DEVICE_GET_REGION_INFO on BAR2, report only
 * half the real size so the guest never learns about the migration
 * control registers.  Everything else goes to the core handler.
 */
static long hisi_acc_vfio_pci_ioctl(struct vfio_device *core_vdev, unsigned int cmd,
				    unsigned long arg)
{
	if (cmd == VFIO_DEVICE_GET_REGION_INFO) {
		struct vfio_pci_core_device *vdev =
			container_of(core_vdev, struct vfio_pci_core_device, vdev);
		struct pci_dev *pdev = vdev->pdev;
		struct vfio_region_info info;
		unsigned long minsz;

		minsz = offsetofend(struct vfio_region_info, offset);

		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		if (info.argsz < minsz)
			return -EINVAL;

		if (info.index == VFIO_PCI_BAR2_REGION_INDEX) {
			info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);

			/*
			 * ACC VF dev BAR2 region consists of both functional
			 * register space and migration control register space.
			 * Report only the functional region to Guest.
			 */
			info.size = pci_resource_len(pdev, info.index) / 2;

			info.flags = VFIO_REGION_INFO_FLAG_READ |
					VFIO_REGION_INFO_FLAG_WRITE |
					VFIO_REGION_INFO_FLAG_MMAP;

			return copy_to_user((void __user *)arg, &info, minsz) ?
					    -EFAULT : 0;
		}
	}
	return vfio_pci_core_ioctl(core_vdev, cmd, arg);
}
/*
 * ->open_device: enable the core device and, when migration is supported
 * (mig_ops set at init), map the VF QM and start in the RUNNING state.
 */
static int hisi_acc_vfio_pci_open_device(struct vfio_device *core_vdev)
{
	struct hisi_acc_vf_core_device *hisi_acc_vdev = container_of(core_vdev,
			struct hisi_acc_vf_core_device, core_device.vdev);
	struct vfio_pci_core_device *vdev = &hisi_acc_vdev->core_device;
	int ret;

	ret = vfio_pci_core_enable(vdev);
	if (ret)
		return ret;

	if (core_vdev->mig_ops) {
		ret = hisi_acc_vf_qm_init(hisi_acc_vdev);
		if (ret) {
			vfio_pci_core_disable(vdev);
			return ret;
		}
		hisi_acc_vdev->mig_state = VFIO_DEVICE_STATE_RUNNING;
	}

	vfio_pci_core_finish_enable(vdev);

	return 0;
}
/* ->close_device: unmap the VF QM BAR2 mapping, then close the core device. */
static void hisi_acc_vfio_pci_close_device(struct vfio_device *core_vdev)
{
	struct hisi_acc_vf_core_device *hisi_acc_vdev = container_of(core_vdev,
			struct hisi_acc_vf_core_device, core_device.vdev);
	struct hisi_qm *vf_qm = &hisi_acc_vdev->vf_qm;

	iounmap(vf_qm->io_base);
	vfio_pci_core_close_device(core_vdev);
}
/* VFIO migration callbacks for migration-capable VFs. */
static const struct vfio_migration_ops hisi_acc_vfio_pci_migrn_state_ops = {
	.migration_set_state = hisi_acc_vfio_pci_set_device_state,
	.migration_get_state = hisi_acc_vfio_pci_get_device_state,
	.migration_get_data_size = hisi_acc_vfio_pci_get_data_size,
};
/*
 * ->init for migration-capable VFs: record PF/VF identity, set up the
 * state mutex and advertise STOP_COPY + PRE_COPY support.
 * Note: vf_id is 1-based (PF mailbox addressing uses VF index + 1).
 */
static int hisi_acc_vfio_pci_migrn_init_dev(struct vfio_device *core_vdev)
{
	struct hisi_acc_vf_core_device *hisi_acc_vdev = container_of(core_vdev,
			struct hisi_acc_vf_core_device, core_device.vdev);
	struct pci_dev *pdev = to_pci_dev(core_vdev->dev);
	struct hisi_qm *pf_qm = hisi_acc_get_pf_qm(pdev);

	hisi_acc_vdev->vf_id = pci_iov_vf_id(pdev) + 1;
	hisi_acc_vdev->pf_qm = pf_qm;
	hisi_acc_vdev->vf_dev = pdev;
	mutex_init(&hisi_acc_vdev->state_mutex);

	core_vdev->migration_flags = VFIO_MIGRATION_STOP_COPY | VFIO_MIGRATION_PRE_COPY;
	core_vdev->mig_ops = &hisi_acc_vfio_pci_migrn_state_ops;

	return vfio_pci_core_init_dev(core_vdev);
}
/* Device ops used when live migration is available (BAR2 access filtered). */
static const struct vfio_device_ops hisi_acc_vfio_pci_migrn_ops = {
	.name = "hisi-acc-vfio-pci-migration",
	.init = hisi_acc_vfio_pci_migrn_init_dev,
	.release = vfio_pci_core_release_dev,
	.open_device = hisi_acc_vfio_pci_open_device,
	.close_device = hisi_acc_vfio_pci_close_device,
	.ioctl = hisi_acc_vfio_pci_ioctl,
	.device_feature = vfio_pci_core_ioctl_feature,
	.read = hisi_acc_vfio_pci_read,
	.write = hisi_acc_vfio_pci_write,
	.mmap = hisi_acc_vfio_pci_mmap,
	.request = vfio_pci_core_request,
	.match = vfio_pci_core_match,
	.bind_iommufd = vfio_iommufd_physical_bind,
	.unbind_iommufd = vfio_iommufd_physical_unbind,
	.attach_ioas = vfio_iommufd_physical_attach_ioas,
	.detach_ioas = vfio_iommufd_physical_detach_ioas,
};
/* Plain pass-through device ops used when migration is not supported. */
static const struct vfio_device_ops hisi_acc_vfio_pci_ops = {
	.name = "hisi-acc-vfio-pci",
	.init = vfio_pci_core_init_dev,
	.release = vfio_pci_core_release_dev,
	.open_device = hisi_acc_vfio_pci_open_device,
	.close_device = vfio_pci_core_close_device,
	.ioctl = vfio_pci_core_ioctl,
	.device_feature = vfio_pci_core_ioctl_feature,
	.read = vfio_pci_core_read,
	.write = vfio_pci_core_write,
	.mmap = vfio_pci_core_mmap,
	.request = vfio_pci_core_request,
	.match = vfio_pci_core_match,
	.bind_iommufd = vfio_iommufd_physical_bind,
	.unbind_iommufd = vfio_iommufd_physical_unbind,
	.attach_ioas = vfio_iommufd_physical_attach_ioas,
	.detach_ioas = vfio_iommufd_physical_detach_ioas,
};
/*
 * PCI probe: pick the migration-capable ops when the PF is QM_HW_V3+
 * and the VF id can be resolved, otherwise fall back to the generic
 * pass-through ops; then allocate and register the vfio core device.
 */
static int hisi_acc_vfio_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct hisi_acc_vf_core_device *hisi_acc_vdev;
	const struct vfio_device_ops *ops = &hisi_acc_vfio_pci_ops;
	struct hisi_qm *pf_qm;
	int vf_id;
	int ret;

	pf_qm = hisi_acc_get_pf_qm(pdev);
	if (pf_qm && pf_qm->ver >= QM_HW_V3) {
		vf_id = pci_iov_vf_id(pdev);
		if (vf_id >= 0)
			ops = &hisi_acc_vfio_pci_migrn_ops;
		else
			pci_warn(pdev, "migration support failed, continue with generic interface\n");
	}

	hisi_acc_vdev = vfio_alloc_device(hisi_acc_vf_core_device,
					  core_device.vdev, &pdev->dev, ops);
	if (IS_ERR(hisi_acc_vdev))
		return PTR_ERR(hisi_acc_vdev);

	dev_set_drvdata(&pdev->dev, &hisi_acc_vdev->core_device);
	ret = vfio_pci_core_register_device(&hisi_acc_vdev->core_device);
	if (ret)
		goto out_put_vdev;
	return 0;

out_put_vdev:
	vfio_put_device(&hisi_acc_vdev->core_device.vdev);
	return ret;
}
/* PCI remove: unregister the vfio core device and drop the last reference. */
static void hisi_acc_vfio_pci_remove(struct pci_dev *pdev)
{
	struct hisi_acc_vf_core_device *hisi_acc_vdev = hisi_acc_drvdata(pdev);

	vfio_pci_core_unregister_device(&hisi_acc_vdev->core_device);
	vfio_put_device(&hisi_acc_vdev->core_device.vdev);
}
/* Supported HiSilicon ACC VF devices (driver-override binding only). */
static const struct pci_device_id hisi_acc_vfio_pci_table[] = {
	{ PCI_DRIVER_OVERRIDE_DEVICE_VFIO(PCI_VENDOR_ID_HUAWEI, PCI_DEVICE_ID_HUAWEI_SEC_VF) },
	{ PCI_DRIVER_OVERRIDE_DEVICE_VFIO(PCI_VENDOR_ID_HUAWEI, PCI_DEVICE_ID_HUAWEI_HPRE_VF) },
	{ PCI_DRIVER_OVERRIDE_DEVICE_VFIO(PCI_VENDOR_ID_HUAWEI, PCI_DEVICE_ID_HUAWEI_ZIP_VF) },
	{ }
};

MODULE_DEVICE_TABLE(pci, hisi_acc_vfio_pci_table);
/* AER handlers: reset_done clears migration state after a device reset. */
static const struct pci_error_handlers hisi_acc_vf_err_handlers = {
	.reset_done = hisi_acc_vf_pci_aer_reset_done,
	.error_detected = vfio_pci_core_aer_err_detected,
};
/* PCI driver registration and module metadata. */
static struct pci_driver hisi_acc_vfio_pci_driver = {
	.name = KBUILD_MODNAME,
	.id_table = hisi_acc_vfio_pci_table,
	.probe = hisi_acc_vfio_pci_probe,
	.remove = hisi_acc_vfio_pci_remove,
	.err_handler = &hisi_acc_vf_err_handlers,
	.driver_managed_dma = true,
};

module_pci_driver(hisi_acc_vfio_pci_driver);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Liu Longfang <[email protected]>");
MODULE_AUTHOR("Shameer Kolothum <[email protected]>");
MODULE_DESCRIPTION("HiSilicon VFIO PCI - VFIO PCI driver with live migration support for HiSilicon ACC device family");
| linux-master | drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* File attributes for Mediated devices
*
* Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
* Author: Neo Jia <[email protected]>
* Kirti Wankhede <[email protected]>
*/
#include <linux/sysfs.h>
#include <linux/ctype.h>
#include <linux/slab.h>
#include <linux/mdev.h>
#include "mdev_private.h"
/* A sysfs attribute attached to an mdev_type kobject, with typed handlers. */
struct mdev_type_attribute {
	struct attribute attr;
	/* Read handler; may be NULL for write-only attributes. */
	ssize_t (*show)(struct mdev_type *mtype,
			struct mdev_type_attribute *attr, char *buf);
	/* Write handler; may be NULL for read-only attributes. */
	ssize_t (*store)(struct mdev_type *mtype,
			 struct mdev_type_attribute *attr, const char *buf,
			 size_t count);
};
/* Declare read-only / write-only mdev_type attributes, sysfs-style. */
#define MDEV_TYPE_ATTR_RO(_name) \
	struct mdev_type_attribute mdev_type_attr_##_name = __ATTR_RO(_name)
#define MDEV_TYPE_ATTR_WO(_name) \
	struct mdev_type_attribute mdev_type_attr_##_name = __ATTR_WO(_name)
/*
 * Generic sysfs ->show dispatcher: recover the typed attribute and type
 * objects and forward to the attribute's own handler, or fail with -EIO
 * when the attribute has none.
 */
static ssize_t mdev_type_attr_show(struct kobject *kobj,
				   struct attribute *__attr, char *buf)
{
	struct mdev_type_attribute *mattr = to_mdev_type_attr(__attr);
	struct mdev_type *mtype = to_mdev_type(kobj);

	if (!mattr->show)
		return -EIO;

	return mattr->show(mtype, mattr, buf);
}
/*
 * Generic sysfs ->store dispatcher: recover the typed attribute and type
 * objects and forward to the attribute's own handler, or fail with -EIO
 * when the attribute has none.
 */
static ssize_t mdev_type_attr_store(struct kobject *kobj,
				    struct attribute *__attr,
				    const char *buf, size_t count)
{
	struct mdev_type_attribute *mattr = to_mdev_type_attr(__attr);
	struct mdev_type *mtype = to_mdev_type(kobj);

	if (!mattr->store)
		return -EIO;

	return mattr->store(mtype, mattr, buf, count);
}
/* sysfs ops routing kobject attribute access to the typed dispatchers. */
static const struct sysfs_ops mdev_type_sysfs_ops = {
	.show = mdev_type_attr_show,
	.store = mdev_type_attr_store,
};
/*
 * "create" attribute: parse a UUID written by userspace (with or without
 * a trailing newline) and create an mdev device of this type.
 */
static ssize_t create_store(struct mdev_type *mtype,
			    struct mdev_type_attribute *attr, const char *buf,
			    size_t count)
{
	char *str;
	guid_t uuid;
	int ret;

	/* Exactly a UUID string, optionally followed by one extra char ('\n'). */
	if ((count < UUID_STRING_LEN) || (count > UUID_STRING_LEN + 1))
		return -EINVAL;

	str = kstrndup(buf, count, GFP_KERNEL);
	if (!str)
		return -ENOMEM;

	ret = guid_parse(str, &uuid);
	kfree(str);
	if (ret)
		return ret;

	ret = mdev_device_create(mtype, &uuid);
	if (ret)
		return ret;

	return count;
}

static MDEV_TYPE_ATTR_WO(create);
/* "device_api" attribute: the VFIO device API string of the parent driver. */
static ssize_t device_api_show(struct mdev_type *mtype,
			       struct mdev_type_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%s\n", mtype->parent->mdev_driver->device_api);
}

static MDEV_TYPE_ATTR_RO(device_api);
/* "name" attribute: the human-readable name, falling back to sysfs_name. */
static ssize_t name_show(struct mdev_type *mtype,
			 struct mdev_type_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%s\n",
		mtype->pretty_name ? mtype->pretty_name : mtype->sysfs_name);
}

static MDEV_TYPE_ATTR_RO(name);
/*
 * "available_instances" attribute: ask the driver when it provides a
 * per-type callback, otherwise report the parent's shared counter.
 */
static ssize_t available_instances_show(struct mdev_type *mtype,
					struct mdev_type_attribute *attr,
					char *buf)
{
	struct mdev_driver *drv = mtype->parent->mdev_driver;

	if (drv->get_available)
		return sysfs_emit(buf, "%u\n", drv->get_available(mtype));
	return sysfs_emit(buf, "%u\n",
			  atomic_read(&mtype->parent->available_instances));
}

static MDEV_TYPE_ATTR_RO(available_instances);
/*
 * "description" attribute: delegated to the driver.  Only made visible
 * when show_description is implemented (see mdev_types_core_is_visible).
 */
static ssize_t description_show(struct mdev_type *mtype,
				struct mdev_type_attribute *attr,
				char *buf)
{
	return mtype->parent->mdev_driver->show_description(mtype, buf);
}

static MDEV_TYPE_ATTR_RO(description);
/* Default attributes every mdev type directory exposes. */
static struct attribute *mdev_types_core_attrs[] = {
	&mdev_type_attr_create.attr,
	&mdev_type_attr_device_api.attr,
	&mdev_type_attr_name.attr,
	&mdev_type_attr_available_instances.attr,
	&mdev_type_attr_description.attr,
	NULL,
};
static umode_t mdev_types_core_is_visible(struct kobject *kobj,
struct attribute *attr, int n)
{
if (attr == &mdev_type_attr_description.attr &&
!to_mdev_type(kobj)->parent->mdev_driver->show_description)
return 0;
return attr->mode;
}
/* Attribute group (with visibility filter) and the group list for the ktype. */
static struct attribute_group mdev_type_core_group = {
	.attrs = mdev_types_core_attrs,
	.is_visible = mdev_types_core_is_visible,
};

static const struct attribute_group *mdev_type_groups[] = {
	&mdev_type_core_group,
	NULL,
};
/* kobject release for an mdev type: drop the parent-device reference. */
static void mdev_type_release(struct kobject *kobj)
{
	struct mdev_type *type = to_mdev_type(kobj);

	pr_debug("Releasing group %s\n", kobj->name);
	/* Pairs with the get in add_mdev_supported_type() */
	put_device(type->parent->dev);
}
/* ktype backing each mdev_supported_types/<type> kobject. */
static struct kobj_type mdev_type_ktype = {
	.sysfs_ops = &mdev_type_sysfs_ops,
	.release = mdev_type_release,
	.default_groups = mdev_type_groups,
};
/*
 * Register one supported type under the parent's mdev_supported_types kset:
 * create the "<driver>-<sysfs_name>" kobject and its "devices" subdirectory.
 * Returns 0 on success or a negative errno; on failure all partially-created
 * state (including the parent device reference) is undone via kobject_put().
 */
static int mdev_type_add(struct mdev_parent *parent, struct mdev_type *type)
{
	int ret;
	type->kobj.kset = parent->mdev_types_kset;
	type->parent = parent;
	/* Pairs with the put in mdev_type_release() */
	get_device(parent->dev);
	ret = kobject_init_and_add(&type->kobj, &mdev_type_ktype, NULL,
				   "%s-%s", dev_driver_string(parent->dev),
				   type->sysfs_name);
	if (ret) {
		/* kobject was initialized, so release via put (runs release()) */
		kobject_put(&type->kobj);
		return ret;
	}
	type->devices_kobj = kobject_create_and_add("devices", &type->kobj);
	if (!type->devices_kobj) {
		ret = -ENOMEM;
		goto attr_devices_failed;
	}
	return 0;
attr_devices_failed:
	kobject_del(&type->kobj);
	kobject_put(&type->kobj);
	return ret;
}
/* Tear down one supported type: drop "devices" dir, then the type kobject. */
static void mdev_type_remove(struct mdev_type *type)
{
	kobject_put(type->devices_kobj);
	kobject_del(&type->kobj);
	kobject_put(&type->kobj);
}
/* mdev sysfs functions */

/* Remove every registered type and the mdev_supported_types kset. */
void parent_remove_sysfs_files(struct mdev_parent *parent)
{
	int i;
	for (i = 0; i < parent->nr_types; i++)
		mdev_type_remove(parent->types[i]);
	kset_unregister(parent->mdev_types_kset);
}
/*
 * Create the mdev_supported_types kset under the parent device and register
 * one sysfs type directory per supported type.
 *
 * Returns 0 on success or a negative errno.  On failure all state created so
 * far is torn down, including the kset itself.
 *
 * Fix: the error path previously returned 0 after unwinding, so the caller
 * (mdev_register_parent()) treated a failed registration as success, and the
 * mdev_supported_types kset was leaked.  Propagate the error and unregister
 * the kset instead.
 */
int parent_create_sysfs_files(struct mdev_parent *parent)
{
	int ret, i;

	parent->mdev_types_kset = kset_create_and_add("mdev_supported_types",
					NULL, &parent->dev->kobj);
	if (!parent->mdev_types_kset)
		return -ENOMEM;

	for (i = 0; i < parent->nr_types; i++) {
		ret = mdev_type_add(parent, parent->types[i]);
		if (ret)
			goto out_err;
	}
	return 0;

out_err:
	while (--i >= 0)
		mdev_type_remove(parent->types[i]);
	kset_unregister(parent->mdev_types_kset);
	return ret;
}
/*
 * sysfs "remove" attribute: writing a non-zero value destroys this mdev.
 * The attribute removes its own sysfs node first via
 * device_remove_file_self() before tearing the device down.
 */
static ssize_t remove_store(struct device *dev, struct device_attribute *attr,
			    const char *buf, size_t count)
{
	struct mdev_device *mdev = to_mdev_device(dev);
	unsigned long val;

	if (kstrtoul(buf, 0, &val) < 0)
		return -EINVAL;

	if (!val)
		return count;

	if (device_remove_file_self(dev, attr)) {
		int ret = mdev_device_remove(mdev);

		if (ret)
			return ret;
	}

	return count;
}
static DEVICE_ATTR_WO(remove);
/* Per-mdev-device attributes (currently only "remove"). */
static struct attribute *mdev_device_attrs[] = {
	&dev_attr_remove.attr,
	NULL,
};
/* Attribute group wrapping the per-device attributes. */
static const struct attribute_group mdev_device_group = {
	.attrs = mdev_device_attrs,
};
/* Installed as dev.groups on every mdev device at creation time. */
const struct attribute_group *mdev_device_groups[] = {
	&mdev_device_group,
	NULL
};
/*
 * Create the cross links for a new mdev: an entry named after the device in
 * its type's "devices" directory, and an "mdev_type" link back from the
 * device to its type.  Returns 0 or a negative errno; on failure the first
 * link is removed again.
 */
int mdev_create_sysfs_files(struct mdev_device *mdev)
{
	struct mdev_type *type = mdev->type;
	struct kobject *kobj = &mdev->dev.kobj;
	int ret;
	ret = sysfs_create_link(type->devices_kobj, kobj, dev_name(&mdev->dev));
	if (ret)
		return ret;
	ret = sysfs_create_link(kobj, &type->kobj, "mdev_type");
	if (ret)
		goto type_link_failed;
	return ret;
type_link_failed:
	sysfs_remove_link(mdev->type->devices_kobj, dev_name(&mdev->dev));
	return ret;
}
/* Undo mdev_create_sysfs_files(): drop both symlinks for this mdev. */
void mdev_remove_sysfs_files(struct mdev_device *mdev)
{
	struct kobject *kobj = &mdev->dev.kobj;
	sysfs_remove_link(kobj, "mdev_type");
	sysfs_remove_link(mdev->type->devices_kobj, dev_name(&mdev->dev));
}
| linux-master | drivers/vfio/mdev/mdev_sysfs.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* MDEV driver
*
* Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
* Author: Neo Jia <[email protected]>
* Kirti Wankhede <[email protected]>
*/
#include <linux/iommu.h>
#include <linux/mdev.h>
#include "mdev_private.h"
/* Bus probe: dispatch to the mdev driver's probe hook when one exists. */
static int mdev_probe(struct device *dev)
{
	struct mdev_driver *drv =
		container_of(dev->driver, struct mdev_driver, driver);

	if (drv->probe)
		return drv->probe(to_mdev_device(dev));

	/* No probe hook: binding trivially succeeds. */
	return 0;
}
/* Bus remove: dispatch to the mdev driver's remove hook when one exists. */
static void mdev_remove(struct device *dev)
{
	struct mdev_driver *drv =
		container_of(dev->driver, struct mdev_driver, driver);

	if (!drv->remove)
		return;

	drv->remove(to_mdev_device(dev));
}
static int mdev_match(struct device *dev, struct device_driver *drv)
{
/*
* No drivers automatically match. Drivers are only bound by explicit
* device_driver_attach()
*/
return 0;
}
/* The "mdev" virtual bus all mediated devices and their drivers live on. */
struct bus_type mdev_bus_type = {
	.name = "mdev",
	.probe = mdev_probe,
	.remove = mdev_remove,
	.match = mdev_match,
};
/**
 * mdev_register_driver - register a new MDEV driver
 * @drv: the driver to register
 *
 * The driver must provide a device_api string; registration is refused
 * with -EINVAL otherwise.
 *
 * Returns a negative value on error, otherwise 0.
 **/
int mdev_register_driver(struct mdev_driver *drv)
{
	if (!drv->device_api)
		return -EINVAL;
	/* initialize common driver fields */
	drv->driver.bus = &mdev_bus_type;
	return driver_register(&drv->driver);
}
EXPORT_SYMBOL(mdev_register_driver);
/*
 * mdev_unregister_driver - unregister MDEV driver
 * @drv: the driver to unregister
 */
void mdev_unregister_driver(struct mdev_driver *drv)
{
	driver_unregister(&drv->driver);
}
EXPORT_SYMBOL(mdev_unregister_driver);
| linux-master | drivers/vfio/mdev/mdev_driver.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Mediated device Core Driver
*
* Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
* Author: Neo Jia <[email protected]>
* Kirti Wankhede <[email protected]>
*/
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/sysfs.h>
#include <linux/mdev.h>
#include "mdev_private.h"
#define DRIVER_VERSION "0.1"
#define DRIVER_AUTHOR "NVIDIA Corporation"
#define DRIVER_DESC "Mediated device Core Driver"
/* Compatibility class providing the legacy /sys/class/mdev_bus links. */
static struct class_compat *mdev_bus_compat_class;
/* All live mdev devices; additions/removals guarded by mdev_list_lock. */
static LIST_HEAD(mdev_list);
static DEFINE_MUTEX(mdev_list_lock);
/* Caller must hold parent unreg_sem read or write lock */
static void mdev_device_remove_common(struct mdev_device *mdev)
{
	struct mdev_parent *parent = mdev->type->parent;
	/* Remove sysfs links/attrs before unplugging the device itself. */
	mdev_remove_sysfs_files(mdev);
	device_del(&mdev->dev);
	lockdep_assert_held(&parent->unreg_sem);
	/* Balances with device_initialize() */
	put_device(&mdev->dev);
}
/*
 * device_for_each_child() callback: remove children that are mdev devices,
 * skip anything else.  Always returns 0 so iteration continues.
 */
static int mdev_device_remove_cb(struct device *dev, void *data)
{
	if (dev->bus != &mdev_bus_type)
		return 0;

	mdev_device_remove_common(to_mdev_device(dev));
	return 0;
}
/*
 * mdev_register_parent: Register a device as parent for mdevs
 * @parent: parent structure registered
 * @dev: device structure representing parent device.
 * @mdev_driver: Device driver to bind to the newly created mdev
 * @types: Array of supported mdev types
 * @nr_types: Number of entries in @types
 *
 * Registers the @parent stucture as a parent for mdev types and thus mdev
 * devices. The caller needs to hold a reference on @dev that must not be
 * released until after the call to mdev_unregister_parent().
 *
 * Returns a negative value on error, otherwise 0.
 */
int mdev_register_parent(struct mdev_parent *parent, struct device *dev,
		struct mdev_driver *mdev_driver, struct mdev_type **types,
		unsigned int nr_types)
{
	char *env_string = "MDEV_STATE=registered";
	char *envp[] = { env_string, NULL };
	int ret;
	memset(parent, 0, sizeof(*parent));
	init_rwsem(&parent->unreg_sem);
	parent->dev = dev;
	parent->mdev_driver = mdev_driver;
	parent->types = types;
	parent->nr_types = nr_types;
	atomic_set(&parent->available_instances, mdev_driver->max_instances);
	ret = parent_create_sysfs_files(parent);
	if (ret)
		return ret;
	/* The compat link is best-effort: warn but do not fail registration. */
	ret = class_compat_create_link(mdev_bus_compat_class, dev, NULL);
	if (ret)
		dev_warn(dev, "Failed to create compatibility class link\n");
	dev_info(dev, "MDEV: Registered\n");
	/* Notify userspace (udev) of the state change. */
	kobject_uevent_env(&dev->kobj, KOBJ_CHANGE, envp);
	return 0;
}
EXPORT_SYMBOL(mdev_register_parent);
/*
 * mdev_unregister_parent : Unregister a parent device
 * @parent: parent structure to unregister
 *
 * Removes all child mdev devices and the per-type sysfs hierarchy while
 * holding unreg_sem for write, which blocks concurrent mdev creation.
 */
void mdev_unregister_parent(struct mdev_parent *parent)
{
	char *env_string = "MDEV_STATE=unregistered";
	char *envp[] = { env_string, NULL };
	dev_info(parent->dev, "MDEV: Unregistering\n");
	down_write(&parent->unreg_sem);
	class_compat_remove_link(mdev_bus_compat_class, parent->dev, NULL);
	device_for_each_child(parent->dev, NULL, mdev_device_remove_cb);
	parent_remove_sysfs_files(parent);
	up_write(&parent->unreg_sem);
	kobject_uevent_env(&parent->dev->kobj, KOBJ_CHANGE, envp);
}
EXPORT_SYMBOL(mdev_unregister_parent);
/*
 * Final release callback for an mdev device: unlink it from the global
 * list, return its instance slot (when the core counts instances), drop
 * the type kobject reference and free the structure.
 */
static void mdev_device_release(struct device *dev)
{
	struct mdev_device *mdev = to_mdev_device(dev);
	struct mdev_parent *parent = mdev->type->parent;
	mutex_lock(&mdev_list_lock);
	list_del(&mdev->next);
	/* Only core-managed accounting is given back here. */
	if (!parent->mdev_driver->get_available)
		atomic_inc(&parent->available_instances);
	mutex_unlock(&mdev_list_lock);
	/* Pairs with the get in mdev_device_create() */
	kobject_put(&mdev->type->kobj);
	dev_dbg(&mdev->dev, "MDEV: destroying\n");
	kfree(mdev);
}
/*
 * Create an mdev of the given @type identified by @uuid.
 *
 * Rejects duplicate UUIDs (-EEXIST) and, when the core manages instance
 * accounting, exhausted instance budgets (-EUSERS).  The new device is
 * added, bound to the parent's mdev driver and given its sysfs files while
 * holding parent->unreg_sem for read, so creation cannot race with parent
 * unregistration (-ENODEV once that has started).
 *
 * Returns 0 on success or a negative errno; all error paths unwind through
 * put_device(), whose release callback frees the structure.
 */
int mdev_device_create(struct mdev_type *type, const guid_t *uuid)
{
	int ret;
	struct mdev_device *mdev, *tmp;
	struct mdev_parent *parent = type->parent;
	struct mdev_driver *drv = parent->mdev_driver;
	mutex_lock(&mdev_list_lock);
	/* Check for duplicate */
	list_for_each_entry(tmp, &mdev_list, next) {
		if (guid_equal(&tmp->uuid, uuid)) {
			mutex_unlock(&mdev_list_lock);
			return -EEXIST;
		}
	}
	if (!drv->get_available) {
		/*
		 * Note: that non-atomic read and dec is fine here because
		 * all modifications are under mdev_list_lock.
		 */
		if (!atomic_read(&parent->available_instances)) {
			mutex_unlock(&mdev_list_lock);
			return -EUSERS;
		}
		atomic_dec(&parent->available_instances);
	}
	mdev = kzalloc(sizeof(*mdev), GFP_KERNEL);
	if (!mdev) {
		mutex_unlock(&mdev_list_lock);
		return -ENOMEM;
	}
	device_initialize(&mdev->dev);
	mdev->dev.parent  = parent->dev;
	mdev->dev.bus = &mdev_bus_type;
	mdev->dev.release = mdev_device_release;
	mdev->dev.groups = mdev_device_groups;
	mdev->type = type;
	/* Pairs with the put in mdev_device_release() */
	kobject_get(&type->kobj);
	guid_copy(&mdev->uuid, uuid);
	/* Visible on the list before the lock drops, so duplicates are caught. */
	list_add(&mdev->next, &mdev_list);
	mutex_unlock(&mdev_list_lock);
	ret = dev_set_name(&mdev->dev, "%pUl", uuid);
	if (ret)
		goto out_put_device;
	/* Check if parent unregistration has started */
	if (!down_read_trylock(&parent->unreg_sem)) {
		ret = -ENODEV;
		goto out_put_device;
	}
	ret = device_add(&mdev->dev);
	if (ret)
		goto out_unlock;
	ret = device_driver_attach(&drv->driver, &mdev->dev);
	if (ret)
		goto out_del;
	ret = mdev_create_sysfs_files(mdev);
	if (ret)
		goto out_del;
	mdev->active = true;
	dev_dbg(&mdev->dev, "MDEV: created\n");
	up_read(&parent->unreg_sem);
	return 0;
out_del:
	device_del(&mdev->dev);
out_unlock:
	up_read(&parent->unreg_sem);
out_put_device:
	put_device(&mdev->dev);
	return ret;
}
/*
 * Remove an mdev previously created by mdev_device_create().
 *
 * Verifies the device is still on the global list (-ENODEV otherwise) and
 * still active (-EAGAIN if a removal already claimed it), then clears the
 * active flag under mdev_list_lock so only one caller proceeds to the
 * actual teardown.  Fails with -ENODEV if parent unregistration already
 * holds unreg_sem for write.
 */
int mdev_device_remove(struct mdev_device *mdev)
{
	struct mdev_device *tmp;
	struct mdev_parent *parent = mdev->type->parent;
	mutex_lock(&mdev_list_lock);
	list_for_each_entry(tmp, &mdev_list, next) {
		if (tmp == mdev)
			break;
	}
	if (tmp != mdev) {
		mutex_unlock(&mdev_list_lock);
		return -ENODEV;
	}
	if (!mdev->active) {
		mutex_unlock(&mdev_list_lock);
		return -EAGAIN;
	}
	mdev->active = false;
	mutex_unlock(&mdev_list_lock);
	/* Check if parent unregistration has started */
	if (!down_read_trylock(&parent->unreg_sem))
		return -ENODEV;
	mdev_device_remove_common(mdev);
	up_read(&parent->unreg_sem);
	return 0;
}
/* Module init: register the mdev bus and its legacy compat class. */
static int __init mdev_init(void)
{
	int ret;

	ret = bus_register(&mdev_bus_type);
	if (ret)
		return ret;

	mdev_bus_compat_class = class_compat_register("mdev_bus");
	if (mdev_bus_compat_class)
		return 0;

	/* Compat class failed: undo the bus registration. */
	bus_unregister(&mdev_bus_type);
	return -ENOMEM;
}
/* Module exit: tear down init-time registrations in reverse order. */
static void __exit mdev_exit(void)
{
	class_compat_unregister(mdev_bus_compat_class);
	bus_unregister(&mdev_bus_type);
}
subsys_initcall(mdev_init)
module_exit(mdev_exit)
MODULE_VERSION(DRIVER_VERSION);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
| linux-master | drivers/vfio/mdev/mdev_core.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2022-2023, Advanced Micro Devices, Inc.
*/
#include <linux/vfio.h>
#include <linux/cdx/cdx_bus.h>
#include "private.h"
/*
 * open_device hook: snapshot the CDX device's resources into the per-device
 * region table and compute each region's VFIO capability flags (READ always;
 * WRITE unless the resource is read-only; MMAP only for page-aligned
 * regions).  Returns 0 or -ENOMEM.
 */
static int vfio_cdx_open_device(struct vfio_device *core_vdev)
{
	struct vfio_cdx_device *vdev =
		container_of(core_vdev, struct vfio_cdx_device, vdev);
	struct cdx_device *cdx_dev = to_cdx_device(core_vdev->dev);
	int count = cdx_dev->res_count;
	int i;
	vdev->regions = kcalloc(count, sizeof(struct vfio_cdx_region),
				GFP_KERNEL_ACCOUNT);
	if (!vdev->regions)
		return -ENOMEM;
	for (i = 0; i < count; i++) {
		struct resource *res = &cdx_dev->res[i];
		vdev->regions[i].addr = res->start;
		vdev->regions[i].size = resource_size(res);
		vdev->regions[i].type = res->flags;
		/*
		 * Only regions addressed with PAGE granularity may be
		 * MMAP'ed securely.
		 */
		if (!(vdev->regions[i].addr & ~PAGE_MASK) &&
		    !(vdev->regions[i].size & ~PAGE_MASK))
			vdev->regions[i].flags |=
					VFIO_REGION_INFO_FLAG_MMAP;
		vdev->regions[i].flags |= VFIO_REGION_INFO_FLAG_READ;
		if (!(cdx_dev->res[i].flags & IORESOURCE_READONLY))
			vdev->regions[i].flags |= VFIO_REGION_INFO_FLAG_WRITE;
	}
	return 0;
}
/* close_device hook: free the region table and reset the CDX device. */
static void vfio_cdx_close_device(struct vfio_device *core_vdev)
{
	struct vfio_cdx_device *vdev =
		container_of(core_vdev, struct vfio_cdx_device, vdev);
	kfree(vdev->regions);
	cdx_dev_reset(core_vdev->dev);
}
/*
 * VFIO_DEVICE_GET_INFO: report device flags (CDX + RESET), the number of
 * regions and zero IRQs back to userspace.
 */
static int vfio_cdx_ioctl_get_info(struct vfio_cdx_device *vdev,
				   struct vfio_device_info __user *arg)
{
	unsigned long minsz = offsetofend(struct vfio_device_info, num_irqs);
	struct cdx_device *cdx_dev = to_cdx_device(vdev->vdev.dev);
	struct vfio_device_info info;
	if (copy_from_user(&info, arg, minsz))
		return -EFAULT;
	if (info.argsz < minsz)
		return -EINVAL;
	info.flags = VFIO_DEVICE_FLAGS_CDX;
	info.flags |= VFIO_DEVICE_FLAGS_RESET;
	info.num_regions = cdx_dev->res_count;
	info.num_irqs = 0;
	return copy_to_user(arg, &info, minsz) ? -EFAULT : 0;
}
/*
 * VFIO_DEVICE_GET_REGION_INFO: validate the region index and return the
 * region's fixed file offset, size and capability flags.
 */
static int vfio_cdx_ioctl_get_region_info(struct vfio_cdx_device *vdev,
					  struct vfio_region_info __user *arg)
{
	unsigned long minsz = offsetofend(struct vfio_region_info, offset);
	struct cdx_device *cdx_dev = to_cdx_device(vdev->vdev.dev);
	struct vfio_region_info info;
	if (copy_from_user(&info, arg, minsz))
		return -EFAULT;
	if (info.argsz < minsz)
		return -EINVAL;
	if (info.index >= cdx_dev->res_count)
		return -EINVAL;
	/* map offset to the physical address */
	info.offset = vfio_cdx_index_to_offset(info.index);
	info.size = vdev->regions[info.index].size;
	info.flags = vdev->regions[info.index].flags;
	return copy_to_user(arg, &info, minsz) ? -EFAULT : 0;
}
/* Top-level ioctl dispatcher for vfio-cdx devices. */
static long vfio_cdx_ioctl(struct vfio_device *core_vdev,
			   unsigned int cmd, unsigned long arg)
{
	struct vfio_cdx_device *vdev =
		container_of(core_vdev, struct vfio_cdx_device, vdev);
	void __user *uarg = (void __user *)arg;
	switch (cmd) {
	case VFIO_DEVICE_GET_INFO:
		return vfio_cdx_ioctl_get_info(vdev, uarg);
	case VFIO_DEVICE_GET_REGION_INFO:
		return vfio_cdx_ioctl_get_region_info(vdev, uarg);
	case VFIO_DEVICE_RESET:
		return cdx_dev_reset(core_vdev->dev);
	default:
		return -ENOTTY;
	}
}
/*
 * Map (part of) one MMIO region into userspace.  The page offset within the
 * region is recovered from vma->vm_pgoff (the high bits encode the region
 * index, the low bits the offset); the requested window must fit inside the
 * region.  Pages are mapped with device memory attributes.
 */
static int vfio_cdx_mmap_mmio(struct vfio_cdx_region region,
			      struct vm_area_struct *vma)
{
	u64 size = vma->vm_end - vma->vm_start;
	u64 pgoff, base;
	pgoff = vma->vm_pgoff &
		((1U << (VFIO_CDX_OFFSET_SHIFT - PAGE_SHIFT)) - 1);
	base = pgoff << PAGE_SHIFT;
	if (base + size > region.size)
		return -EINVAL;
	vma->vm_pgoff = (region.addr >> PAGE_SHIFT) + pgoff;
	vma->vm_page_prot = pgprot_device(vma->vm_page_prot);
	return io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
				  size, vma->vm_page_prot);
}
/*
 * mmap hook: decode the region index from the vma offset, enforce the
 * region's MMAP/READ/WRITE capability flags against the requested vm_flags,
 * then delegate the actual mapping to vfio_cdx_mmap_mmio().
 */
static int vfio_cdx_mmap(struct vfio_device *core_vdev,
			 struct vm_area_struct *vma)
{
	struct vfio_cdx_device *vdev =
		container_of(core_vdev, struct vfio_cdx_device, vdev);
	struct cdx_device *cdx_dev = to_cdx_device(core_vdev->dev);
	unsigned int index;
	index = vma->vm_pgoff >> (VFIO_CDX_OFFSET_SHIFT - PAGE_SHIFT);
	if (index >= cdx_dev->res_count)
		return -EINVAL;
	if (!(vdev->regions[index].flags & VFIO_REGION_INFO_FLAG_MMAP))
		return -EINVAL;
	if (!(vdev->regions[index].flags & VFIO_REGION_INFO_FLAG_READ) &&
	    (vma->vm_flags & VM_READ))
		return -EPERM;
	if (!(vdev->regions[index].flags & VFIO_REGION_INFO_FLAG_WRITE) &&
	    (vma->vm_flags & VM_WRITE))
		return -EPERM;
	return vfio_cdx_mmap_mmio(vdev->regions[index], vma);
}
/* vfio_device operations for CDX devices, including iommufd physical binding. */
static const struct vfio_device_ops vfio_cdx_ops = {
	.name		= "vfio-cdx",
	.open_device	= vfio_cdx_open_device,
	.close_device	= vfio_cdx_close_device,
	.ioctl		= vfio_cdx_ioctl,
	.mmap		= vfio_cdx_mmap,
	.bind_iommufd	= vfio_iommufd_physical_bind,
	.unbind_iommufd	= vfio_iommufd_physical_unbind,
	.attach_ioas	= vfio_iommufd_physical_attach_ioas,
};
/*
 * CDX bus probe: allocate the vfio device wrapper, register it with the
 * VFIO core and stash it in drvdata.  On registration failure the wrapper
 * reference is dropped again.
 */
static int vfio_cdx_probe(struct cdx_device *cdx_dev)
{
	struct vfio_cdx_device *vdev;
	struct device *dev = &cdx_dev->dev;
	int ret;
	vdev = vfio_alloc_device(vfio_cdx_device, vdev, dev,
				 &vfio_cdx_ops);
	if (IS_ERR(vdev))
		return PTR_ERR(vdev);
	ret = vfio_register_group_dev(&vdev->vdev);
	if (ret)
		goto out_uninit;
	dev_set_drvdata(dev, vdev);
	return 0;
out_uninit:
	vfio_put_device(&vdev->vdev);
	return ret;
}
/* CDX bus remove: unregister from VFIO and drop the wrapper reference. */
static int vfio_cdx_remove(struct cdx_device *cdx_dev)
{
	struct vfio_cdx_device *vdev = dev_get_drvdata(&cdx_dev->dev);

	vfio_unregister_group_dev(&vdev->vdev);
	vfio_put_device(&vdev->vdev);

	return 0;
}
/* Match any CDX device, but only via explicit driver_override binding. */
static const struct cdx_device_id vfio_cdx_table[] = {
	{ CDX_DEVICE_DRIVER_OVERRIDE(CDX_ANY_ID, CDX_ANY_ID,
		      CDX_ID_F_VFIO_DRIVER_OVERRIDE) }, /* match all by default */
	{}
};
MODULE_DEVICE_TABLE(cdx, vfio_cdx_table);
/* CDX bus driver; driver_managed_dma because VFIO owns DMA via the IOMMU. */
static struct cdx_driver vfio_cdx_driver = {
	.probe		= vfio_cdx_probe,
	.remove		= vfio_cdx_remove,
	.match_id_table	= vfio_cdx_table,
	.driver	= {
		.name	= "vfio-cdx",
	},
	.driver_managed_dma = true,
};
module_driver(vfio_cdx_driver, cdx_driver_register, cdx_driver_unregister);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("VFIO for CDX devices - User Level meta-driver");
| linux-master | drivers/vfio/cdx/main.c |
// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/*
* Copyright 2013-2016 Freescale Semiconductor Inc.
* Copyright 2016-2017,2019-2020 NXP
*/
#include <linux/device.h>
#include <linux/iommu.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/vfio.h>
#include <linux/fsl/mc.h>
#include <linux/delay.h>
#include <linux/io-64-nonatomic-hi-lo.h>
#include "vfio_fsl_mc_private.h"
static struct fsl_mc_driver vfio_fsl_mc_driver;
/*
 * open_device hook: build the per-device region table from the fsl-mc
 * object's resources and compute VFIO flags per region (READ always; WRITE
 * unless read-only; MMAP only for page-aligned regions on non-DPRC objects).
 * Returns 0 or -ENOMEM.
 */
static int vfio_fsl_mc_open_device(struct vfio_device *core_vdev)
{
	struct vfio_fsl_mc_device *vdev =
		container_of(core_vdev, struct vfio_fsl_mc_device, vdev);
	struct fsl_mc_device *mc_dev = vdev->mc_dev;
	int count = mc_dev->obj_desc.region_count;
	int i;
	vdev->regions = kcalloc(count, sizeof(struct vfio_fsl_mc_region),
				GFP_KERNEL_ACCOUNT);
	if (!vdev->regions)
		return -ENOMEM;
	for (i = 0; i < count; i++) {
		struct resource *res = &mc_dev->regions[i];
		/* DPRC (container) regions are never exposed for mmap. */
		int no_mmap = is_fsl_mc_bus_dprc(mc_dev);
		vdev->regions[i].addr = res->start;
		vdev->regions[i].size = resource_size(res);
		vdev->regions[i].type = mc_dev->regions[i].flags & IORESOURCE_BITS;
		/*
		 * Only regions addressed with PAGE granularity may be
		 * MMAPed securely.
		 */
		if (!no_mmap && !(vdev->regions[i].addr & ~PAGE_MASK) &&
				!(vdev->regions[i].size & ~PAGE_MASK))
			vdev->regions[i].flags |=
					VFIO_REGION_INFO_FLAG_MMAP;
		vdev->regions[i].flags |= VFIO_REGION_INFO_FLAG_READ;
		if (!(mc_dev->regions[i].flags & IORESOURCE_READONLY))
			vdev->regions[i].flags |= VFIO_REGION_INFO_FLAG_WRITE;
	}
	return 0;
}
/*
 * Unmap every region that was lazily ioremap()ed by the read/write paths
 * and free the region table.  iounmap(NULL) is a no-op for unmapped slots.
 */
static void vfio_fsl_mc_regions_cleanup(struct vfio_fsl_mc_device *vdev)
{
	struct fsl_mc_device *mc_dev = vdev->mc_dev;
	int i;
	for (i = 0; i < mc_dev->obj_desc.region_count; i++)
		iounmap(vdev->regions[i].ioaddr);
	kfree(vdev->regions);
}
/*
 * Reset the fsl-mc object.  A DPRC is reset non-recursively through the
 * dprc API; any other object is reset via open/reset/close on its own MC
 * token.  Returns 0 or a negative MC error.
 */
static int vfio_fsl_mc_reset_device(struct vfio_fsl_mc_device *vdev)
{
	struct fsl_mc_device *mc_dev = vdev->mc_dev;
	int ret = 0;
	if (is_fsl_mc_bus_dprc(vdev->mc_dev)) {
		return dprc_reset_container(mc_dev->mc_io, 0,
					    mc_dev->mc_handle,
					    mc_dev->obj_desc.id,
					    DPRC_RESET_OPTION_NON_RECURSIVE);
	} else {
		u16 token;
		ret = fsl_mc_obj_open(mc_dev->mc_io, 0, mc_dev->obj_desc.id,
				      mc_dev->obj_desc.type,
				      &token);
		if (ret)
			goto out;
		ret = fsl_mc_obj_reset(mc_dev->mc_io, 0, token);
		if (ret) {
			/* Best-effort close; the reset error is what we report. */
			fsl_mc_obj_close(mc_dev->mc_io, 0, token);
			goto out;
		}
		ret = fsl_mc_obj_close(mc_dev->mc_io, 0, token);
	}
out:
	return ret;
}
/*
 * close_device hook: release region mappings, reset the device, then clean
 * up its interrupts and the container's IRQ pool.
 */
static void vfio_fsl_mc_close_device(struct vfio_device *core_vdev)
{
	struct vfio_fsl_mc_device *vdev =
		container_of(core_vdev, struct vfio_fsl_mc_device, vdev);
	struct fsl_mc_device *mc_dev = vdev->mc_dev;
	struct device *cont_dev = fsl_mc_cont_dev(&mc_dev->dev);
	struct fsl_mc_device *mc_cont = to_fsl_mc_device(cont_dev);
	int ret;
	vfio_fsl_mc_regions_cleanup(vdev);
	/* reset the device before cleaning up the interrupts */
	ret = vfio_fsl_mc_reset_device(vdev);
	if (ret)
		dev_warn(&mc_cont->dev,
			 "VFIO_FSL_MC: reset device has failed (%d)\n", ret);
	vfio_fsl_mc_irqs_cleanup(vdev);
	fsl_mc_cleanup_irq_pool(mc_cont);
}
/*
 * Top-level ioctl dispatcher for vfio-fsl-mc devices.  Handles GET_INFO,
 * GET_REGION_INFO, GET_IRQ_INFO, SET_IRQS and RESET; anything else is
 * -ENOTTY.  Each handler validates argsz/index before copying results back.
 */
static long vfio_fsl_mc_ioctl(struct vfio_device *core_vdev,
			      unsigned int cmd, unsigned long arg)
{
	unsigned long minsz;
	struct vfio_fsl_mc_device *vdev =
		container_of(core_vdev, struct vfio_fsl_mc_device, vdev);
	struct fsl_mc_device *mc_dev = vdev->mc_dev;
	switch (cmd) {
	case VFIO_DEVICE_GET_INFO:
	{
		struct vfio_device_info info;
		minsz = offsetofend(struct vfio_device_info, num_irqs);
		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;
		if (info.argsz < minsz)
			return -EINVAL;
		info.flags = VFIO_DEVICE_FLAGS_FSL_MC;
		/* Only the DPRC (container) object supports RESET here. */
		if (is_fsl_mc_bus_dprc(mc_dev))
			info.flags |= VFIO_DEVICE_FLAGS_RESET;
		info.num_regions = mc_dev->obj_desc.region_count;
		info.num_irqs = mc_dev->obj_desc.irq_count;
		return copy_to_user((void __user *)arg, &info, minsz) ?
			-EFAULT : 0;
	}
	case VFIO_DEVICE_GET_REGION_INFO:
	{
		struct vfio_region_info info;
		minsz = offsetofend(struct vfio_region_info, offset);
		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;
		if (info.argsz < minsz)
			return -EINVAL;
		if (info.index >= mc_dev->obj_desc.region_count)
			return -EINVAL;
		/* map offset to the physical address */
		info.offset = VFIO_FSL_MC_INDEX_TO_OFFSET(info.index);
		info.size = vdev->regions[info.index].size;
		info.flags = vdev->regions[info.index].flags;
		if (copy_to_user((void __user *)arg, &info, minsz))
			return -EFAULT;
		return 0;
	}
	case VFIO_DEVICE_GET_IRQ_INFO:
	{
		struct vfio_irq_info info;
		minsz = offsetofend(struct vfio_irq_info, count);
		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;
		if (info.argsz < minsz)
			return -EINVAL;
		if (info.index >= mc_dev->obj_desc.irq_count)
			return -EINVAL;
		/* Each index is a single eventfd-capable interrupt. */
		info.flags = VFIO_IRQ_INFO_EVENTFD;
		info.count = 1;
		if (copy_to_user((void __user *)arg, &info, minsz))
			return -EFAULT;
		return 0;
	}
	case VFIO_DEVICE_SET_IRQS:
	{
		struct vfio_irq_set hdr;
		u8 *data = NULL;
		int ret = 0;
		size_t data_size = 0;
		minsz = offsetofend(struct vfio_irq_set, count);
		if (copy_from_user(&hdr, (void __user *)arg, minsz))
			return -EFAULT;
		/* Validates flags/index/start/count and sizes the payload. */
		ret = vfio_set_irqs_validate_and_prepare(&hdr, mc_dev->obj_desc.irq_count,
					mc_dev->obj_desc.irq_count, &data_size);
		if (ret)
			return ret;
		if (data_size) {
			data = memdup_user((void __user *)(arg + minsz),
				   data_size);
			if (IS_ERR(data))
				return PTR_ERR(data);
		}
		mutex_lock(&vdev->igate);
		ret = vfio_fsl_mc_set_irqs_ioctl(vdev, hdr.flags,
						 hdr.index, hdr.start,
						 hdr.count, data);
		mutex_unlock(&vdev->igate);
		kfree(data);
		return ret;
	}
	case VFIO_DEVICE_RESET:
	{
		return vfio_fsl_mc_reset_device(vdev);
	}
	default:
		return -ENOTTY;
	}
}
/*
 * read hook: only whole 64-byte reads at offset 0 of a readable region are
 * supported (the MC command portal format).  The region is ioremap()ed on
 * first use; the eight 64-bit words are read back-to-front.
 */
static ssize_t vfio_fsl_mc_read(struct vfio_device *core_vdev, char __user *buf,
				size_t count, loff_t *ppos)
{
	struct vfio_fsl_mc_device *vdev =
		container_of(core_vdev, struct vfio_fsl_mc_device, vdev);
	unsigned int index = VFIO_FSL_MC_OFFSET_TO_INDEX(*ppos);
	loff_t off = *ppos & VFIO_FSL_MC_OFFSET_MASK;
	struct fsl_mc_device *mc_dev = vdev->mc_dev;
	struct vfio_fsl_mc_region *region;
	u64 data[8];
	int i;
	if (index >= mc_dev->obj_desc.region_count)
		return -EINVAL;
	region = &vdev->regions[index];
	if (!(region->flags & VFIO_REGION_INFO_FLAG_READ))
		return -EINVAL;
	if (!region->ioaddr) {
		/* Lazily map the region; kept mapped until regions_cleanup(). */
		region->ioaddr = ioremap(region->addr, region->size);
		if (!region->ioaddr)
			return -ENOMEM;
	}
	if (count != 64 || off != 0)
		return -EINVAL;
	for (i = 7; i >= 0; i--)
		data[i] = readq(region->ioaddr + i * sizeof(uint64_t));
	if (copy_to_user(buf, data, 64))
		return -EFAULT;
	return count;
}
/* Maximum time to wait for an MC command to complete, and the poll step. */
#define MC_CMD_COMPLETION_TIMEOUT_MS	5000
#define MC_CMD_COMPLETION_POLLING_MAX_SLEEP_USECS	500
/*
 * Write a 64-byte MC command to the portal at @ioaddr and busy-poll until
 * the status field leaves MC_CMD_STATUS_READY or the timeout expires.
 * The parameter words are written first; the header word last, with a
 * non-relaxed writeq so it is ordered after the parameters.
 * Returns 0 on completion or -ETIMEDOUT.
 */
static int vfio_fsl_mc_send_command(void __iomem *ioaddr, uint64_t *cmd_data)
{
	int i;
	enum mc_cmd_status status;
	unsigned long timeout_usecs = MC_CMD_COMPLETION_TIMEOUT_MS * 1000;
	/* Write at command parameter into portal */
	for (i = 7; i >= 1; i--)
		writeq_relaxed(cmd_data[i], ioaddr + i * sizeof(uint64_t));
	/* Write command header in the end */
	writeq(cmd_data[0], ioaddr);
	/* Wait for response before returning to user-space
	 * This can be optimized in future to even prepare response
	 * before returning to user-space and avoid read ioctl.
	 */
	for (;;) {
		u64 header;
		struct mc_cmd_header *resp_hdr;
		/*
		 * NOTE(review): cpu_to_le64() on a value read from the device
		 * looks like it should be le64_to_cpu(); both are byte swaps
		 * of the same direction (no-ops on little-endian), so the
		 * result is the same — confirm intent on big-endian.
		 */
		header = cpu_to_le64(readq_relaxed(ioaddr));
		resp_hdr = (struct mc_cmd_header *)&header;
		status = (enum mc_cmd_status)resp_hdr->status;
		if (status != MC_CMD_STATUS_READY)
			break;
		udelay(MC_CMD_COMPLETION_POLLING_MAX_SLEEP_USECS);
		timeout_usecs -= MC_CMD_COMPLETION_POLLING_MAX_SLEEP_USECS;
		if (timeout_usecs == 0)
			return -ETIMEDOUT;
	}
	return 0;
}
/*
 * write hook: only whole 64-byte writes at offset 0 of a writable region
 * are supported; the buffer is treated as an MC command and submitted via
 * vfio_fsl_mc_send_command(), which waits for completion.
 */
static ssize_t vfio_fsl_mc_write(struct vfio_device *core_vdev,
				 const char __user *buf, size_t count,
				 loff_t *ppos)
{
	struct vfio_fsl_mc_device *vdev =
		container_of(core_vdev, struct vfio_fsl_mc_device, vdev);
	unsigned int index = VFIO_FSL_MC_OFFSET_TO_INDEX(*ppos);
	loff_t off = *ppos & VFIO_FSL_MC_OFFSET_MASK;
	struct fsl_mc_device *mc_dev = vdev->mc_dev;
	struct vfio_fsl_mc_region *region;
	u64 data[8];
	int ret;
	if (index >= mc_dev->obj_desc.region_count)
		return -EINVAL;
	region = &vdev->regions[index];
	if (!(region->flags & VFIO_REGION_INFO_FLAG_WRITE))
		return -EINVAL;
	if (!region->ioaddr) {
		/* Lazily map the region; kept mapped until regions_cleanup(). */
		region->ioaddr = ioremap(region->addr, region->size);
		if (!region->ioaddr)
			return -ENOMEM;
	}
	if (count != 64 || off != 0)
		return -EINVAL;
	if (copy_from_user(&data, buf, 64))
		return -EFAULT;
	ret = vfio_fsl_mc_send_command(region->ioaddr, data);
	if (ret)
		return ret;
	return count;
}
/*
 * Map (part of) one fsl-mc region into userspace.  The in-region page
 * offset comes from the low bits of vma->vm_pgoff; the window must fit in
 * the region.  Regions flagged both CACHEABLE and SHAREABLE keep normal
 * memory attributes, everything else is mapped non-cached.
 */
static int vfio_fsl_mc_mmap_mmio(struct vfio_fsl_mc_region region,
				 struct vm_area_struct *vma)
{
	u64 size = vma->vm_end - vma->vm_start;
	u64 pgoff, base;
	u8 region_cacheable;
	pgoff = vma->vm_pgoff &
		((1U << (VFIO_FSL_MC_OFFSET_SHIFT - PAGE_SHIFT)) - 1);
	base = pgoff << PAGE_SHIFT;
	if (region.size < PAGE_SIZE || base + size > region.size)
		return -EINVAL;
	region_cacheable = (region.type & FSL_MC_REGION_CACHEABLE) &&
			   (region.type & FSL_MC_REGION_SHAREABLE);
	if (!region_cacheable)
		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	vma->vm_pgoff = (region.addr >> PAGE_SHIFT) + pgoff;
	return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
			       size, vma->vm_page_prot);
}
/*
 * mmap hook: validate the vma (shared, page-aligned, non-inverted), decode
 * the region index from the offset, enforce the region's MMAP/READ/WRITE
 * flags, then delegate to vfio_fsl_mc_mmap_mmio().
 */
static int vfio_fsl_mc_mmap(struct vfio_device *core_vdev,
			    struct vm_area_struct *vma)
{
	struct vfio_fsl_mc_device *vdev =
		container_of(core_vdev, struct vfio_fsl_mc_device, vdev);
	struct fsl_mc_device *mc_dev = vdev->mc_dev;
	unsigned int index;
	index = vma->vm_pgoff >> (VFIO_FSL_MC_OFFSET_SHIFT - PAGE_SHIFT);
	if (vma->vm_end < vma->vm_start)
		return -EINVAL;
	if (vma->vm_start & ~PAGE_MASK)
		return -EINVAL;
	if (vma->vm_end & ~PAGE_MASK)
		return -EINVAL;
	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;
	if (index >= mc_dev->obj_desc.region_count)
		return -EINVAL;
	if (!(vdev->regions[index].flags & VFIO_REGION_INFO_FLAG_MMAP))
		return -EINVAL;
	if (!(vdev->regions[index].flags & VFIO_REGION_INFO_FLAG_READ)
			&& (vma->vm_flags & VM_READ))
		return -EINVAL;
	if (!(vdev->regions[index].flags & VFIO_REGION_INFO_FLAG_WRITE)
			&& (vma->vm_flags & VM_WRITE))
		return -EINVAL;
	vma->vm_private_data = mc_dev;
	return vfio_fsl_mc_mmap_mmio(vdev->regions[index], vma);
}
static const struct vfio_device_ops vfio_fsl_mc_ops;
/*
 * fsl-mc bus notifier installed for DPRC devices: newly discovered child
 * objects of our container get driver_override set to "vfio-fsl-mc" so
 * they bind to this driver, and a warning is logged if a child somehow
 * binds to a different driver.
 */
static int vfio_fsl_mc_bus_notifier(struct notifier_block *nb,
				    unsigned long action, void *data)
{
	struct vfio_fsl_mc_device *vdev = container_of(nb,
					struct vfio_fsl_mc_device, nb);
	struct device *dev = data;
	struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev);
	struct fsl_mc_device *mc_cont = to_fsl_mc_device(mc_dev->dev.parent);
	if (action == BUS_NOTIFY_ADD_DEVICE &&
	    vdev->mc_dev == mc_cont) {
		mc_dev->driver_override = kasprintf(GFP_KERNEL, "%s",
						    vfio_fsl_mc_ops.name);
		if (!mc_dev->driver_override)
			dev_warn(dev, "VFIO_FSL_MC: Setting driver override for device in dprc %s failed\n",
				 dev_name(&mc_cont->dev));
		else
			dev_info(dev, "VFIO_FSL_MC: Setting driver override for device in dprc %s\n",
				 dev_name(&mc_cont->dev));
	} else if (action == BUS_NOTIFY_BOUND_DRIVER &&
		vdev->mc_dev == mc_cont) {
		struct fsl_mc_driver *mc_drv = to_fsl_mc_driver(dev->driver);
		if (mc_drv && mc_drv != &vfio_fsl_mc_driver)
			dev_warn(dev, "VFIO_FSL_MC: Object %s bound to driver %s while DPRC bound to vfio-fsl-mc\n",
				 dev_name(dev), mc_drv->driver.name);
	}
	return 0;
}
/*
 * Per-device init: non-DPRC objects borrow the parent container's MC I/O
 * portal; DPRC objects register the bus notifier and set up their own
 * portal via dprc_setup().  Returns 0 or a negative errno, unwinding the
 * notifier on dprc_setup() failure.
 */
static int vfio_fsl_mc_init_device(struct vfio_fsl_mc_device *vdev)
{
	struct fsl_mc_device *mc_dev = vdev->mc_dev;
	int ret;
	/* Non-dprc devices share mc_io from parent */
	if (!is_fsl_mc_bus_dprc(mc_dev)) {
		struct fsl_mc_device *mc_cont = to_fsl_mc_device(mc_dev->dev.parent);
		mc_dev->mc_io = mc_cont->mc_io;
		return 0;
	}
	vdev->nb.notifier_call = vfio_fsl_mc_bus_notifier;
	ret = bus_register_notifier(&fsl_mc_bus_type, &vdev->nb);
	if (ret)
		return ret;
	/* open DPRC, allocate a MC portal */
	ret = dprc_setup(mc_dev);
	if (ret) {
		dev_err(&mc_dev->dev, "VFIO_FSL_MC: Failed to setup DPRC (%d)\n", ret);
		goto out_nc_unreg;
	}
	return 0;
out_nc_unreg:
	bus_unregister_notifier(&fsl_mc_bus_type, &vdev->nb);
	return ret;
}
/*
 * Scan a DPRC container for child objects.  Non-DPRC objects have nothing
 * to scan and succeed immediately.  On scan failure any partially created
 * children are removed before the error is returned.
 */
static int vfio_fsl_mc_scan_container(struct fsl_mc_device *mc_dev)
{
	int ret;

	/* non dprc devices do not scan for other devices */
	if (!is_fsl_mc_bus_dprc(mc_dev))
		return 0;

	ret = dprc_scan_container(mc_dev, false);
	if (!ret)
		return 0;

	dev_err(&mc_dev->dev,
		"VFIO_FSL_MC: Container scanning failed (%d)\n", ret);
	dprc_remove_devices(mc_dev, NULL, 0);
	return ret;
}
/* Undo vfio_fsl_mc_init_device(); only DPRC devices created any state. */
static void vfio_fsl_uninit_device(struct vfio_fsl_mc_device *vdev)
{
	struct fsl_mc_device *mc_dev = vdev->mc_dev;

	if (is_fsl_mc_bus_dprc(mc_dev)) {
		dprc_cleanup(mc_dev);
		bus_unregister_notifier(&fsl_mc_bus_type, &vdev->nb);
	}
}
/*
 * vfio_device init hook: record the mc_dev, init the IRQ mutex, join the
 * correct device set (DPRCs form their own set, children share their
 * parent's) and finish with device-specific setup.
 */
static int vfio_fsl_mc_init_dev(struct vfio_device *core_vdev)
{
	struct vfio_fsl_mc_device *vdev =
		container_of(core_vdev, struct vfio_fsl_mc_device, vdev);
	struct fsl_mc_device *mc_dev = to_fsl_mc_device(core_vdev->dev);
	int ret;
	vdev->mc_dev = mc_dev;
	mutex_init(&vdev->igate);
	if (is_fsl_mc_bus_dprc(mc_dev))
		ret = vfio_assign_device_set(core_vdev, &mc_dev->dev);
	else
		ret = vfio_assign_device_set(core_vdev, mc_dev->dev.parent);
	if (ret)
		return ret;
	/* device_set is released by vfio core if @init fails */
	return vfio_fsl_mc_init_device(vdev);
}
/*
 * fsl-mc bus probe: allocate the vfio wrapper, register with the VFIO
 * core, then (for DPRCs) scan the container for children.  Errors unwind
 * in reverse order.
 */
static int vfio_fsl_mc_probe(struct fsl_mc_device *mc_dev)
{
	struct vfio_fsl_mc_device *vdev;
	struct device *dev = &mc_dev->dev;
	int ret;
	vdev = vfio_alloc_device(vfio_fsl_mc_device, vdev, dev,
				 &vfio_fsl_mc_ops);
	if (IS_ERR(vdev))
		return PTR_ERR(vdev);
	ret = vfio_register_group_dev(&vdev->vdev);
	if (ret) {
		dev_err(dev, "VFIO_FSL_MC: Failed to add to vfio group\n");
		goto out_put_vdev;
	}
	ret = vfio_fsl_mc_scan_container(mc_dev);
	if (ret)
		goto out_group_dev;
	dev_set_drvdata(dev, vdev);
	return 0;
out_group_dev:
	vfio_unregister_group_dev(&vdev->vdev);
out_put_vdev:
	vfio_put_device(&vdev->vdev);
	return ret;
}
/* vfio_device release hook: undo init-time setup and destroy the mutex. */
static void vfio_fsl_mc_release_dev(struct vfio_device *core_vdev)
{
	struct vfio_fsl_mc_device *vdev =
		container_of(core_vdev, struct vfio_fsl_mc_device, vdev);
	vfio_fsl_uninit_device(vdev);
	mutex_destroy(&vdev->igate);
}
/*
 * fsl-mc bus remove: unregister from VFIO, remove any child devices a
 * DPRC scan created, then drop the wrapper reference.
 */
static void vfio_fsl_mc_remove(struct fsl_mc_device *mc_dev)
{
	struct device *dev = &mc_dev->dev;
	struct vfio_fsl_mc_device *vdev = dev_get_drvdata(dev);
	vfio_unregister_group_dev(&vdev->vdev);
	dprc_remove_devices(mc_dev, NULL, 0);
	vfio_put_device(&vdev->vdev);
}
/* vfio_device operations for fsl-mc devices, with iommufd physical binding. */
static const struct vfio_device_ops vfio_fsl_mc_ops = {
	.name		= "vfio-fsl-mc",
	.init		= vfio_fsl_mc_init_dev,
	.release	= vfio_fsl_mc_release_dev,
	.open_device	= vfio_fsl_mc_open_device,
	.close_device	= vfio_fsl_mc_close_device,
	.ioctl		= vfio_fsl_mc_ioctl,
	.read		= vfio_fsl_mc_read,
	.write		= vfio_fsl_mc_write,
	.mmap		= vfio_fsl_mc_mmap,
	.bind_iommufd	= vfio_iommufd_physical_bind,
	.unbind_iommufd	= vfio_iommufd_physical_unbind,
	.attach_ioas	= vfio_iommufd_physical_attach_ioas,
	.detach_ioas	= vfio_iommufd_physical_detach_ioas,
};
/* fsl-mc bus driver; driver_managed_dma because VFIO owns DMA via the IOMMU. */
static struct fsl_mc_driver vfio_fsl_mc_driver = {
	.probe		= vfio_fsl_mc_probe,
	.remove		= vfio_fsl_mc_remove,
	.driver	= {
		.name	= "vfio-fsl-mc",
	},
	.driver_managed_dma = true,
};
module_fsl_mc_driver(vfio_fsl_mc_driver);
MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("VFIO for FSL-MC devices - User Level meta-driver");
| linux-master | drivers/vfio/fsl-mc/vfio_fsl_mc.c |
// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/*
* Copyright 2013-2016 Freescale Semiconductor Inc.
* Copyright 2019 NXP
*/
#include <linux/vfio.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/eventfd.h>
#include "linux/fsl/mc.h"
#include "vfio_fsl_mc_private.h"
/*
 * Lazily build the per-IRQ bookkeeping for an fsl-mc object and pull the
 * hardware IRQs from the MC bus pool.  Safe to call more than once:
 * subsequent calls are no-ops once vdev->mc_irqs is populated.
 */
static int vfio_fsl_mc_irqs_allocate(struct vfio_fsl_mc_device *vdev)
{
	struct fsl_mc_device *mc_dev = vdev->mc_dev;
	struct vfio_fsl_mc_irq *irqs;
	int nr_irqs, err, idx;

	nr_irqs = mc_dev->obj_desc.irq_count;

	/* Nothing to do for IRQ-less objects. */
	if (!nr_irqs)
		return 0;

	/* Interrupts were already allocated for this device. */
	if (vdev->mc_irqs)
		return 0;

	irqs = kcalloc(nr_irqs, sizeof(*irqs), GFP_KERNEL_ACCOUNT);
	if (!irqs)
		return -ENOMEM;

	err = fsl_mc_allocate_irqs(mc_dev);
	if (err) {
		kfree(irqs);
		return err;
	}

	for (idx = 0; idx < nr_irqs; idx++) {
		irqs[idx].count = 1;
		irqs[idx].flags = VFIO_IRQ_INFO_EVENTFD;
	}

	vdev->mc_irqs = irqs;
	return 0;
}
/* Hard-IRQ handler: relay the interrupt to userspace via its eventfd. */
static irqreturn_t vfio_fsl_mc_irq_handler(int irq_num, void *arg)
{
	struct vfio_fsl_mc_irq *irq = arg;

	eventfd_signal(irq->trigger, 1);
	return IRQ_HANDLED;
}
/*
 * (Re)arm the eventfd trigger for IRQ @index, or tear it down when
 * @fd < 0.  A previously installed handler/eventfd is released first, so
 * calling this with a valid fd swaps the trigger atomically from the
 * caller's perspective.  Returns 0 on success or a negative errno.
 */
static int vfio_set_trigger(struct vfio_fsl_mc_device *vdev,
			    int index, int fd)
{
	struct vfio_fsl_mc_irq *irq = &vdev->mc_irqs[index];
	struct eventfd_ctx *trigger;
	int hwirq;
	int ret;

	hwirq = vdev->mc_dev->irqs[index]->virq;
	/* Free any handler/eventfd installed by an earlier call. */
	if (irq->trigger) {
		free_irq(hwirq, irq);
		kfree(irq->name);
		eventfd_ctx_put(irq->trigger);
		irq->trigger = NULL;
	}

	if (fd < 0) /* Disable only */
		return 0;

	irq->name = kasprintf(GFP_KERNEL_ACCOUNT, "vfio-irq[%d](%s)",
			    hwirq, dev_name(&vdev->mc_dev->dev));
	if (!irq->name)
		return -ENOMEM;

	trigger = eventfd_ctx_fdget(fd);
	if (IS_ERR(trigger)) {
		kfree(irq->name);
		return PTR_ERR(trigger);
	}

	/*
	 * irq->trigger must be set before request_irq(): the handler
	 * signals it and may fire as soon as the line is requested.
	 */
	irq->trigger = trigger;

	ret = request_irq(hwirq, vfio_fsl_mc_irq_handler, 0,
		  irq->name, irq);
	if (ret) {
		kfree(irq->name);
		eventfd_ctx_put(trigger);
		irq->trigger = NULL;
		return ret;
	}

	return 0;
}
/*
 * Handle VFIO_IRQ_SET_ACTION_TRIGGER for a single IRQ index.  The first
 * call lazily populates the container's IRQ pool and the per-device IRQ
 * bookkeeping (under the dev_set lock).  DATA_EVENTFD installs/removes
 * an eventfd trigger; DATA_NONE/DATA_BOOL inject the interrupt into
 * userspace by calling the handler directly.
 */
static int vfio_fsl_mc_set_irq_trigger(struct vfio_fsl_mc_device *vdev,
				       unsigned int index, unsigned int start,
				       unsigned int count, u32 flags,
				       void *data)
{
	struct fsl_mc_device *mc_dev = vdev->mc_dev;
	int ret, hwirq;
	struct vfio_fsl_mc_irq *irq;
	struct device *cont_dev = fsl_mc_cont_dev(&mc_dev->dev);
	struct fsl_mc_device *mc_cont = to_fsl_mc_device(cont_dev);

	/* count == 0 with DATA_NONE means "disable this trigger". */
	if (!count && (flags & VFIO_IRQ_SET_DATA_NONE))
		return vfio_set_trigger(vdev, index, -1);

	if (start != 0 || count != 1)
		return -EINVAL;

	mutex_lock(&vdev->vdev.dev_set->lock);
	ret = fsl_mc_populate_irq_pool(mc_cont,
			FSL_MC_IRQ_POOL_MAX_TOTAL_IRQS);
	if (ret)
		goto unlock;

	ret = vfio_fsl_mc_irqs_allocate(vdev);
	if (ret)
		goto unlock;
	mutex_unlock(&vdev->vdev.dev_set->lock);

	if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
		s32 fd = *(s32 *)data;

		return vfio_set_trigger(vdev, index, fd);
	}

	hwirq = vdev->mc_dev->irqs[index]->virq;

	irq = &vdev->mc_irqs[index];

	if (flags & VFIO_IRQ_SET_DATA_NONE) {
		/* Unconditional userspace-requested injection. */
		vfio_fsl_mc_irq_handler(hwirq, irq);

	} else if (flags & VFIO_IRQ_SET_DATA_BOOL) {
		u8 trigger = *(u8 *)data;

		if (trigger)
			vfio_fsl_mc_irq_handler(hwirq, irq);
	}

	return 0;

unlock:
	mutex_unlock(&vdev->vdev.dev_set->lock);
	return ret;
}
/*
 * VFIO_DEVICE_SET_IRQS backend.  Only the TRIGGER action is implemented
 * for fsl-mc devices; mask/unmask requests are rejected with -EINVAL.
 */
int vfio_fsl_mc_set_irqs_ioctl(struct vfio_fsl_mc_device *vdev,
			       u32 flags, unsigned int index,
			       unsigned int start, unsigned int count,
			       void *data)
{
	if (!(flags & VFIO_IRQ_SET_ACTION_TRIGGER))
		return -EINVAL;

	return vfio_fsl_mc_set_irq_trigger(vdev, index, start, count,
					   flags, data);
}
/* Release every IRQ owned by the MC object and free the bookkeeping. */
void vfio_fsl_mc_irqs_cleanup(struct vfio_fsl_mc_device *vdev)
{
	struct fsl_mc_device *mc_dev = vdev->mc_dev;
	int idx;

	/* No interrupts on this object, or they were never configured. */
	if (!vdev->mc_irqs)
		return;

	for (idx = 0; idx < mc_dev->obj_desc.irq_count; idx++)
		vfio_set_trigger(vdev, idx, -1);

	fsl_mc_free_irqs(mc_dev);
	kfree(vdev->mc_irqs);
	vdev->mc_irqs = NULL;
}
| linux-master | drivers/vfio/fsl-mc/vfio_fsl_mc_intr.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2013 - Virtual Open Systems
* Author: Antonios Motakis <[email protected]>
*/
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vfio.h>
#include <linux/pm_runtime.h>
#include <linux/platform_device.h>
#include "vfio_platform_private.h"
#define DRIVER_VERSION "0.10"
#define DRIVER_AUTHOR "Antonios Motakis <[email protected]>"
#define DRIVER_DESC "VFIO for platform devices - User Level meta-driver"
/* When true (default), devices without a working reset handler are refused. */
static bool reset_required = true;
module_param(reset_required, bool, 0444);
MODULE_PARM_DESC(reset_required, "override reset requirement (default: 1)");
/* probing devices from the linux platform bus */
/* Fetch the num'th MEM or IO resource of the underlying platform device. */
static struct resource *get_platform_resource(struct vfio_platform_device *vdev,
					      int num)
{
	struct platform_device *pdev = vdev->opaque;

	return platform_get_mem_or_io(pdev, num);
}
/* Look up the i'th IRQ of the platform device (negative errno if absent). */
static int get_platform_irq(struct vfio_platform_device *vdev, int i)
{
	struct platform_device *pdev = vdev->opaque;

	return platform_get_irq_optional(pdev, i);
}
/*
 * vfio_device .init callback: wire up the platform-bus resource/IRQ
 * accessors, then run the init path shared with vfio-amba.
 */
static int vfio_platform_init_dev(struct vfio_device *core_vdev)
{
	struct vfio_platform_device *vdev =
		container_of(core_vdev, struct vfio_platform_device, vdev);
	struct platform_device *pdev = to_platform_device(core_vdev->dev);

	vdev->opaque = (void *) pdev;
	vdev->name = pdev->name;
	vdev->flags = VFIO_DEVICE_FLAGS_PLATFORM;
	vdev->get_resource = get_platform_resource;
	vdev->get_irq = get_platform_irq;
	vdev->reset_required = reset_required;

	return vfio_platform_init_common(vdev);
}
static const struct vfio_device_ops vfio_platform_ops;
/*
 * Platform bus probe: allocate the vfio_device (vfio_alloc_device runs
 * the .init callback), register it with the VFIO core, then enable
 * runtime PM.  The allocation is released via the .release callback
 * once the last reference is dropped.
 */
static int vfio_platform_probe(struct platform_device *pdev)
{
	struct vfio_platform_device *vdev;
	int ret;

	vdev = vfio_alloc_device(vfio_platform_device, vdev, &pdev->dev,
				 &vfio_platform_ops);
	if (IS_ERR(vdev))
		return PTR_ERR(vdev);

	ret = vfio_register_group_dev(&vdev->vdev);
	if (ret)
		goto out_put_vdev;

	pm_runtime_enable(&pdev->dev);
	dev_set_drvdata(&pdev->dev, vdev);
	return 0;

out_put_vdev:
	vfio_put_device(&vdev->vdev);
	return ret;
}
/* Called when the core drops the last reference to the vfio_device. */
static void vfio_platform_release_dev(struct vfio_device *core_vdev)
{
	struct vfio_platform_device *pvdev;

	pvdev = container_of(core_vdev, struct vfio_platform_device, vdev);
	vfio_platform_release_common(pvdev);
}
/* Platform bus unbind: unwind probe in reverse order. */
static int vfio_platform_remove(struct platform_device *pdev)
{
	struct vfio_platform_device *vdev = dev_get_drvdata(&pdev->dev);

	vfio_unregister_group_dev(&vdev->vdev);
	pm_runtime_disable(vdev->device);
	vfio_put_device(&vdev->vdev);
	return 0;
}
/*
 * VFIO device callbacks for platform devices.  The shared vfio_platform_*
 * implementations live in vfio_platform_common.c; the iommufd physical
 * helpers provide the IOMMUFD backend alongside the legacy group path.
 */
static const struct vfio_device_ops vfio_platform_ops = {
	.name = "vfio-platform",
	.init = vfio_platform_init_dev,
	.release = vfio_platform_release_dev,
	.open_device = vfio_platform_open_device,
	.close_device = vfio_platform_close_device,
	.ioctl = vfio_platform_ioctl,
	.read = vfio_platform_read,
	.write = vfio_platform_write,
	.mmap = vfio_platform_mmap,
	.bind_iommufd = vfio_iommufd_physical_bind,
	.unbind_iommufd = vfio_iommufd_physical_unbind,
	.attach_ioas = vfio_iommufd_physical_attach_ioas,
	.detach_ioas = vfio_iommufd_physical_detach_ioas,
};
/*
 * Platform bus driver glue.  driver_managed_dma tells the driver core
 * that this driver manages DMA ownership of the device itself.
 */
static struct platform_driver vfio_platform_driver = {
	.probe		= vfio_platform_probe,
	.remove		= vfio_platform_remove,
	.driver	= {
		.name	= "vfio-platform",
	},
	.driver_managed_dma = true,
};
module_platform_driver(vfio_platform_driver);
MODULE_VERSION(DRIVER_VERSION);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
| linux-master | drivers/vfio/platform/vfio_platform.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2013 - Virtual Open Systems
* Author: Antonios Motakis <[email protected]>
*/
#define dev_fmt(fmt) "VFIO: " fmt
#include <linux/device.h>
#include <linux/acpi.h>
#include <linux/iommu.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#include <linux/vfio.h>
#include "vfio_platform_private.h"
#define DRIVER_VERSION "0.10"
#define DRIVER_AUTHOR "Antonios Motakis <[email protected]>"
#define DRIVER_DESC "VFIO platform base module"
#define VFIO_PLATFORM_IS_ACPI(vdev) ((vdev)->acpihid != NULL)
static LIST_HEAD(reset_list);
static DEFINE_MUTEX(driver_lock);
static vfio_platform_reset_fn_t vfio_platform_lookup_reset(const char *compat,
struct module **module)
{
struct vfio_platform_reset_node *iter;
vfio_platform_reset_fn_t reset_fn = NULL;
mutex_lock(&driver_lock);
list_for_each_entry(iter, &reset_list, link) {
if (!strcmp(iter->compat, compat) &&
try_module_get(iter->owner)) {
*module = iter->owner;
reset_fn = iter->of_reset;
break;
}
}
mutex_unlock(&driver_lock);
return reset_fn;
}
/*
 * ACPI path of device discovery: cache the ACPI HID used later for
 * reset handling.  Returns -ENOENT on DT-booted systems (acpi_disabled)
 * so the caller can fall back to the OF probe.
 */
static int vfio_platform_acpi_probe(struct vfio_platform_device *vdev,
				    struct device *dev)
{
	struct acpi_device *adev;

	if (acpi_disabled)
		return -ENOENT;

	adev = ACPI_COMPANION(dev);
	if (!adev) {
		dev_err(dev, "ACPI companion device not found for %s\n",
			vdev->name);
		return -ENODEV;
	}

#ifdef CONFIG_ACPI
	vdev->acpihid = acpi_device_hid(adev);
#endif
	/* acpihid stays NULL only if CONFIG_ACPI is off despite !acpi_disabled */
	return WARN_ON(!vdev->acpihid) ? -EINVAL : 0;
}
/*
 * Reset the device by evaluating its ACPI _RST method.  On failure the
 * ACPI exception string is reported through @extra_dbg (when non-NULL).
 */
static int vfio_platform_acpi_call_reset(struct vfio_platform_device *vdev,
				  const char **extra_dbg)
{
#ifdef CONFIG_ACPI
	struct device *dev = vdev->device;
	acpi_handle handle = ACPI_HANDLE(dev);
	acpi_status acpi_ret;

	acpi_ret = acpi_evaluate_object(handle, "_RST", NULL, NULL);
	if (ACPI_FAILURE(acpi_ret)) {
		if (extra_dbg)
			*extra_dbg = acpi_format_exception(acpi_ret);
		return -EINVAL;
	}

	return 0;
#else
	return -ENOENT;
#endif
}
/* True when the device's ACPI namespace provides a _RST method. */
static bool vfio_platform_acpi_has_reset(struct vfio_platform_device *vdev)
{
#ifdef CONFIG_ACPI
	struct device *dev = vdev->device;
	acpi_handle handle = ACPI_HANDLE(dev);

	return acpi_has_method(handle, "_RST");
#else
	return false;
#endif
}
static bool vfio_platform_has_reset(struct vfio_platform_device *vdev)
{
if (VFIO_PLATFORM_IS_ACPI(vdev))
return vfio_platform_acpi_has_reset(vdev);
return vdev->of_reset ? true : false;
}
/*
 * Resolve a reset implementation for the device.  ACPI devices rely on
 * _RST; OF devices search the registered reset handlers, loading the
 * matching "vfio-reset:<compat>" module on demand and retrying.
 * Returns 0 when a reset is available, -ENOENT otherwise.
 */
static int vfio_platform_get_reset(struct vfio_platform_device *vdev)
{
	if (VFIO_PLATFORM_IS_ACPI(vdev))
		return vfio_platform_acpi_has_reset(vdev) ? 0 : -ENOENT;

	vdev->of_reset = vfio_platform_lookup_reset(vdev->compat,
						    &vdev->reset_module);
	if (!vdev->of_reset) {
		request_module("vfio-reset:%s", vdev->compat);
		vdev->of_reset = vfio_platform_lookup_reset(vdev->compat,
							&vdev->reset_module);
	}

	return vdev->of_reset ? 0 : -ENOENT;
}
/* Drop the module reference taken for an OF reset handler, if any. */
static void vfio_platform_put_reset(struct vfio_platform_device *vdev)
{
	if (!VFIO_PLATFORM_IS_ACPI(vdev) && vdev->of_reset)
		module_put(vdev->reset_module);
}
static int vfio_platform_regions_init(struct vfio_platform_device *vdev)
{
int cnt = 0, i;
while (vdev->get_resource(vdev, cnt))
cnt++;
vdev->regions = kcalloc(cnt, sizeof(struct vfio_platform_region),
GFP_KERNEL_ACCOUNT);
if (!vdev->regions)
return -ENOMEM;
for (i = 0; i < cnt; i++) {
struct resource *res =
vdev->get_resource(vdev, i);
vdev->regions[i].addr = res->start;
vdev->regions[i].size = resource_size(res);
vdev->regions[i].flags = 0;
switch (resource_type(res)) {
case IORESOURCE_MEM:
vdev->regions[i].type = VFIO_PLATFORM_REGION_TYPE_MMIO;
vdev->regions[i].flags |= VFIO_REGION_INFO_FLAG_READ;
if (!(res->flags & IORESOURCE_READONLY))
vdev->regions[i].flags |=
VFIO_REGION_INFO_FLAG_WRITE;
/*
* Only regions addressed with PAGE granularity may be
* MMAPed securely.
*/
if (!(vdev->regions[i].addr & ~PAGE_MASK) &&
!(vdev->regions[i].size & ~PAGE_MASK))
vdev->regions[i].flags |=
VFIO_REGION_INFO_FLAG_MMAP;
break;
case IORESOURCE_IO:
vdev->regions[i].type = VFIO_PLATFORM_REGION_TYPE_PIO;
break;
default:
goto err;
}
}
vdev->num_regions = cnt;
return 0;
err:
kfree(vdev->regions);
return -EINVAL;
}
/* Unmap any regions that were lazily ioremap'ed and free the table. */
static void vfio_platform_regions_cleanup(struct vfio_platform_device *vdev)
{
	unsigned int idx;

	for (idx = 0; idx < vdev->num_regions; idx++)
		iounmap(vdev->regions[idx].ioaddr);

	vdev->num_regions = 0;
	kfree(vdev->regions);
}
/*
 * Invoke the device reset: ACPI _RST when the device was ACPI-probed,
 * otherwise the OF reset handler if one was found.  Returns -EINVAL
 * when no reset mechanism exists.
 */
static int vfio_platform_call_reset(struct vfio_platform_device *vdev,
				    const char **extra_dbg)
{
	if (VFIO_PLATFORM_IS_ACPI(vdev)) {
		dev_info(vdev->device, "reset\n");
		return vfio_platform_acpi_call_reset(vdev, extra_dbg);
	} else if (vdev->of_reset) {
		dev_info(vdev->device, "reset\n");
		return vdev->of_reset(vdev);
	}

	dev_warn(vdev->device, "no reset function found!\n");
	return -EINVAL;
}
/*
 * vfio_device .close_device callback: attempt a reset so the device is
 * quiesced before being handed back, then drop the runtime-PM reference
 * and free the region/IRQ state built at open time.
 */
void vfio_platform_close_device(struct vfio_device *core_vdev)
{
	struct vfio_platform_device *vdev =
		container_of(core_vdev, struct vfio_platform_device, vdev);
	const char *extra_dbg = NULL;
	int ret;

	ret = vfio_platform_call_reset(vdev, &extra_dbg);
	if (WARN_ON(ret && vdev->reset_required)) {
		dev_warn(
			vdev->device,
			"reset driver is required and reset call failed in release (%d) %s\n",
			ret, extra_dbg ? extra_dbg : "");
	}
	pm_runtime_put(vdev->device);
	vfio_platform_regions_cleanup(vdev);
	vfio_platform_irq_cleanup(vdev);
}
EXPORT_SYMBOL_GPL(vfio_platform_close_device);
/*
 * vfio_device .open_device callback: build the region and IRQ tables,
 * power the device up, and reset it into a known state.  When a reset
 * handler is mandatory (reset_required) a failed reset aborts the open.
 */
int vfio_platform_open_device(struct vfio_device *core_vdev)
{
	struct vfio_platform_device *vdev =
		container_of(core_vdev, struct vfio_platform_device, vdev);
	const char *extra_dbg = NULL;
	int ret;

	ret = vfio_platform_regions_init(vdev);
	if (ret)
		return ret;

	ret = vfio_platform_irq_init(vdev);
	if (ret)
		goto err_irq;

	ret = pm_runtime_get_sync(vdev->device);
	if (ret < 0)
		goto err_rst;

	ret = vfio_platform_call_reset(vdev, &extra_dbg);
	if (ret && vdev->reset_required) {
		dev_warn(
			vdev->device,
			"reset driver is required and reset call failed in open (%d) %s\n",
			ret, extra_dbg ? extra_dbg : "");
		goto err_rst;
	}

	return 0;

err_rst:
	/* pm_runtime_get_sync() bumps the usage count even on failure. */
	pm_runtime_put(vdev->device);
	vfio_platform_irq_cleanup(vdev);
err_irq:
	vfio_platform_regions_cleanup(vdev);
	return ret;
}
EXPORT_SYMBOL_GPL(vfio_platform_open_device);
/*
 * vfio_device .ioctl callback implementing the VFIO device protocol:
 * GET_INFO, GET_REGION_INFO, GET_IRQ_INFO, SET_IRQS and RESET.  Each
 * command copies in a minimal-size header, validates argsz, and copies
 * the result back; unknown commands return -ENOTTY.
 */
long vfio_platform_ioctl(struct vfio_device *core_vdev,
			 unsigned int cmd, unsigned long arg)
{
	struct vfio_platform_device *vdev =
		container_of(core_vdev, struct vfio_platform_device, vdev);
	unsigned long minsz;

	if (cmd == VFIO_DEVICE_GET_INFO) {
		struct vfio_device_info info;

		minsz = offsetofend(struct vfio_device_info, num_irqs);

		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		if (info.argsz < minsz)
			return -EINVAL;

		/* Advertise reset capability only if a handler exists. */
		if (vfio_platform_has_reset(vdev))
			vdev->flags |= VFIO_DEVICE_FLAGS_RESET;
		info.flags = vdev->flags;
		info.num_regions = vdev->num_regions;
		info.num_irqs = vdev->num_irqs;

		return copy_to_user((void __user *)arg, &info, minsz) ?
			-EFAULT : 0;

	} else if (cmd == VFIO_DEVICE_GET_REGION_INFO) {
		struct vfio_region_info info;

		minsz = offsetofend(struct vfio_region_info, offset);

		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		if (info.argsz < minsz)
			return -EINVAL;

		if (info.index >= vdev->num_regions)
			return -EINVAL;

		/* map offset to the physical address */
		info.offset = VFIO_PLATFORM_INDEX_TO_OFFSET(info.index);
		info.size = vdev->regions[info.index].size;
		info.flags = vdev->regions[info.index].flags;

		return copy_to_user((void __user *)arg, &info, minsz) ?
			-EFAULT : 0;

	} else if (cmd == VFIO_DEVICE_GET_IRQ_INFO) {
		struct vfio_irq_info info;

		minsz = offsetofend(struct vfio_irq_info, count);

		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		if (info.argsz < minsz)
			return -EINVAL;

		if (info.index >= vdev->num_irqs)
			return -EINVAL;

		info.flags = vdev->irqs[info.index].flags;
		info.count = vdev->irqs[info.index].count;

		return copy_to_user((void __user *)arg, &info, minsz) ?
			-EFAULT : 0;

	} else if (cmd == VFIO_DEVICE_SET_IRQS) {
		struct vfio_irq_set hdr;
		u8 *data = NULL;
		int ret = 0;
		size_t data_size = 0;

		minsz = offsetofend(struct vfio_irq_set, count);

		if (copy_from_user(&hdr, (void __user *)arg, minsz))
			return -EFAULT;

		/* Validates hdr and computes the trailing payload size. */
		ret = vfio_set_irqs_validate_and_prepare(&hdr, vdev->num_irqs,
						 vdev->num_irqs, &data_size);
		if (ret)
			return ret;

		if (data_size) {
			data = memdup_user((void __user *)(arg + minsz),
					    data_size);
			if (IS_ERR(data))
				return PTR_ERR(data);
		}

		/* igate serializes IRQ reconfiguration. */
		mutex_lock(&vdev->igate);

		ret = vfio_platform_set_irqs_ioctl(vdev, hdr.flags, hdr.index,
						   hdr.start, hdr.count, data);
		mutex_unlock(&vdev->igate);
		kfree(data);

		return ret;

	} else if (cmd == VFIO_DEVICE_RESET) {
		return vfio_platform_call_reset(vdev, NULL);
	}

	return -ENOTTY;
}
EXPORT_SYMBOL_GPL(vfio_platform_ioctl);
/*
 * Copy @count bytes from an MMIO region to userspace using the widest
 * naturally-aligned accesses possible (4/2/1 bytes).  The region is
 * ioremap'ed lazily on first access.  Returns bytes read or -errno.
 */
static ssize_t vfio_platform_read_mmio(struct vfio_platform_region *reg,
				       char __user *buf, size_t count,
				       loff_t off)
{
	unsigned int done = 0;

	if (!reg->ioaddr) {
		reg->ioaddr =
			ioremap(reg->addr, reg->size);

		if (!reg->ioaddr)
			return -ENOMEM;
	}

	while (count) {
		size_t filled;

		if (count >= 4 && !(off % 4)) {
			u32 val;

			val = ioread32(reg->ioaddr + off);
			if (copy_to_user(buf, &val, 4))
				goto err;

			filled = 4;
		} else if (count >= 2 && !(off % 2)) {
			u16 val;

			val = ioread16(reg->ioaddr + off);
			if (copy_to_user(buf, &val, 2))
				goto err;

			filled = 2;
		} else {
			u8 val;

			val = ioread8(reg->ioaddr + off);
			if (copy_to_user(buf, &val, 1))
				goto err;

			filled = 1;
		}

		count -= filled;
		done += filled;
		off += filled;
		buf += filled;
	}

	return done;
err:
	return -EFAULT;
}
/* VFIO read entry point: dispatch to the region-type-specific reader. */
ssize_t vfio_platform_read(struct vfio_device *core_vdev,
			   char __user *buf, size_t count, loff_t *ppos)
{
	struct vfio_platform_device *vdev =
		container_of(core_vdev, struct vfio_platform_device, vdev);
	unsigned int index = VFIO_PLATFORM_OFFSET_TO_INDEX(*ppos);
	loff_t off = *ppos & VFIO_PLATFORM_OFFSET_MASK;
	struct vfio_platform_region *region;

	if (index >= vdev->num_regions)
		return -EINVAL;

	region = &vdev->regions[index];
	if (!(region->flags & VFIO_REGION_INFO_FLAG_READ))
		return -EINVAL;

	if (region->type & VFIO_PLATFORM_REGION_TYPE_MMIO)
		return vfio_platform_read_mmio(region, buf, count, off);

	/* PIO regions are recognised but not implemented. */
	return -EINVAL;
}
EXPORT_SYMBOL_GPL(vfio_platform_read);
/*
 * Copy @count bytes from userspace into an MMIO region using the widest
 * naturally-aligned accesses possible (4/2/1 bytes).  The region is
 * ioremap'ed lazily on first access.  Returns bytes written or -errno.
 */
static ssize_t vfio_platform_write_mmio(struct vfio_platform_region *reg,
					const char __user *buf, size_t count,
					loff_t off)
{
	unsigned int done = 0;

	if (!reg->ioaddr) {
		reg->ioaddr =
			ioremap(reg->addr, reg->size);

		if (!reg->ioaddr)
			return -ENOMEM;
	}

	while (count) {
		size_t filled;

		if (count >= 4 && !(off % 4)) {
			u32 val;

			if (copy_from_user(&val, buf, 4))
				goto err;
			iowrite32(val, reg->ioaddr + off);

			filled = 4;
		} else if (count >= 2 && !(off % 2)) {
			u16 val;

			if (copy_from_user(&val, buf, 2))
				goto err;
			iowrite16(val, reg->ioaddr + off);

			filled = 2;
		} else {
			u8 val;

			if (copy_from_user(&val, buf, 1))
				goto err;
			iowrite8(val, reg->ioaddr + off);

			filled = 1;
		}

		count -= filled;
		done += filled;
		off += filled;
		buf += filled;
	}

	return done;
err:
	return -EFAULT;
}
/* VFIO write entry point: dispatch to the region-type-specific writer. */
ssize_t vfio_platform_write(struct vfio_device *core_vdev, const char __user *buf,
			    size_t count, loff_t *ppos)
{
	struct vfio_platform_device *vdev =
		container_of(core_vdev, struct vfio_platform_device, vdev);
	unsigned int index = VFIO_PLATFORM_OFFSET_TO_INDEX(*ppos);
	loff_t off = *ppos & VFIO_PLATFORM_OFFSET_MASK;
	struct vfio_platform_region *region;

	if (index >= vdev->num_regions)
		return -EINVAL;

	region = &vdev->regions[index];
	if (!(region->flags & VFIO_REGION_INFO_FLAG_WRITE))
		return -EINVAL;

	if (region->type & VFIO_PLATFORM_REGION_TYPE_MMIO)
		return vfio_platform_write_mmio(region, buf, count, off);

	/* PIO regions are recognised but not implemented. */
	return -EINVAL;
}
EXPORT_SYMBOL_GPL(vfio_platform_write);
/*
 * Back an mmap of an MMIO region with a non-cached remap of the
 * physical range.  Rejects requests that exceed the region bounds or
 * target a sub-page region.
 */
static int vfio_platform_mmap_mmio(struct vfio_platform_region region,
				   struct vm_area_struct *vma)
{
	u64 req_len, pgoff, req_start;

	req_len = vma->vm_end - vma->vm_start;
	/* Page offset of the mapping inside the region. */
	pgoff = vma->vm_pgoff &
		((1U << (VFIO_PLATFORM_OFFSET_SHIFT - PAGE_SHIFT)) - 1);
	req_start = pgoff << PAGE_SHIFT;

	if (region.size < PAGE_SIZE || req_start + req_len > region.size)
		return -EINVAL;

	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	vma->vm_pgoff = (region.addr >> PAGE_SHIFT) + pgoff;

	return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
			       req_len, vma->vm_page_prot);
}
/*
 * vfio_device .mmap callback: validate the request against the target
 * region's flags and bounds, then delegate to the MMIO mapper.  The
 * region index is encoded in the upper bits of vm_pgoff.
 */
int vfio_platform_mmap(struct vfio_device *core_vdev, struct vm_area_struct *vma)
{
	struct vfio_platform_device *vdev =
		container_of(core_vdev, struct vfio_platform_device, vdev);
	unsigned int index;

	index = vma->vm_pgoff >> (VFIO_PLATFORM_OFFSET_SHIFT - PAGE_SHIFT);

	if (vma->vm_end < vma->vm_start)
		return -EINVAL;
	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;
	if (index >= vdev->num_regions)
		return -EINVAL;
	if (vma->vm_start & ~PAGE_MASK)
		return -EINVAL;
	if (vma->vm_end & ~PAGE_MASK)
		return -EINVAL;

	if (!(vdev->regions[index].flags & VFIO_REGION_INFO_FLAG_MMAP))
		return -EINVAL;

	/* Requested access must not exceed what the region permits. */
	if (!(vdev->regions[index].flags & VFIO_REGION_INFO_FLAG_READ)
			&& (vma->vm_flags & VM_READ))
		return -EINVAL;

	if (!(vdev->regions[index].flags & VFIO_REGION_INFO_FLAG_WRITE)
			&& (vma->vm_flags & VM_WRITE))
		return -EINVAL;

	vma->vm_private_data = vdev;

	if (vdev->regions[index].type & VFIO_PLATFORM_REGION_TYPE_MMIO)
		return vfio_platform_mmap_mmio(vdev->regions[index], vma);

	else if (vdev->regions[index].type & VFIO_PLATFORM_REGION_TYPE_PIO)
		return -EINVAL; /* not implemented */

	return -EINVAL;
}
EXPORT_SYMBOL_GPL(vfio_platform_mmap);
/* DT path: cache the device's "compatible" string for reset lookup. */
static int vfio_platform_of_probe(struct vfio_platform_device *vdev,
				  struct device *dev)
{
	int ret = device_property_read_string(dev, "compatible",
					      &vdev->compat);

	if (ret)
		dev_err(dev, "Cannot retrieve compat for %s\n", vdev->name);

	return ret;
}
/*
 * There are two possible kernel build combinations: one where ACPI is
 * not selected in Kconfig, and another with ACPI support compiled in.
 *
 * In the first case, vfio_platform_acpi_probe will return immediately,
 * since acpi_disabled is 1. DT users will not see any messages from
 * ACPI.
 *
 * In the second case, both DT and ACPI support are compiled in, and the
 * system may boot with either kind of firmware.
 *
 * If the firmware is DT type, then acpi_disabled is 1. The ACPI probe routine
 * terminates immediately without any messages.
 *
 * If the firmware is ACPI type, then acpi_disabled is 0. All other checks are
 * valid checks. We cannot claim that this system is DT.
 */
/*
 * Shared init for vfio-platform and vfio-amba: identify the device
 * (ACPI first, then DT "compatible") and look up a reset handler.
 * When reset_required is set, failing to find one fails the init.
 */
int vfio_platform_init_common(struct vfio_platform_device *vdev)
{
	int ret;
	struct device *dev = vdev->vdev.dev;

	ret = vfio_platform_acpi_probe(vdev, dev);
	if (ret)
		ret = vfio_platform_of_probe(vdev, dev);

	if (ret)
		return ret;

	vdev->device = dev;
	mutex_init(&vdev->igate);

	ret = vfio_platform_get_reset(vdev);
	if (ret && vdev->reset_required) {
		dev_err(dev, "No reset function found for device %s\n",
			vdev->name);
		return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(vfio_platform_init_common);
/* Counterpart of vfio_platform_init_common(): drop the reset-module ref. */
void vfio_platform_release_common(struct vfio_platform_device *vdev)
{
	vfio_platform_put_reset(vdev);
}
EXPORT_SYMBOL_GPL(vfio_platform_release_common);
/* Add a reset handler node to the global list (called by reset modules). */
void __vfio_platform_register_reset(struct vfio_platform_reset_node *node)
{
	mutex_lock(&driver_lock);
	list_add(&node->link, &reset_list);
	mutex_unlock(&driver_lock);
}
EXPORT_SYMBOL_GPL(__vfio_platform_register_reset);
/*
 * Remove a previously registered reset handler.  Both the compatible
 * string and the function pointer must match the registration.
 */
void vfio_platform_unregister_reset(const char *compat,
				    vfio_platform_reset_fn_t fn)
{
	struct vfio_platform_reset_node *node, *tmp;

	mutex_lock(&driver_lock);
	list_for_each_entry_safe(node, tmp, &reset_list, link) {
		if (strcmp(node->compat, compat) || node->of_reset != fn)
			continue;
		list_del(&node->link);
		break;
	}
	mutex_unlock(&driver_lock);
}
EXPORT_SYMBOL_GPL(vfio_platform_unregister_reset);
MODULE_VERSION(DRIVER_VERSION);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
| linux-master | drivers/vfio/platform/vfio_platform_common.c |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.